/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_common.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>

#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>

#include <phy_turbo.h>
#include <phy_rate_match.h>

#define DRIVER_NAME turbo_sw

/* Turbo SW PMD logging ID */
static int bbdev_turbo_sw_logtype;

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_turbo_sw_logtype, fmt "\n", \
		##__VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)
/* private data structure */
struct bbdev_private {
	unsigned int max_nb_queues; /**< Max number of queues */

/* Initialisation params structure that can be used by Turbo SW driver */
struct turbo_sw_params {
	int socket_id; /**< Turbo SW device socket */
	uint16_t queues_num; /**< Turbo SW device queues number */

/* Acceptable params for Turbo SW devices */
#define TURBO_SW_MAX_NB_QUEUES_ARG "max_nb_queues"
#define TURBO_SW_SOCKET_ID_ARG "socket_id"

static const char * const turbo_sw_valid_params[] = {
	TURBO_SW_MAX_NB_QUEUES_ARG,
	TURBO_SW_SOCKET_ID_ARG
struct turbo_sw_queue {
	/* Ring for processed (encoded/decoded) operations which are ready to
	 * be dequeued.
	 */
	struct rte_ring *processed_pkts;
	/* Stores input for turbo encoder (used when CRC attachment is
	 * switched on).
	 */
	/* Stores output from turbo encoder */
	/* Alpha gamma buf for bblib_turbo_decoder() function */
	/* Temp buf for bblib_turbo_decoder() function */
	/* Input buf for bblib_rate_dematching_lte() function */
	/* Output buf for bblib_rate_dematching_lte() function */
	uint8_t *deint_output;
	/* Output buf for bblib_turbodec_adapter_lte() function */
	uint8_t *adapter_output;
	/* Operation type of this queue */
	enum rte_bbdev_op_type type;
} __rte_cache_aligned;
/* Calculate index based on Table 5.1.3-3 of 3GPP TS 36.212 */
compute_idx(uint16_t k)
	if (k < RTE_BBDEV_MIN_CB_SIZE || k > RTE_BBDEV_MAX_CB_SIZE)
		if ((k - 2048) % 64 != 0)
			result = 124 + (k - 2048) / 64;
	} else if (k <= 512) {
		if ((k - 40) % 8 != 0)
			result = (k - 40) / 8 + 1;
	} else if (k <= 1024) {
		if ((k - 512) % 16 != 0)
			result = 60 + (k - 512) / 16;
	} else { /* 1024 < k <= 2048 */
		if ((k - 1024) % 32 != 0)
			result = 92 + (k - 1024) / 32;
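	/*
	 * For illustration (not part of the original logic): the mapping above
	 * follows Table 5.1.3-3 of 3GPP TS 36.212, e.g. K = 40 -> 1,
	 * K = 512 -> 60, K = 1024 -> 92, K = 2048 -> 124 and K = 6144 -> 188.
	 */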
/* Read flag value from bitmap (non-zero means the flag is set) */
check_bit(uint32_t bitmap, uint32_t bitmask)
	return bitmap & bitmask;
/* Get device info */
info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
	struct bbdev_private *internals = dev->data->dev_private;

	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
			.type = RTE_BBDEV_OP_TURBO_DEC,
				RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
				RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN |
				RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
				RTE_BBDEV_TURBO_CRC_TYPE_24B |
				RTE_BBDEV_TURBO_EARLY_TERMINATION,
			.max_llr_modulus = 16,
			.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
			.num_buffers_hard_out =
					RTE_BBDEV_MAX_CODE_BLOCKS,
			.num_buffers_soft_out = 0,
			.type = RTE_BBDEV_OP_TURBO_ENC,
				RTE_BBDEV_TURBO_CRC_24B_ATTACH |
				RTE_BBDEV_TURBO_CRC_24A_ATTACH |
				RTE_BBDEV_TURBO_RATE_MATCH |
				RTE_BBDEV_TURBO_RV_INDEX_BYPASS,
			.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
			.num_buffers_dst = RTE_BBDEV_MAX_CODE_BLOCKS,
		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
	static struct rte_bbdev_queue_conf default_queue_conf = {
		.queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,

	static const enum rte_cpu_flag_t cpu_flag = RTE_CPUFLAG_SSE4_2;

	default_queue_conf.socket = dev->data->socket_id;

	dev_info->driver_name = RTE_STR(DRIVER_NAME);
	dev_info->max_num_queues = internals->max_nb_queues;
	dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
	dev_info->hardware_accelerated = false;
	dev_info->max_queue_priority = 0;
	dev_info->default_queue_conf = default_queue_conf;
	dev_info->capabilities = bbdev_capabilities;
	dev_info->cpu_flag_reqs = &cpu_flag;
	dev_info->min_alignment = 64;

	rte_bbdev_log_debug("got device info from %u\n", dev->data->dev_id);
q_release(struct rte_bbdev *dev, uint16_t q_id)
	struct turbo_sw_queue *q = dev->data->queues[q_id].queue_private;

		rte_ring_free(q->processed_pkts);
		rte_free(q->enc_out);
		rte_free(q->code_block);
		rte_free(q->deint_input);
		rte_free(q->deint_output);
		rte_free(q->adapter_output);
		dev->data->queues[q_id].queue_private = NULL;

	rte_bbdev_log_debug("released device queue %u:%u",
			dev->data->dev_id, q_id);
q_setup(struct rte_bbdev *dev, uint16_t q_id,
		const struct rte_bbdev_queue_conf *queue_conf)
	struct turbo_sw_queue *q;
	char name[RTE_RING_NAMESIZE];

	/* Allocate the queue data structure. */
	q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
		rte_bbdev_log(ERR, "Failed to allocate queue memory");

	/* Allocate memory for encoder output. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_enc_out%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		return -ENAMETOOLONG;
	q->enc_out = rte_zmalloc_socket(name,
			((RTE_BBDEV_MAX_TB_SIZE >> 3) + 3) *
			sizeof(*q->enc_out) * 3,
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->enc_out == NULL) {
				"Failed to allocate queue memory for %s", name);
	/* Allocate memory for rate matching output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_enc_in%u:%u", dev->data->dev_id,
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		return -ENAMETOOLONG;
	q->enc_in = rte_zmalloc_socket(name,
			(RTE_BBDEV_MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->enc_in == NULL) {
				"Failed to allocate queue memory for %s", name);

	/* Allocate memory for Alpha Gamma temp buffer. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_ag%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		return -ENAMETOOLONG;
	q->ag = rte_zmalloc_socket(name,
			RTE_BBDEV_MAX_CB_SIZE * 10 * sizeof(*q->ag),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
				"Failed to allocate queue memory for %s", name);
	/* Allocate memory for code block temp buffer. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_cb%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		return -ENAMETOOLONG;
	q->code_block = rte_zmalloc_socket(name,
			(6144 >> 3) * sizeof(*q->code_block),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->code_block == NULL) {
				"Failed to allocate queue memory for %s", name);

	/* Allocate memory for Deinterleaver input. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_deint_input%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		return -ENAMETOOLONG;
	q->deint_input = rte_zmalloc_socket(name,
			RTE_BBDEV_MAX_KW * sizeof(*q->deint_input),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->deint_input == NULL) {
				"Failed to allocate queue memory for %s", name);

	/* Allocate memory for Deinterleaver output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_deint_output%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		return -ENAMETOOLONG;
	q->deint_output = rte_zmalloc_socket(NULL,
			RTE_BBDEV_MAX_KW * sizeof(*q->deint_output),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->deint_output == NULL) {
				"Failed to allocate queue memory for %s", name);
	/* Allocate memory for Adapter output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_adapter_output%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		return -ENAMETOOLONG;
	q->adapter_output = rte_zmalloc_socket(NULL,
			RTE_BBDEV_MAX_CB_SIZE * 6 * sizeof(*q->adapter_output),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->adapter_output == NULL) {
				"Failed to allocate queue memory for %s", name);

	/* Create ring for packets awaiting dequeue. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		return -ENAMETOOLONG;
	q->processed_pkts = rte_ring_create(name, queue_conf->queue_size,
			queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (q->processed_pkts == NULL) {
		rte_bbdev_log(ERR, "Failed to create ring for %s", name);

	q->type = queue_conf->op_type;

	dev->data->queues[q_id].queue_private = q;
	rte_bbdev_log_debug("setup device queue %s", name);
	rte_ring_free(q->processed_pkts);
	rte_free(q->enc_out);
	rte_free(q->code_block);
	rte_free(q->deint_input);
	rte_free(q->deint_output);
	rte_free(q->adapter_output);

static const struct rte_bbdev_ops pmd_ops = {
	.info_get = info_get,
	.queue_setup = q_setup,
	.queue_release = q_release
/* Checks if the encoder input buffer is correct.
 * Returns 0 if it's valid, -1 otherwise.
 */
is_enc_input_valid(const uint16_t k, const int32_t k_idx,
		const uint16_t in_length)
		rte_bbdev_log(ERR, "K index is invalid");
	if (in_length - (k >> 3) < 0) {
			"Mismatch between input length (%u bytes) and K (%u bits)",

	if (k > RTE_BBDEV_MAX_CB_SIZE) {
		rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
				k, RTE_BBDEV_MAX_CB_SIZE);

/* Checks if the decoder input buffer is correct.
 * Returns 0 if it's valid, -1 otherwise.
 */
is_dec_input_valid(int32_t k_idx, int16_t kw, int16_t in_length)
		rte_bbdev_log(ERR, "K index is invalid");
	if (in_length - kw < 0) {
			"Mismatch between input length (%u) and kw (%u)",

	if (kw > RTE_BBDEV_MAX_KW) {
		rte_bbdev_log(ERR, "Input length (%u) is too big, max: %d",
				kw, RTE_BBDEV_MAX_KW);
process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
		uint8_t r, uint8_t c, uint16_t k, uint16_t ncb,
		uint32_t e, struct rte_mbuf *m_in, struct rte_mbuf *m_out,
		uint16_t in_offset, uint16_t out_offset, uint16_t total_left)
	uint8_t *in, *out0, *out1, *out2, *tmp_out, *rm_out;
	uint64_t first_3_bytes = 0;
	struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
	struct bblib_crc_request crc_req;
	struct bblib_crc_response crc_resp;
	struct bblib_turbo_encoder_request turbo_req;
	struct bblib_turbo_encoder_response turbo_resp;
	struct bblib_rate_match_dl_request rm_req;
	struct bblib_rate_match_dl_response rm_resp;

	k_idx = compute_idx(k);
	in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);
	/* CRC24A (for TB) */
	if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH) &&
			(enc->code_block_mode == 1)) {
		ret = is_enc_input_valid(k - 24, k_idx, total_left);
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		crc_req.len = (k - 24) >> 3;
		/* Check if there is room for the CRC bits. If not, use
		 * the temporary buffer.
		 */
		if (rte_pktmbuf_append(m_in, 3) == NULL) {
			rte_memcpy(q->enc_in, in, (k - 24) >> 3);
			/* Store the first 3 bytes of the next CB, as they will
			 * be overwritten by the CRC bytes. If this is the last
			 * CB there are no next bytes to preserve and this
			 * branch is not taken.
			 */
			first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
		bblib_lte_crc24a_gen(&crc_req, &crc_resp);
	} else if (enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) {
		ret = is_enc_input_valid(k - 24, k_idx, total_left);
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		crc_req.len = (k - 24) >> 3;
		/* Check if there is room for the CRC bits. This can only be an
		 * issue for the last CB in the TB; if there is no room, use
		 * the temporary buffer.
		 */
		if ((c - r == 1) && (rte_pktmbuf_append(m_in, 3) == NULL)) {
			rte_memcpy(q->enc_in, in, (k - 24) >> 3);
		} else if (c - r > 1) {
			/* Store the first 3 bytes of the next CB, as they will
			 * be overwritten by the CRC bytes. If this is the last
			 * CB there are no next bytes to preserve and this
			 * branch is not taken.
			 */
			first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
		bblib_lte_crc24b_gen(&crc_req, &crc_resp);
		ret = is_enc_input_valid(k, k_idx, total_left);
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
	/* Each bit layer output from turbo encoder is (k+4) bits long, i.e.
	 * input length + 4 tail bits. That's (k/8) + 1 bytes after rounding up.
	 * So dst_data's length should be 3*(k/8) + 3 bytes.
	 * In the rate-matching bypass case the output pointers passed to the
	 * encoder (out0, out1 and out2) can point directly into the output
	 * mbuf data.
	 */
	if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
		out1 = RTE_PTR_ADD(out0, (k >> 3) + 1);
		out2 = RTE_PTR_ADD(out1, (k >> 3) + 1);
		out0 = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3) * 3 + 2);
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
				"Too little space in output mbuf");
		enc->output.length += (k >> 3) * 3 + 2;
		/* rte_bbdev_op_data.offset can be different than the
		 * offset of the appended bytes
		 */
		out0 = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
		out1 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
				out_offset + (k >> 3) + 1);
		out2 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
				out_offset + 2 * ((k >> 3) + 1));
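	/*
	 * Illustrative layout (editorial example): each of the three encoder
	 * output streams is K + 4 bits, i.e. (K >> 3) + 1 bytes, so for
	 * K = 6144 each stream occupies 769 bytes and out1/out2 start 769 and
	 * 1538 bytes after out0 respectively.
	 */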
	turbo_req.case_id = k_idx;
	turbo_req.input_win = in;
	turbo_req.length = k >> 3;
	turbo_resp.output_win_0 = out0;
	turbo_resp.output_win_1 = out1;
	turbo_resp.output_win_2 = out2;
	if (bblib_turbo_encoder(&turbo_req, &turbo_resp) != 0) {
		op->status |= 1 << RTE_BBDEV_DRV_ERROR;
		rte_bbdev_log(ERR, "Turbo Encoder failed");

	/* Restore first 3 bytes of next CB if they were overwritten by CRC */
	if (first_3_bytes != 0)
		*((uint64_t *)&in[(k - 32) >> 3]) = first_3_bytes;
	if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
		/* Integer round up division by 8 */
		uint16_t out_len = (e + 7) >> 3;
		/* The mask array is indexed using E%8. E is an even number so
		 * there are only 4 possible values.
		 */
		const uint8_t mask_out[] = {0xFF, 0xC0, 0xF0, 0xFC};

		/* get output data starting address */
		rm_out = (uint8_t *)rte_pktmbuf_append(m_out, out_len);
		if (rm_out == NULL) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
				"Too little space in output mbuf");
		/* rte_bbdev_op_data.offset can be different than the offset
		 * of the appended bytes
		 */
		rm_out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);

		/* index of current code block */
		/* total number of code blocks */
		/* For DL - 1, UL - 0 */
		rm_req.direction = 1;
		/* According to 3GPP TS 36.212 section 5.1.4.1.2, Nsoft, KMIMO
		 * and MDL_HARQ are used for the Ncb calculation. As Ncb is
		 * already known we can adjust those parameters.
		 */
		rm_req.Nsoft = ncb * rm_req.C;
		/* According to 3GPP TS 36.212 section 5.1.4.1.2, Nl, Qm and G
		 * are used for the E calculation. As E is already known we can
		 * adjust those parameters.
		 */
		rm_req.G = rm_req.NL * rm_req.Qm * rm_req.C;

		rm_req.rvidx = enc->rv_index;
		rm_req.Kidx = k_idx - 1;
		rm_resp.output = rm_out;
		rm_resp.OutputLen = out_len;
		if (enc->op_flags & RTE_BBDEV_TURBO_RV_INDEX_BYPASS)
			rm_req.bypass_rvidx = 1;
			rm_req.bypass_rvidx = 0;

		if (bblib_rate_match_dl(&rm_req, &rm_resp) != 0) {
			op->status |= 1 << RTE_BBDEV_DRV_ERROR;
			rte_bbdev_log(ERR, "Rate matching failed");

		/* SW fills an entire last byte even if E%8 != 0. Clear the
		 * superfluous data bits for consistency with the HW device.
		 */
		mask_id = (e & 7) >> 1;
		rm_out[out_len - 1] &= mask_out[mask_id];
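		/*
		 * Worked example (not in the original source): E is even, so
		 * E % 8 is 0, 2, 4 or 6 and mask_id is 0, 1, 2 or 3; the last
		 * byte then keeps 8, 2, 4 or 6 valid MSBs respectively
		 * (0xFF, 0xC0, 0xF0, 0xFC).
		 */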
		enc->output.length += rm_resp.OutputLen;
		/* Rate matching is bypassed */

		/* Complete the last byte of out0 (where the 4 tail bits are
		 * stored) by moving in the first 4 bits from out1.
		 */
		tmp_out = (uint8_t *) --out1;
		*tmp_out = *tmp_out | ((*(tmp_out + 1) & 0xF0) >> 4);

		/* Shifting out1 data by 4 bits to the left */
		for (m = 0; m < k >> 3; ++m) {
			uint8_t *first = tmp_out;
			uint8_t second = *(tmp_out + 1);
			*first = (*first << 4) | ((second & 0xF0) >> 4);

		/* Shifting out2 data by 8 bits to the left */
		for (m = 0; m < (k >> 3) + 1; ++m) {
			*tmp_out = *(tmp_out + 1);
enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op)
	uint8_t c, r, crc24_bits = 0;
	struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
	uint16_t in_offset = enc->input.offset;
	uint16_t out_offset = enc->output.offset;
	struct rte_mbuf *m_in = enc->input.data;
	struct rte_mbuf *m_out = enc->output.data;
	uint16_t total_left = enc->input.length;

	/* Clear op status */

	if (total_left > RTE_BBDEV_MAX_TB_SIZE >> 3) {
		rte_bbdev_log(ERR, "TB size (%u) is too big, max: %d",
				total_left, RTE_BBDEV_MAX_TB_SIZE);
		op->status = 1 << RTE_BBDEV_DATA_ERROR;

	if (m_in == NULL || m_out == NULL) {
		rte_bbdev_log(ERR, "Invalid mbuf pointer");
		op->status = 1 << RTE_BBDEV_DATA_ERROR;

	if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) ||
			(enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH))
	if (enc->code_block_mode == 0) { /* For Transport Block mode */
		c = enc->tb_params.c;
		r = enc->tb_params.r;
	} else { /* For Code Block mode */

	while (total_left > 0 && r < c) {
		if (enc->code_block_mode == 0) {
			k = (r < enc->tb_params.c_neg) ?
				enc->tb_params.k_neg : enc->tb_params.k_pos;
			ncb = (r < enc->tb_params.c_neg) ?
				enc->tb_params.ncb_neg : enc->tb_params.ncb_pos;
			e = (r < enc->tb_params.cab) ?
				enc->tb_params.ea : enc->tb_params.eb;
			k = enc->cb_params.k;
			ncb = enc->cb_params.ncb;
			e = enc->cb_params.e;

		process_enc_cb(q, op, r, c, k, ncb, e, m_in,
				m_out, in_offset, out_offset, total_left);
		/* Update total_left */
		total_left -= (k - crc24_bits) >> 3;
		/* Update offsets for next CBs (if any) */
		in_offset += (k - crc24_bits) >> 3;
		if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH)
			out_offset += e >> 3;
			out_offset += (k >> 3) * 3 + 2;
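		/*
		 * Illustrative bookkeeping (not part of the original code):
		 * with CRC attachment enabled, crc24_bits is 24, so a CB with
		 * K = 6144 consumes (6144 - 24) / 8 = 765 input bytes, while
		 * the output offset advances by E / 8 bytes when rate matching
		 * is enabled, or by (K / 8) * 3 + 2 bytes when it is bypassed.
		 */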
	/* check if all input data was processed */
	if (total_left != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			"Mismatch between mbuf length and included CBs sizes");

static inline uint16_t
enqueue_enc_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_enc_op **ops,
	for (i = 0; i < nb_ops; ++i)
		enqueue_enc_one_op(q, ops[i]);

	return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
/* Remove the padding bytes from a cyclic buffer.
 * The input buffer is a data stream wk as described in 3GPP TS 36.212 section
 * 5.1.4.1.2 starting from w0 and with length Ncb bytes.
 * The output buffer is a data stream wk with pruned padding bytes. Its length
 * is 3*D bytes and the order of non-padding bytes is preserved.
 */
remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k,
	uint32_t in_idx, out_idx, c_idx;
	const uint32_t d = k + 4;
	const uint32_t kw = (ncb / 3);
	const uint32_t nd = kw - d;
	const uint32_t r_subblock = kw / RTE_BBDEV_C_SUBBLOCK;
	/* Inter-column permutation pattern */
	const uint32_t P[RTE_BBDEV_C_SUBBLOCK] = {0, 16, 8, 24, 4, 20, 12, 28,
			2, 18, 10, 26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13,
			29, 3, 19, 11, 27, 7, 23, 15, 31};
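	/*
	 * Illustrative values (not part of the original code): for K = 6144
	 * and Ncb = 18528 this gives D = 6148, kw = 6176, Nd = 28 and
	 * R_SUBBLOCK = 193.
	 */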
	/* The padding bytes are at the first Nd positions in the first row. */
	for (c_idx = 0; in_idx < kw; in_idx += r_subblock, ++c_idx) {
			rte_memcpy(&out[out_idx], &in[in_idx + 1],
			out_idx += r_subblock - 1;
			rte_memcpy(&out[out_idx], &in[in_idx], r_subblock);
			out_idx += r_subblock;

	/* First and second parity bits sub-blocks are interlaced. */
	for (c_idx = 0; in_idx < ncb - 2 * r_subblock;
			in_idx += 2 * r_subblock, ++c_idx) {
		uint32_t second_block_c_idx = P[c_idx];
		uint32_t third_block_c_idx = P[c_idx] + 1;

		if (second_block_c_idx < nd && third_block_c_idx < nd) {
			rte_memcpy(&out[out_idx], &in[in_idx + 2],
			out_idx += 2 * r_subblock - 2;
		} else if (second_block_c_idx >= nd &&
				third_block_c_idx >= nd) {
			rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock);
			out_idx += 2 * r_subblock;
		} else if (second_block_c_idx < nd) {
			out[out_idx++] = in[in_idx];
			rte_memcpy(&out[out_idx], &in[in_idx + 2],
			out_idx += 2 * r_subblock - 2;
			rte_memcpy(&out[out_idx], &in[in_idx + 1],
			out_idx += 2 * r_subblock - 1;
	/* The last interlaced row is different: its last byte is the only
	 * padding byte. We can have from 4 up to 28 padding bytes (Nd) per
	 * sub-block. After interlacing the 1st and 2nd parity sub-blocks we
	 * can have 0, 1 or 2 padding bytes each time we make a step of
	 * 2 * R_SUBBLOCK bytes (moving to another column). The 2nd parity
	 * sub-block uses the same inter-column permutation pattern as the
	 * systematic and 1st parity sub-blocks but it adds '1' to the
	 * resulting index and calculates the modulus of the result and Kw.
	 * The last column is mapped to itself (id 31), so the first byte taken
	 * from the 2nd parity sub-block will be the 32nd (31+1) byte, then the
	 * 64th etc. (the step is C_SUBBLOCK == 32) and the last byte will be
	 * the first byte from the sub-block:
	 * (32 + 32 * (R_SUBBLOCK-1)) % Kw == Kw % Kw == 0. Nd can't be smaller
	 * than 4 so we know that bytes with ids 0, 1, 2 and 3 must be the
	 * padding bytes. The bytes from the 1st parity sub-block are the bytes
	 * from the 31st column - Nd can't be greater than 28 so we are sure
	 * that there are no padding bytes in the 31st column.
	 */
	rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock - 1);
move_padding_bytes(const uint8_t *in, uint8_t *out, uint16_t k,
	uint16_t kpi = ncb / 3;
	uint16_t nd = kpi - d;

	rte_memcpy(&out[nd], in, d);
	rte_memcpy(&out[nd + kpi + 64], &in[kpi], d);
	rte_memcpy(&out[(nd - 1) + 2 * (kpi + 64)], &in[2 * kpi], d);
process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
		uint8_t c, uint16_t k, uint16_t kw, struct rte_mbuf *m_in,
		struct rte_mbuf *m_out, uint16_t in_offset, uint16_t out_offset,
		bool check_crc_24b, uint16_t total_left)
	uint8_t *in, *out, *adapter_input;
	int32_t ncb, ncb_without_null;
	struct bblib_turbo_adapter_ul_response adapter_resp;
	struct bblib_turbo_adapter_ul_request adapter_req;
	struct bblib_turbo_decoder_request turbo_req;
	struct bblib_turbo_decoder_response turbo_resp;
	struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;

	k_idx = compute_idx(k);

	ret = is_dec_input_valid(k_idx, kw, total_left);
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;

	in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);
	ncb_without_null = (k + 4) * 3;
	if (check_bit(dec->op_flags, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE)) {
		struct bblib_deinterleave_ul_request deint_req;
		struct bblib_deinterleave_ul_response deint_resp;

		/* SW decoder accepts only a circular buffer without NULL bytes
		 * so the input needs to be converted.
		 */
		remove_nulls_from_circular_buf(in, q->deint_input, k, ncb);

		deint_req.pharqbuffer = q->deint_input;
		deint_req.ncb = ncb_without_null;
		deint_resp.pinteleavebuffer = q->deint_output;
		bblib_deinterleave_ul(&deint_req, &deint_resp);
		move_padding_bytes(in, q->deint_output, k, ncb);

	adapter_input = q->deint_output;

	if (dec->op_flags & RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN)
		adapter_req.isinverted = 1;
	else if (dec->op_flags & RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN)
		adapter_req.isinverted = 0;
		op->status |= 1 << RTE_BBDEV_DRV_ERROR;
		rte_bbdev_log(ERR, "LLR format wasn't specified");

	adapter_req.ncb = ncb_without_null;
	adapter_req.pinteleavebuffer = adapter_input;
	adapter_resp.pharqout = q->adapter_output;
	bblib_turbo_adapter_ul(&adapter_req, &adapter_resp);

	out = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3));
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR, "Too little space in output mbuf");
	/* rte_bbdev_op_data.offset can be different than the offset of the
	 * appended bytes
	 */
	out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);

	turbo_req.input = (int8_t *)q->adapter_output;
	turbo_req.k_idx = k_idx;
	turbo_req.max_iter_num = dec->iter_max;
	turbo_resp.ag_buf = q->ag;
	turbo_resp.cb_buf = q->code_block;
	turbo_resp.output = out;
	iter_cnt = bblib_turbo_decoder(&turbo_req, &turbo_resp);
	dec->hard_output.length += (k >> 3);

		/* Temporary solution for returned iter_count from SDK */
		iter_cnt = (iter_cnt - 1) / 2;
		dec->iter_count = RTE_MAX(iter_cnt, dec->iter_count);
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR, "Turbo Decoder failed");
enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op)
	struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;
	struct rte_mbuf *m_in = dec->input.data;
	struct rte_mbuf *m_out = dec->hard_output.data;
	uint16_t in_offset = dec->input.offset;
	uint16_t total_left = dec->input.length;
	uint16_t out_offset = dec->hard_output.offset;

	/* Clear op status */

	if (m_in == NULL || m_out == NULL) {
		rte_bbdev_log(ERR, "Invalid mbuf pointer");
		op->status = 1 << RTE_BBDEV_DATA_ERROR;

	if (dec->code_block_mode == 0) { /* For Transport Block mode */
		c = dec->tb_params.c;
	} else { /* For Code Block mode */
		k = dec->cb_params.k;
	while (total_left > 0) {
		if (dec->code_block_mode == 0)
			k = (r < dec->tb_params.c_neg) ?
				dec->tb_params.k_neg : dec->tb_params.k_pos;

		/* Calculates circular buffer size (Kw).
		 * According to 3GPP TS 36.212 section 5.1.4.2
		 * where nCol is 32 and nRow can be calculated from:
		 * where D is the size of each output from turbo encoder block
		 */
		kw = RTE_ALIGN_CEIL(k + 4, RTE_BBDEV_C_SUBBLOCK) * 3;
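		/*
		 * Worked example (editorial note): for K = 6144, D = 6148,
		 * nRow = ceil(6148 / 32) = 193, so Kw = 3 * 193 * 32 = 18528,
		 * which is what the RTE_ALIGN_CEIL() expression above yields.
		 */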
		process_dec_cb(q, op, c, k, kw, m_in, m_out, in_offset,
				out_offset, check_bit(dec->op_flags,
				RTE_BBDEV_TURBO_CRC_TYPE_24B), total_left);
		/* As a result of decoding we get a Code Block with the decoded
		 * CRC24 included at its end. The type of CRC24 is specified by
		 * the flag.
		 */
		/* Update total_left */
		/* Update offsets for next CBs (if any) */
		out_offset += (k >> 3);

	if (total_left != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			"Mismatch between mbuf length and included Circular buffer sizes");
static inline uint16_t
enqueue_dec_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_dec_op **ops,
	for (i = 0; i < nb_ops; ++i)
		enqueue_dec_one_op(q, ops[i]);

	return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,

enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
	void *queue = q_data->queue_private;
	struct turbo_sw_queue *q = queue;
	uint16_t nb_enqueued = 0;

	nb_enqueued = enqueue_enc_all_ops(q, ops, nb_ops);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

enqueue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
	void *queue = q_data->queue_private;
	struct turbo_sw_queue *q = queue;
	uint16_t nb_enqueued = 0;

	nb_enqueued = enqueue_dec_all_ops(q, ops, nb_ops);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;
/* Dequeue decode burst */
dequeue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
	struct turbo_sw_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

/* Dequeue encode burst */
dequeue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
	struct turbo_sw_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;
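/*
 * A minimal application-side usage sketch (editorial example, assuming device
 * 0 / queue 0 are configured and started, and BURST is an app-defined size):
 *
 *	struct rte_bbdev_enc_op *ops[BURST];
 *	uint16_t n_enq = rte_bbdev_enqueue_enc_ops(0, 0, ops, BURST);
 *	uint16_t n_deq = rte_bbdev_dequeue_enc_ops(0, 0, ops, BURST);
 *
 * Since this PMD processes operations synchronously at enqueue time, a
 * dequeue issued right after the enqueue can already return processed ops.
 */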
/* Parse 16-bit integer from string argument */
parse_u16_arg(const char *key, const char *value, void *extra_args)
	uint16_t *u16 = extra_args;
	unsigned long int result;

	if ((value == NULL) || (extra_args == NULL))

	result = strtoul(value, NULL, 0);
	if ((result >= (1 << 16)) || (errno != 0)) {
		rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key);

	*u16 = (uint16_t)result;
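/*
 * For illustration (not in the original source): strtoul() is called with
 * base 0, so both decimal and hexadecimal values are accepted, e.g. "8" -> 8
 * and "0x10" -> 16, while anything >= 65536 is rejected by the check above.
 */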
/* Parse parameters used to create device */
parse_turbo_sw_params(struct turbo_sw_params *params, const char *input_args)
	struct rte_kvargs *kvlist = NULL;

		kvlist = rte_kvargs_parse(input_args, turbo_sw_valid_params);

		ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[0],
				&parse_u16_arg, &params->queues_num);

		ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[1],
				&parse_u16_arg, &params->socket_id);

		if (params->socket_id >= RTE_MAX_NUMA_NODES) {
			rte_bbdev_log(ERR, "Invalid socket, must be < %u",
					RTE_MAX_NUMA_NODES);

	rte_kvargs_free(kvlist);
turbo_sw_bbdev_create(struct rte_vdev_device *vdev,
		struct turbo_sw_params *init_params)
	struct rte_bbdev *bbdev;
	const char *name = rte_vdev_device_name(vdev);

	bbdev = rte_bbdev_allocate(name);

	bbdev->data->dev_private = rte_zmalloc_socket(name,
			sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE,
			init_params->socket_id);
	if (bbdev->data->dev_private == NULL) {
		rte_bbdev_release(bbdev);

	bbdev->dev_ops = &pmd_ops;
	bbdev->device = &vdev->device;
	bbdev->data->socket_id = init_params->socket_id;
	bbdev->intr_handle = NULL;

	/* register rx/tx burst functions for data path */
	bbdev->dequeue_enc_ops = dequeue_enc_ops;
	bbdev->dequeue_dec_ops = dequeue_dec_ops;
	bbdev->enqueue_enc_ops = enqueue_enc_ops;
	bbdev->enqueue_dec_ops = enqueue_dec_ops;
	((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues =
			init_params->queues_num;
/* Initialise device */
turbo_sw_bbdev_probe(struct rte_vdev_device *vdev)
	struct turbo_sw_params init_params = {
		RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
	const char *input_args;

	name = rte_vdev_device_name(vdev);
	input_args = rte_vdev_device_args(vdev);
	parse_turbo_sw_params(&init_params, input_args);

	rte_bbdev_log_debug(
			"Initialising %s on NUMA node %d with max queues: %d\n",
			name, init_params.socket_id, init_params.queues_num);

	return turbo_sw_bbdev_create(vdev, &init_params);
/* Uninitialise device */
turbo_sw_bbdev_remove(struct rte_vdev_device *vdev)
	struct rte_bbdev *bbdev;

	name = rte_vdev_device_name(vdev);
	bbdev = rte_bbdev_get_named_dev(name);

	rte_free(bbdev->data->dev_private);

	return rte_bbdev_release(bbdev);
static struct rte_vdev_driver bbdev_turbo_sw_pmd_drv = {
	.probe = turbo_sw_bbdev_probe,
	.remove = turbo_sw_bbdev_remove

RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_turbo_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
	TURBO_SW_MAX_NB_QUEUES_ARG"=<int> "
	TURBO_SW_SOCKET_ID_ARG"=<int>");
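/*
 * Illustrative EAL invocation (editorial example) using the parameters
 * registered above:
 *	--vdev=turbo_sw,max_nb_queues=8,socket_id=0
 */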
RTE_INIT(turbo_sw_bbdev_init_log);
turbo_sw_bbdev_init_log(void)
	bbdev_turbo_sw_logtype = rte_log_register("pmd.bb.turbo_sw");
	if (bbdev_turbo_sw_logtype >= 0)
		rte_log_set_level(bbdev_turbo_sw_logtype, RTE_LOG_NOTICE);