/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdlib.h>
#include <errno.h>

#include <rte_common.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_kvargs.h>

#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>

#include <phy_turbo.h>
#include <phy_crc.h>
#include <phy_rate_match.h>

#define DRIVER_NAME turbo_sw

/* Number of columns in sub-block interleaver (36.212, section 5.1.4.1.1) */
#define C_SUBBLOCK (32)
#define MAX_TB_SIZE (391656)
#define MAX_CB_SIZE (6144)
#define MAX_KW (18528)
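/* Sanity check on the constants above (derivation, for reference): the
 * largest circular buffer holds K = MAX_CB_SIZE bits, so each stream is
 * D = K + 4 = 6148 bits; padded up to a whole number of interleaver columns
 * that is ceil(6148 / 32) * 32 = 193 * 32 = 6176, and
 * MAX_KW = 3 * 6176 = 18528.
 */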
/* private data structure */
struct bbdev_private {
	unsigned int max_nb_queues;  /**< Max number of queues */
};

/* Initialisation params structure that can be used by Turbo SW driver */
struct turbo_sw_params {
	uint16_t socket_id;  /**< Turbo SW device socket (parsed as uint16) */
	uint16_t queues_num;  /**< Turbo SW device queues number */
};

/* Acceptable params for Turbo SW devices */
#define TURBO_SW_MAX_NB_QUEUES_ARG "max_nb_queues"
#define TURBO_SW_SOCKET_ID_ARG "socket_id"

static const char * const turbo_sw_valid_params[] = {
	TURBO_SW_MAX_NB_QUEUES_ARG,
	TURBO_SW_SOCKET_ID_ARG
};
struct turbo_sw_queue {
	/* Ring for processed (encoded/decoded) operations which are ready to
	 * be dequeued.
	 */
	struct rte_ring *processed_pkts;
	/* Stores input for turbo encoder (used when CRC attachment is
	 * performed).
	 */
	uint8_t *enc_in;
	/* Stores output from turbo encoder */
	uint8_t *enc_out;
	/* Alpha gamma buf for bblib_turbo_decoder() function */
	int8_t *ag;
	/* Temp buf for bblib_turbo_decoder() function */
	uint16_t *code_block;
	/* Input buf for bblib_rate_dematching_lte() function */
	uint8_t *deint_input;
	/* Output buf for bblib_rate_dematching_lte() function */
	uint8_t *deint_output;
	/* Output buf for bblib_turbodec_adapter_lte() function */
	uint8_t *adapter_output;
	/* Operation type of this queue */
	enum rte_bbdev_op_type type;
} __rte_cache_aligned;
/* Calculate index based on Table 5.1.3-3 from TS 36.212 */
static inline int32_t
compute_idx(uint16_t k)
{
	int32_t result = 0;

	if (k < 40 || k > MAX_CB_SIZE)
		return -1;

	if (k > 2048) {
		if ((k - 2048) % 64 != 0)
			return -1;

		result = 124 + (k - 2048) / 64;
	} else if (k <= 512) {
		if ((k - 40) % 8 != 0)
			return -1;

		result = (k - 40) / 8 + 1;
	} else if (k <= 1024) {
		if ((k - 512) % 16 != 0)
			return -1;

		result = 60 + (k - 512) / 16;
	} else { /* 1024 < k <= 2048 */
		if ((k - 1024) % 32 != 0)
			return -1;

		result = 92 + (k - 1024) / 32;
	}

	return result;
}
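/* Worked examples for compute_idx(), for reference: k = 40 yields
 * (40 - 40) / 8 + 1 = 1 (the first entry of Table 5.1.3-3), while k = 6144
 * yields 124 + (6144 - 2048) / 64 = 188 (the last entry).
 */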
/* Read flag value 0/1 from bitmap */
static inline bool
check_bit(uint32_t bitmap, uint32_t bitmask)
{
	return bitmap & bitmask;
}

/* Get device info */
static void
info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
{
	struct bbdev_private *internals = dev->data->dev_private;

	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
		{
			.type = RTE_BBDEV_OP_TURBO_DEC,
			.cap.turbo_dec = {
				.capability_flags =
					RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
					RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN |
					RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
					RTE_BBDEV_TURBO_CRC_TYPE_24B |
					RTE_BBDEV_TURBO_EARLY_TERMINATION,
				.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
				.num_buffers_hard_out =
						RTE_BBDEV_MAX_CODE_BLOCKS,
				.num_buffers_soft_out = 0,
			}
		},
		{
			.type = RTE_BBDEV_OP_TURBO_ENC,
			.cap.turbo_enc = {
				.capability_flags =
						RTE_BBDEV_TURBO_CRC_24B_ATTACH |
						RTE_BBDEV_TURBO_CRC_24A_ATTACH |
						RTE_BBDEV_TURBO_RATE_MATCH |
						RTE_BBDEV_TURBO_RV_INDEX_BYPASS,
				.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
				.num_buffers_dst = RTE_BBDEV_MAX_CODE_BLOCKS,
			}
		},
		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
	};

	static struct rte_bbdev_queue_conf default_queue_conf = {
		.queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
	};

	static const enum rte_cpu_flag_t cpu_flag = RTE_CPUFLAG_SSE4_2;

	default_queue_conf.socket = dev->data->socket_id;

	dev_info->driver_name = RTE_STR(DRIVER_NAME);
	dev_info->max_num_queues = internals->max_nb_queues;
	dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
	dev_info->hardware_accelerated = false;
	dev_info->max_queue_priority = 0;
	dev_info->default_queue_conf = default_queue_conf;
	dev_info->capabilities = bbdev_capabilities;
	dev_info->cpu_flag_reqs = &cpu_flag;
	dev_info->min_alignment = 64;

	rte_bbdev_log_debug("got device info from %u", dev->data->dev_id);
}
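/* For illustration only (not part of this driver): an application would
 * typically consume the capabilities advertised above along these lines,
 * with error handling omitted, dev_id assumed valid and use() hypothetical:
 *
 *	struct rte_bbdev_info info;
 *	const struct rte_bbdev_op_cap *cap;
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	for (cap = info.drv.capabilities;
 *			cap->type != RTE_BBDEV_OP_NONE; ++cap)
 *		if (cap->type == RTE_BBDEV_OP_TURBO_ENC)
 *			use(cap->cap.turbo_enc.capability_flags);
 */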
static int
q_release(struct rte_bbdev *dev, uint16_t q_id)
{
	struct turbo_sw_queue *q = dev->data->queues[q_id].queue_private;

	if (q != NULL) {
		rte_ring_free(q->processed_pkts);
		rte_free(q->enc_out);
		rte_free(q->enc_in);
		rte_free(q->ag);
		rte_free(q->code_block);
		rte_free(q->deint_input);
		rte_free(q->deint_output);
		rte_free(q->adapter_output);
		rte_free(q);
		dev->data->queues[q_id].queue_private = NULL;
	}

	rte_bbdev_log_debug("released device queue %u:%u",
			dev->data->dev_id, q_id);

	return 0;
}
static int
q_setup(struct rte_bbdev *dev, uint16_t q_id,
		const struct rte_bbdev_queue_conf *queue_conf)
{
	int ret;
	struct turbo_sw_queue *q;
	char name[RTE_RING_NAMESIZE];

	/* Allocate the queue data structure. */
	q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q == NULL) {
		rte_bbdev_log(ERR, "Failed to allocate queue memory");
		return -ENOMEM;
	}

	/* Allocate memory for encoder output. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_enc_out%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_all_queues;
	}
	q->enc_out = rte_zmalloc_socket(name,
			((MAX_TB_SIZE >> 3) + 3) * sizeof(*q->enc_out) * 3,
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->enc_out == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_all_queues;
	}

	/* Allocate memory for CRC-extended encoder input. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_enc_in%u:%u", dev->data->dev_id,
			q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_all_queues;
	}
	q->enc_in = rte_zmalloc_socket(name,
			(MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->enc_in == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_all_queues;
	}

	/* Allocate memory for Alpha Gamma temp buffer. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_ag%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_all_queues;
	}
	q->ag = rte_zmalloc_socket(name,
			MAX_CB_SIZE * 10 * sizeof(*q->ag),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->ag == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_all_queues;
	}

	/* Allocate memory for code block temp buffer. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_cb%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_all_queues;
	}
	q->code_block = rte_zmalloc_socket(name,
			(MAX_CB_SIZE >> 3) * sizeof(*q->code_block),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->code_block == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_all_queues;
	}

	/* Allocate memory for Deinterleaver input. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_deint_input%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_all_queues;
	}
	q->deint_input = rte_zmalloc_socket(name,
			MAX_KW * sizeof(*q->deint_input),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->deint_input == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_all_queues;
	}

	/* Allocate memory for Deinterleaver output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_deint_output%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_all_queues;
	}
	q->deint_output = rte_zmalloc_socket(name,
			MAX_KW * sizeof(*q->deint_output),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->deint_output == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_all_queues;
	}

	/* Allocate memory for Adapter output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_adapter_output%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_all_queues;
	}
	q->adapter_output = rte_zmalloc_socket(name,
			MAX_CB_SIZE * 6 * sizeof(*q->adapter_output),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->adapter_output == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_all_queues;
	}

	/* Create ring for packets awaiting to be dequeued. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_all_queues;
	}
	q->processed_pkts = rte_ring_create(name, queue_conf->queue_size,
			queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (q->processed_pkts == NULL) {
		rte_bbdev_log(ERR, "Failed to create ring for %s", name);
		ret = -EFAULT;
		goto free_all_queues;
	}

	q->type = queue_conf->op_type;

	dev->data->queues[q_id].queue_private = q;
	rte_bbdev_log_debug("setup device queue %s", name);
	return 0;

free_all_queues:
	rte_ring_free(q->processed_pkts);
	rte_free(q->enc_out);
	rte_free(q->enc_in);
	rte_free(q->ag);
	rte_free(q->code_block);
	rte_free(q->deint_input);
	rte_free(q->deint_output);
	rte_free(q->adapter_output);
	rte_free(q);
	return ret;
}

static const struct rte_bbdev_ops pmd_ops = {
	.info_get = info_get,
	.queue_setup = q_setup,
	.queue_release = q_release
};
/* Checks if the encoder input buffer is correct.
 * Returns 0 if it's valid, -1 otherwise.
 */
static inline int
is_enc_input_valid(const uint16_t k, const int32_t k_idx,
		const uint16_t in_length)
{
	if (k_idx < 0) {
		rte_bbdev_log(ERR, "K Index is invalid");
		return -1;
	}

	if (in_length - (k >> 3) < 0) {
		rte_bbdev_log(ERR,
				"Mismatch between input length (%u bytes) and K (%u bits)",
				in_length, k);
		return -1;
	}

	if (k > MAX_CB_SIZE) {
		rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
				k, MAX_CB_SIZE);
		return -1;
	}

	return 0;
}

/* Checks if the decoder input buffer is correct.
 * Returns 0 if it's valid, -1 otherwise.
 */
static inline int
is_dec_input_valid(int32_t k_idx, int16_t kw, int16_t in_length)
{
	if (k_idx < 0) {
		rte_bbdev_log(ERR, "K index is invalid");
		return -1;
	}

	if (in_length - kw < 0) {
		rte_bbdev_log(ERR,
				"Mismatch between input length (%u) and kw (%u)",
				in_length, kw);
		return -1;
	}

	if (kw > MAX_KW) {
		rte_bbdev_log(ERR, "Input length (%u) is too big, max: %d",
				kw, MAX_KW);
		return -1;
	}

	return 0;
}
static inline void
process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
		uint8_t cb_idx, uint8_t c, uint16_t k, uint16_t ncb,
		uint32_t e, struct rte_mbuf *m_in, struct rte_mbuf *m_out,
		uint16_t in_offset, uint16_t out_offset, uint16_t total_left)
{
	int ret;
	int16_t k_idx;
	uint16_t m;
	uint8_t *in, *out0, *out1, *out2, *tmp_out, *rm_out;
	struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
	struct bblib_crc_request crc_req;
	struct bblib_turbo_encoder_request turbo_req;
	struct bblib_turbo_encoder_response turbo_resp;
	struct bblib_rate_match_dl_request rm_req;
	struct bblib_rate_match_dl_response rm_resp;

	k_idx = compute_idx(k);
	in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);

	/* CRC24A (for TB) */
	if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH) &&
		(enc->code_block_mode == 1)) {
		ret = is_enc_input_valid(k - 24, k_idx, total_left);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
		/* copy the input to the temporary buffer to be able to extend
		 * it by 3 CRC bytes
		 */
		rte_memcpy(q->enc_in, in, (k - 24) >> 3);
		crc_req.data = q->enc_in;
		crc_req.len = (k - 24) >> 3;
		if (bblib_lte_crc24a_gen(&crc_req) == -1) {
			op->status |= 1 << RTE_BBDEV_CRC_ERROR;
			rte_bbdev_log(ERR, "CRC24a generation failed");
			return;
		}
		in = q->enc_in;
	} else if (enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) {
		/* CRC24B (for CB) */
		ret = is_enc_input_valid(k - 24, k_idx, total_left);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
		/* copy the input to the temporary buffer to be able to extend
		 * it by 3 CRC bytes
		 */
		rte_memcpy(q->enc_in, in, (k - 24) >> 3);
		crc_req.data = q->enc_in;
		crc_req.len = (k - 24) >> 3;
		if (bblib_lte_crc24b_gen(&crc_req) == -1) {
			op->status |= 1 << RTE_BBDEV_CRC_ERROR;
			rte_bbdev_log(ERR, "CRC24b generation failed");
			return;
		}
		in = q->enc_in;
	} else {
		ret = is_enc_input_valid(k, k_idx, total_left);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
	}

	/* Turbo encoder */

	/* Each bit layer output from turbo encoder is (k+4) bits long, i.e.
	 * input length + 4 tail bits. That's (k/8) + 1 bytes after rounding up.
	 * So dst_data's length should be 3*(k/8) + 3 bytes.
	 */
	out0 = q->enc_out;
	out1 = RTE_PTR_ADD(out0, (k >> 3) + 1);
	out2 = RTE_PTR_ADD(out1, (k >> 3) + 1);

	turbo_req.case_id = k_idx;
	turbo_req.input_win = in;
	turbo_req.length = k >> 3;
	turbo_resp.output_win_0 = out0;
	turbo_resp.output_win_1 = out1;
	turbo_resp.output_win_2 = out2;
	if (bblib_turbo_encoder(&turbo_req, &turbo_resp) != 0) {
		op->status |= 1 << RTE_BBDEV_DRV_ERROR;
		rte_bbdev_log(ERR, "Turbo Encoder failed");
		return;
	}

	/* Rate-matching */
	if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
		/* get output data starting address */
		rm_out = (uint8_t *)rte_pktmbuf_append(m_out, (e >> 3));
		if (rm_out == NULL) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			rte_bbdev_log(ERR,
					"Too little space in output mbuf");
			return;
		}
		/* rte_bbdev_op_data.offset can be different than the offset
		 * of the appended bytes
		 */
		rm_out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);

		/* index of current code block */
		rm_req.r = cb_idx;
		/* total number of code blocks */
		rm_req.C = c;
		/* For DL - 1, UL - 0 */
		rm_req.direction = 1;
		/* According to 3GPP TS 36.212 section 5.1.4.1.2, Nsoft, KMIMO
		 * and MDL_HARQ are used for Ncb calculation. As Ncb is already
		 * known we can adjust those parameters
		 */
		rm_req.Nsoft = ncb * rm_req.C;
		rm_req.KMIMO = 1;
		rm_req.MDL_HARQ = 1;
		/* According to 3GPP TS 36.212 section 5.1.4.1.2, Nl, Qm and G
		 * are used for E calculation. As E is already known we can
		 * adjust those parameters
		 */
		rm_req.NL = e;
		rm_req.Qm = 1;
		rm_req.G = rm_req.NL * rm_req.Qm * rm_req.C;

		rm_req.rvidx = enc->rv_index;
		rm_req.Kidx = k_idx - 1;
		rm_req.nLen = k + 4;
		rm_req.tin0 = out0;
		rm_req.tin1 = out1;
		rm_req.tin2 = out2;
		rm_resp.output = rm_out;
		rm_resp.OutputLen = (e >> 3);
		if (enc->op_flags & RTE_BBDEV_TURBO_RV_INDEX_BYPASS)
			rm_req.bypass_rvidx = 1;
		else
			rm_req.bypass_rvidx = 0;

		if (bblib_rate_match_dl(&rm_req, &rm_resp) != 0) {
			op->status |= 1 << RTE_BBDEV_DRV_ERROR;
			rte_bbdev_log(ERR, "Rate matching failed");
			return;
		}
		enc->output.length += rm_resp.OutputLen;
	} else {
		/* Rate matching is bypassed */

		/* Completing last byte of out0 (where 4 tail bits are stored)
		 * by moving first 4 bits from out1
		 */
		tmp_out = (uint8_t *) --out1;
		*tmp_out = *tmp_out | ((*(tmp_out + 1) & 0xF0) >> 4);
		tmp_out++;
		/* Shifting out1 data by 4 bits to the left */
		for (m = 0; m < k >> 3; ++m) {
			uint8_t *first = tmp_out;
			uint8_t second = *(tmp_out + 1);
			*first = (*first << 4) | ((second & 0xF0) >> 4);
			tmp_out++;
		}
		/* Shifting out2 data by 8 bits to the left */
		for (m = 0; m < (k >> 3) + 1; ++m) {
			*tmp_out = *(tmp_out + 1);
			tmp_out++;
		}

		/* copy shifted output to turbo_enc entity */
		out0 = (uint8_t *)rte_pktmbuf_append(m_out,
				(k >> 3) * 3 + 2);
		if (out0 == NULL) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			rte_bbdev_log(ERR,
					"Too little space in output mbuf");
			return;
		}
		enc->output.length += (k >> 3) * 3 + 2;
		/* rte_bbdev_op_data.offset can be different than the
		 * offset of the appended bytes
		 */
		out0 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
				out_offset);
		rte_memcpy(out0, q->enc_out, (k >> 3) * 3 + 2);
	}
}
static inline void
enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op)
{
	uint8_t c, r, crc24_bits = 0;
	uint16_t k, ncb;
	uint32_t e;
	struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
	uint16_t in_offset = enc->input.offset;
	uint16_t out_offset = enc->output.offset;
	struct rte_mbuf *m_in = enc->input.data;
	struct rte_mbuf *m_out = enc->output.data;
	uint16_t total_left = enc->input.length;

	/* Clear op status */
	op->status = 0;

	if (total_left > MAX_TB_SIZE >> 3) {
		rte_bbdev_log(ERR, "TB size (%u bytes) is too big, max: %d bytes",
				total_left, MAX_TB_SIZE >> 3);
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if (m_in == NULL || m_out == NULL) {
		rte_bbdev_log(ERR, "Invalid mbuf pointer");
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) ||
			(enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH))
		crc24_bits = 24;

	if (enc->code_block_mode == 0) { /* For Transport Block mode */
		c = enc->tb_params.c;
		r = enc->tb_params.r;
	} else { /* For Code Block mode */
		c = 1;
		r = 0;
	}

	while (total_left > 0 && r < c) {
		if (enc->code_block_mode == 0) {
			k = (r < enc->tb_params.c_neg) ?
				enc->tb_params.k_neg : enc->tb_params.k_pos;
			ncb = (r < enc->tb_params.c_neg) ?
				enc->tb_params.ncb_neg : enc->tb_params.ncb_pos;
			e = (r < enc->tb_params.cab) ?
				enc->tb_params.ea : enc->tb_params.eb;
		} else {
			k = enc->cb_params.k;
			ncb = enc->cb_params.ncb;
			e = enc->cb_params.e;
		}

		process_enc_cb(q, op, r, c, k, ncb, e, m_in,
				m_out, in_offset, out_offset, total_left);
		/* Update total_left */
		total_left -= (k - crc24_bits) >> 3;
		/* Update offsets for next CBs (if exist) */
		in_offset += (k - crc24_bits) >> 3;
		if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH)
			out_offset += e >> 3;
		else
			out_offset += (k >> 3) * 3 + 2;
		r++;
	}

	/* check if all input data was processed */
	if (total_left != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included CBs sizes");
	}
}
static inline uint16_t
enqueue_enc_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_enc_op **ops,
		uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; ++i)
		enqueue_enc_one_op(q, ops[i]);

	return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
			NULL);
}
/* Remove the padding bytes from a cyclic buffer.
 * The input buffer is a data stream wk as described in 3GPP TS 36.212 section
 * 5.1.4.1.2 starting from w0 and with length Ncb bytes.
 * The output buffer is a data stream wk with pruned padding bytes. Its length
 * is 3*D bytes and the order of non-padding bytes is preserved.
 */
static inline void
remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k,
		uint16_t ncb)
{
	uint32_t in_idx, out_idx, c_idx;
	const uint32_t d = k + 4;
	const uint32_t kw = (ncb / 3);
	const uint32_t nd = kw - d;
	const uint32_t r_subblock = kw / C_SUBBLOCK;
	/* Inter-column permutation pattern */
	const uint32_t P[C_SUBBLOCK] = {0, 16, 8, 24, 4, 20, 12, 28, 2, 18, 10,
			26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13, 29, 3, 19,
			11, 27, 7, 23, 15, 31};
	in_idx = 0;
	out_idx = 0;

	/* The padding bytes are at the first Nd positions in the first row. */
	for (c_idx = 0; in_idx < kw; in_idx += r_subblock, ++c_idx) {
		if (P[c_idx] < nd) {
			rte_memcpy(&out[out_idx], &in[in_idx + 1],
					r_subblock - 1);
			out_idx += r_subblock - 1;
		} else {
			rte_memcpy(&out[out_idx], &in[in_idx], r_subblock);
			out_idx += r_subblock;
		}
	}

	/* First and second parity bits sub-blocks are interlaced. */
	for (c_idx = 0; in_idx < ncb - 2 * r_subblock;
			in_idx += 2 * r_subblock, ++c_idx) {
		uint32_t second_block_c_idx = P[c_idx];
		uint32_t third_block_c_idx = P[c_idx] + 1;

		if (second_block_c_idx < nd && third_block_c_idx < nd) {
			rte_memcpy(&out[out_idx], &in[in_idx + 2],
					2 * r_subblock - 2);
			out_idx += 2 * r_subblock - 2;
		} else if (second_block_c_idx >= nd &&
				third_block_c_idx >= nd) {
			rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock);
			out_idx += 2 * r_subblock;
		} else if (second_block_c_idx < nd) {
			out[out_idx++] = in[in_idx];
			rte_memcpy(&out[out_idx], &in[in_idx + 2],
					2 * r_subblock - 2);
			out_idx += 2 * r_subblock - 2;
		} else {
			rte_memcpy(&out[out_idx], &in[in_idx + 1],
					2 * r_subblock - 1);
			out_idx += 2 * r_subblock - 1;
		}
	}

	/* Last interlaced row is different - its last byte is the only padding
	 * byte. We can have from 2 up to 26 padding bytes (Nd) per sub-block.
	 * After interlacing the 1st and 2nd parity sub-blocks we can have 0, 1
	 * or 2 padding bytes each time we make a step of 2 * R_SUBBLOCK bytes
	 * (moving to another column). 2nd parity sub-block uses the same
	 * inter-column permutation pattern as the systematic and 1st parity
	 * sub-blocks but it adds '1' to the resulting index and calculates the
	 * modulus of the result and Kw. Last column is mapped to itself (id 31)
	 * so the first byte taken from the 2nd parity sub-block will be the
	 * 32nd (31+1) byte, then 64th etc. (step is C_SUBBLOCK == 32) and the
	 * last byte will be the first byte from the sub-block:
	 * (32 + 32 * (R_SUBBLOCK-1)) % Kw == Kw % Kw == 0. Nd can't be smaller
	 * than 2 so we know that bytes with ids 0 and 1 must be the padding
	 * bytes. The bytes from the 1st parity sub-block are the bytes from the
	 * 31st column - Nd can't be greater than 26 so we are sure that there
	 * are no padding bytes in 31st column.
	 */
	rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock - 1);
}
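/* Worked example, for reference: k = 136 gives D = 140 and
 * Ncb = 3 * ceil(140 / 32) * 32 = 480, hence Kw/3 = 160, R_SUBBLOCK = 5 and
 * Nd = 160 - 140 = 20. The function then emits 3 * 140 = 420 bytes,
 * pruning 3 * 20 = 60 padding bytes.
 */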
static inline void
move_padding_bytes(const uint8_t *in, uint8_t *out, uint16_t k,
		uint16_t ncb)
{
	uint16_t d = k + 4;
	uint16_t kpi = ncb / 3;
	uint16_t nd = kpi - d;

	rte_memcpy(&out[nd], in, d);
	rte_memcpy(&out[nd + kpi + 64], &in[kpi], d);
	rte_memcpy(&out[nd + 2 * (kpi + 64)], &in[2 * kpi], d);
}
static inline void
process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
		uint8_t c, uint16_t k, uint16_t kw, struct rte_mbuf *m_in,
		struct rte_mbuf *m_out, uint16_t in_offset, uint16_t out_offset,
		bool check_crc_24b, uint16_t total_left)
{
	int ret;
	int32_t k_idx;
	int32_t iter_cnt;
	uint8_t *in, *out, *adapter_input;
	int32_t ncb, ncb_without_null;
	struct bblib_turbo_adapter_ul_response adapter_resp;
	struct bblib_turbo_adapter_ul_request adapter_req;
	struct bblib_turbo_decoder_request turbo_req;
	struct bblib_turbo_decoder_response turbo_resp;
	struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;

	RTE_SET_USED(check_crc_24b);

	k_idx = compute_idx(k);

	ret = is_dec_input_valid(k_idx, kw, total_left);
	if (ret != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);
	ncb = kw;
	ncb_without_null = (k + 4) * 3;

	if (check_bit(dec->op_flags, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE)) {
		struct bblib_deinterleave_ul_request deint_req;
		struct bblib_deinterleave_ul_response deint_resp;

		/* SW decoder accepts only a circular buffer without NULL bytes
		 * so the input needs to be converted.
		 */
		remove_nulls_from_circular_buf(in, q->deint_input, k, ncb);

		deint_req.pharqbuffer = q->deint_input;
		deint_req.ncb = ncb_without_null;
		deint_resp.pinteleavebuffer = q->deint_output;
		bblib_deinterleave_ul(&deint_req, &deint_resp);
	} else
		move_padding_bytes(in, q->deint_output, k, ncb);

	adapter_input = q->deint_output;

	if (dec->op_flags & RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN)
		adapter_req.isinverted = 1;
	else if (dec->op_flags & RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN)
		adapter_req.isinverted = 0;
	else {
		op->status |= 1 << RTE_BBDEV_DRV_ERROR;
		rte_bbdev_log(ERR, "LLR format wasn't specified");
		return;
	}

	adapter_req.ncb = ncb_without_null;
	adapter_req.pinteleavebuffer = adapter_input;
	adapter_resp.pharqout = q->adapter_output;
	bblib_turbo_adapter_ul(&adapter_req, &adapter_resp);

	out = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3));
	if (out == NULL) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR, "Too little space in output mbuf");
		return;
	}
	/* rte_bbdev_op_data.offset can be different than the offset of the
	 * appended bytes
	 */
	out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);

	/* Turbo decoder */
	turbo_req.c = c;
	turbo_req.input = (int8_t *)q->adapter_output;
	turbo_req.k = k;
	turbo_req.k_idx = k_idx;
	turbo_req.max_iter_num = dec->iter_max;
	turbo_resp.ag_buf = q->ag;
	turbo_resp.cb_buf = q->code_block;
	turbo_resp.output = out;
	iter_cnt = bblib_turbo_decoder(&turbo_req, &turbo_resp);
	dec->hard_output.length += (k >> 3);

	if (iter_cnt > 0) {
		/* Temporary solution for returned iter_count from SDK */
		iter_cnt = (iter_cnt - 1) / 2;
		dec->iter_count = RTE_MAX(iter_cnt, dec->iter_count);
	} else {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR, "Turbo Decoder failed");
		return;
	}
}
static inline void
enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op)
{
	uint8_t c, r = 0;
	uint16_t kw, k = 0;
	struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;
	struct rte_mbuf *m_in = dec->input.data;
	struct rte_mbuf *m_out = dec->hard_output.data;
	uint16_t in_offset = dec->input.offset;
	uint16_t total_left = dec->input.length;
	uint16_t out_offset = dec->hard_output.offset;

	/* Clear op status */
	op->status = 0;

	if (m_in == NULL || m_out == NULL) {
		rte_bbdev_log(ERR, "Invalid mbuf pointer");
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if (dec->code_block_mode == 0) { /* For Transport Block mode */
		c = dec->tb_params.c;
	} else { /* For Code Block mode */
		k = dec->cb_params.k;
		c = 1;
	}

	while (total_left > 0) {
		if (dec->code_block_mode == 0)
			k = (r < dec->tb_params.c_neg) ?
				dec->tb_params.k_neg : dec->tb_params.k_pos;

		/* Calculates circular buffer size (Kw).
		 * According to 3GPP TS 36.212 section 5.1.4.2
		 *   Kw = 3 * Kpi,
		 * where:
		 *   Kpi = nCol * nRow
		 * where nCol is 32 and nRow can be calculated from:
		 *   D <= nCol * nRow
		 * where D is the size of each output from turbo encoder block
		 * (k + 4).
		 */
		kw = RTE_ALIGN_CEIL(k + 4, C_SUBBLOCK) * 3;

		process_dec_cb(q, op, c, k, kw, m_in, m_out, in_offset,
				out_offset, check_bit(dec->op_flags,
				RTE_BBDEV_TURBO_CRC_TYPE_24B), total_left);
		/* As a result of decoding we get Code Block with included
		 * decoded CRC24 at the end of Code Block. Type of CRC24 is
		 * specified by the operation flags.
		 */

		/* Update total_left */
		total_left -= kw;
		/* Update offsets for next CBs (if exist) */
		in_offset += kw;
		out_offset += (k >> 3);
		r++;
	}

	if (total_left != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included Circular buffer sizes");
	}
}
static inline uint16_t
enqueue_dec_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_dec_op **ops,
		uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; ++i)
		enqueue_dec_one_op(q, ops[i]);

	return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
			NULL);
}
/* Enqueue encode burst */
static uint16_t
enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	void *queue = q_data->queue_private;
	struct turbo_sw_queue *q = queue;
	uint16_t nb_enqueued = 0;

	nb_enqueued = enqueue_enc_all_ops(q, ops, nb_ops);
	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Enqueue decode burst */
static uint16_t
enqueue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	void *queue = q_data->queue_private;
	struct turbo_sw_queue *q = queue;
	uint16_t nb_enqueued = 0;

	nb_enqueued = enqueue_dec_all_ops(q, ops, nb_ops);
	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Dequeue decode burst */
static uint16_t
dequeue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	struct turbo_sw_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

/* Dequeue encode burst */
static uint16_t
dequeue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	struct turbo_sw_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}
/* Parse 16bit integer from string argument */
static inline int
parse_u16_arg(const char *key, const char *value, void *extra_args)
{
	uint16_t *u16 = extra_args;
	unsigned long result;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	errno = 0;
	result = strtoul(value, NULL, 0);
	if ((result >= (1 << 16)) || (errno != 0)) {
		rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key);
		return -ERANGE;
	}
	*u16 = (uint16_t)result;

	return 0;
}
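/* For example, parse_u16_arg("max_nb_queues", "8", &u16) stores 8 in u16,
 * while any value of 65536 or above is rejected with -ERANGE.
 */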
/* Parse parameters used to create device */
static int
parse_turbo_sw_params(struct turbo_sw_params *params, const char *input_args)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (params == NULL)
		return -EINVAL;

	if (input_args) {
		kvlist = rte_kvargs_parse(input_args, turbo_sw_valid_params);
		if (kvlist == NULL)
			return -EFAULT;

		ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[0],
				&parse_u16_arg, &params->queues_num);
		if (ret < 0)
			goto exit;

		ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[1],
				&parse_u16_arg, &params->socket_id);
		if (ret < 0)
			goto exit;

		if (params->socket_id >= RTE_MAX_NUMA_NODES) {
			rte_bbdev_log(ERR, "Invalid socket, must be < %u",
					RTE_MAX_NUMA_NODES);
			ret = -EINVAL;
			goto exit;
		}
	}

exit:
	if (kvlist)
		rte_kvargs_free(kvlist);

	return ret;
}
static int
turbo_sw_bbdev_create(struct rte_vdev_device *vdev,
		struct turbo_sw_params *init_params)
{
	struct rte_bbdev *bbdev;
	const char *name = rte_vdev_device_name(vdev);

	bbdev = rte_bbdev_allocate(name);
	if (bbdev == NULL)
		return -ENODEV;

	bbdev->data->dev_private = rte_zmalloc_socket(name,
			sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE,
			init_params->socket_id);
	if (bbdev->data->dev_private == NULL) {
		rte_bbdev_release(bbdev);
		return -ENOMEM;
	}

	bbdev->dev_ops = &pmd_ops;
	bbdev->device = &vdev->device;
	bbdev->data->socket_id = init_params->socket_id;
	bbdev->intr_handle = NULL;

	/* register rx/tx burst functions for data path */
	bbdev->dequeue_enc_ops = dequeue_enc_ops;
	bbdev->dequeue_dec_ops = dequeue_dec_ops;
	bbdev->enqueue_enc_ops = enqueue_enc_ops;
	bbdev->enqueue_dec_ops = enqueue_dec_ops;
	((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues =
			init_params->queues_num;

	return 0;
}
/* Initialise device */
static int
turbo_sw_bbdev_probe(struct rte_vdev_device *vdev)
{
	struct turbo_sw_params init_params = {
		rte_socket_id(),
		RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
	};
	const char *name;
	const char *input_args;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	input_args = rte_vdev_device_args(vdev);
	parse_turbo_sw_params(&init_params, input_args);

	rte_bbdev_log_debug(
			"Initialising %s on NUMA node %d with max queues: %d",
			name, init_params.socket_id, init_params.queues_num);

	return turbo_sw_bbdev_create(vdev, &init_params);
}

/* Uninitialise device */
static int
turbo_sw_bbdev_remove(struct rte_vdev_device *vdev)
{
	struct rte_bbdev *bbdev;
	const char *name;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	bbdev = rte_bbdev_get_named_dev(name);
	if (bbdev == NULL)
		return -ENODEV;

	rte_free(bbdev->data->dev_private);

	return rte_bbdev_release(bbdev);
}
static struct rte_vdev_driver bbdev_turbo_sw_pmd_drv = {
	.probe = turbo_sw_bbdev_probe,
	.remove = turbo_sw_bbdev_remove
};

RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_turbo_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
	TURBO_SW_MAX_NB_QUEUES_ARG"=<int> "
	TURBO_SW_SOCKET_ID_ARG"=<int>");
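/* Example usage on a bbdev application's EAL command line, for reference:
 *   --vdev='turbo_sw,max_nb_queues=4,socket_id=0'
 */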
RTE_INIT(turbo_sw_bbdev_init_log);
static void
turbo_sw_bbdev_init_log(void)
{
	bbdev_logtype = rte_log_register("pmd.bbdev.turbo_sw");
	if (bbdev_logtype >= 0)
		rte_log_set_level(bbdev_logtype, RTE_LOG_NOTICE);
}