/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_kvargs.h>
#include <rte_cycles.h>
#include <rte_errno.h>

#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>

#include <rte_hexdump.h>

#ifdef RTE_BBDEV_SDK_AVX2
#include <ipp/ipp.h>
#include <ipp/ipps.h>
#include <phy_turbo.h>
#include <phy_crc.h>
#include <phy_rate_match.h>
#endif
#ifdef RTE_BBDEV_SDK_AVX512
#include <bit_reverse.h>
#include <phy_ldpc_encoder_5gnr.h>
#include <phy_ldpc_decoder_5gnr.h>
#include <phy_LDPC_ratematch_5gnr.h>
#include <phy_rate_dematching_5gnr.h>
#endif

#define DRIVER_NAME baseband_turbo_sw

RTE_LOG_REGISTER(bbdev_turbo_sw_logtype, pmd.bb.turbo_sw, NOTICE);

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_turbo_sw_logtype, fmt "\n", \
		##__VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

#define DEINT_INPUT_BUF_SIZE (((RTE_BBDEV_TURBO_MAX_CB_SIZE >> 3) + 1) * 48)
#define DEINT_OUTPUT_BUF_SIZE (DEINT_INPUT_BUF_SIZE * 6)
#define ADAPTER_OUTPUT_BUF_SIZE ((RTE_BBDEV_TURBO_MAX_CB_SIZE + 4) * 48)

/* private data structure */
struct bbdev_private {
	unsigned int max_nb_queues;  /**< Max number of queues */
};

/* Initialisation params structure that can be used by Turbo SW driver */
struct turbo_sw_params {
	int socket_id;  /**< Turbo SW device socket */
	uint16_t queues_num;  /**< Turbo SW device queues number */
};

/* Acceptable params for Turbo SW devices */
#define TURBO_SW_MAX_NB_QUEUES_ARG "max_nb_queues"
#define TURBO_SW_SOCKET_ID_ARG "socket_id"

static const char * const turbo_sw_valid_params[] = {
	TURBO_SW_MAX_NB_QUEUES_ARG,
	TURBO_SW_SOCKET_ID_ARG
};

/* queue */
struct turbo_sw_queue {
	/* Ring for processed (encoded/decoded) operations which are ready to
	 * be dequeued.
	 */
	struct rte_ring *processed_pkts;
	/* Stores input for turbo encoder (used when CRC attachment is
	 * switched on)
	 */
	uint8_t *enc_in;
	/* Stores output from turbo encoder */
	uint8_t *enc_out;
	/* Alpha gamma buf for bblib_turbo_decoder() function */
	int8_t *ag;
	/* Temp buf for bblib_turbo_decoder() function */
	uint8_t *code_block;
	/* Input buf for bblib_rate_dematching_lte() function */
	uint8_t *deint_input;
	/* Output buf for bblib_rate_dematching_lte() function */
	uint8_t *deint_output;
	/* Output buf for bblib_turbodec_adapter_lte() function */
	uint8_t *adapter_output;
	/* Operation type of this queue */
	enum rte_bbdev_op_type type;
} __rte_cache_aligned;

#ifdef RTE_BBDEV_SDK_AVX2
static inline char *
mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)
{
	if (unlikely(len > rte_pktmbuf_tailroom(m)))
		return NULL;

	char *tail = (char *)m->buf_addr + m->data_off + m->data_len;
	m->data_len = (uint16_t)(m->data_len + len);
	m_head->pkt_len = (m_head->pkt_len + len);
	return tail;
}

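/*
 * Note: unlike rte_pktmbuf_append(), which always writes at the tail of the
 * last segment in a chain, this helper appends to the given segment 'm'
 * while keeping pkt_len consistent on the chain head 'm_head'. A minimal
 * usage sketch (hypothetical mbufs, error handling elided):
 *
 *	char *dst = mbuf_append(head, seg, len);
 *	if (dst != NULL)
 *		rte_memcpy(dst, src, len);
 */
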
/* Calculate index based on Table 5.1.3-3 from TS 36.212 */
static inline int32_t
compute_idx(uint16_t k)
{
	int32_t result = 0;

	if (k < RTE_BBDEV_TURBO_MIN_CB_SIZE || k > RTE_BBDEV_TURBO_MAX_CB_SIZE)
		return -1;

	if (k > 2048) {
		if ((k - 2048) % 64 != 0)
			result = -1;
		else
			result = 124 + (k - 2048) / 64;
	} else if (k <= 512) {
		if ((k - 40) % 8 != 0)
			result = -1;
		else
			result = (k - 40) / 8 + 1;
	} else if (k <= 1024) {
		if ((k - 512) % 16 != 0)
			result = -1;
		else
			result = 60 + (k - 512) / 16;
	} else { /* 1024 < k <= 2048 */
		if ((k - 1024) % 32 != 0)
			result = -1;
		else
			result = 92 + (k - 1024) / 32;
	}

	return result;
}
#endif

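/*
 * Worked examples against Table 5.1.3-3 (a quick sanity check):
 *	k = 40   -> (40 - 40)/8 + 1        = index 1   (first entry)
 *	k = 512  -> (512 - 40)/8 + 1       = index 60
 *	k = 1024 -> 60 + (1024 - 512)/16   = index 92
 *	k = 2048 -> 92 + (2048 - 1024)/32  = index 124
 *	k = 6144 -> 124 + (6144 - 2048)/64 = index 188 (last entry)
 */
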
/* Read flag value 0/1 from bitmap */
static inline bool
check_bit(uint32_t bitmap, uint32_t bitmask)
{
	return bitmap & bitmask;
}

/* Get device info */
static void
info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
{
	struct bbdev_private *internals = dev->data->dev_private;

	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
#ifdef RTE_BBDEV_SDK_AVX2
		{
			.type = RTE_BBDEV_OP_TURBO_DEC,
			.cap.turbo_dec = {
				.capability_flags =
					RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
					RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN |
					RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
					RTE_BBDEV_TURBO_CRC_TYPE_24B |
					RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP |
					RTE_BBDEV_TURBO_EARLY_TERMINATION,
				.max_llr_modulus = 16,
				.num_buffers_src =
						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
				.num_buffers_hard_out =
						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
				.num_buffers_soft_out = 0,
			}
		},
		{
			.type = RTE_BBDEV_OP_TURBO_ENC,
			.cap.turbo_enc = {
				.capability_flags =
						RTE_BBDEV_TURBO_CRC_24B_ATTACH |
						RTE_BBDEV_TURBO_CRC_24A_ATTACH |
						RTE_BBDEV_TURBO_RATE_MATCH |
						RTE_BBDEV_TURBO_RV_INDEX_BYPASS,
				.num_buffers_src =
						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
				.num_buffers_dst =
						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
			}
		},
#endif
#ifdef RTE_BBDEV_SDK_AVX512
		{
			.type = RTE_BBDEV_OP_LDPC_ENC,
			.cap.ldpc_enc = {
				.capability_flags =
						RTE_BBDEV_LDPC_RATE_MATCH |
						RTE_BBDEV_LDPC_CRC_24A_ATTACH |
						RTE_BBDEV_LDPC_CRC_24B_ATTACH,
				.num_buffers_src =
						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
				.num_buffers_dst =
						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			}
		},
		{
			.type = RTE_BBDEV_OP_LDPC_DEC,
			.cap.ldpc_dec = {
				.capability_flags =
						RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
						RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK |
						RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
						RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
						RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
						RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE,
				.num_buffers_src =
						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
				.num_buffers_hard_out =
						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
				.num_buffers_soft_out = 0,
			}
		},
#endif
		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
	};

	static struct rte_bbdev_queue_conf default_queue_conf = {
		.queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
	};
#ifdef RTE_BBDEV_SDK_AVX2
	static const enum rte_cpu_flag_t cpu_flag = RTE_CPUFLAG_SSE4_2;
	dev_info->cpu_flag_reqs = &cpu_flag;
#else
	dev_info->cpu_flag_reqs = NULL;
#endif

	default_queue_conf.socket = dev->data->socket_id;

	dev_info->driver_name = RTE_STR(DRIVER_NAME);
	dev_info->max_num_queues = internals->max_nb_queues;
	dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
	dev_info->hardware_accelerated = false;
	dev_info->max_dl_queue_priority = 0;
	dev_info->max_ul_queue_priority = 0;
	dev_info->default_queue_conf = default_queue_conf;
	dev_info->capabilities = bbdev_capabilities;
	dev_info->min_alignment = 64;
	dev_info->harq_buffer_size = 0;

	rte_bbdev_log_debug("got device info from %u", dev->data->dev_id);
}

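/*
 * Application-side counterpart, a minimal sketch using the public bbdev API
 * (dev_id is assumed to identify this vdev):
 *
 *	struct rte_bbdev_info info;
 *	rte_bbdev_info_get(dev_id, &info);
 *	printf("%s: up to %u queues\n", info.dev_name,
 *			info.drv.max_num_queues);
 */
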
/* Release queue */
static int
q_release(struct rte_bbdev *dev, uint16_t q_id)
{
	struct turbo_sw_queue *q = dev->data->queues[q_id].queue_private;

	if (q != NULL) {
		rte_ring_free(q->processed_pkts);
		rte_free(q->enc_out);
		rte_free(q->enc_in);
		rte_free(q->ag);
		rte_free(q->code_block);
		rte_free(q->deint_input);
		rte_free(q->deint_output);
		rte_free(q->adapter_output);
		rte_free(q);
		dev->data->queues[q_id].queue_private = NULL;
	}

	rte_bbdev_log_debug("released device queue %u:%u",
			dev->data->dev_id, q_id);

	return 0;
}

/* Setup a queue */
static int
q_setup(struct rte_bbdev *dev, uint16_t q_id,
		const struct rte_bbdev_queue_conf *queue_conf)
{
	int ret;
	struct turbo_sw_queue *q;
	char name[RTE_RING_NAMESIZE];

	/* Allocate the queue data structure. */
	q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q == NULL) {
		rte_bbdev_log(ERR, "Failed to allocate queue memory");
		return -ENOMEM;
	}

	/* Allocate memory for encoder output. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_enc_o%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->enc_out = rte_zmalloc_socket(name,
			((RTE_BBDEV_TURBO_MAX_TB_SIZE >> 3) + 3) *
			sizeof(*q->enc_out) * 3,
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->enc_out == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_q;
	}

	/* Allocate memory for encoder input (CRC attachment scratchpad). */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_enc_i%u:%u", dev->data->dev_id,
			q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->enc_in = rte_zmalloc_socket(name,
			(RTE_BBDEV_LDPC_MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->enc_in == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_q;
	}

	/* Allocate memory for Alpha Gamma temp buffer. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_ag%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->ag = rte_zmalloc_socket(name,
			RTE_BBDEV_TURBO_MAX_CB_SIZE * 10 * sizeof(*q->ag),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->ag == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_q;
	}

	/* Allocate memory for code block temp buffer. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_cb%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->code_block = rte_zmalloc_socket(name,
			RTE_BBDEV_TURBO_MAX_CB_SIZE * sizeof(*q->code_block),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->code_block == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_q;
	}

	/* Allocate memory for Deinterleaver input. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_de_i%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->deint_input = rte_zmalloc_socket(name,
			DEINT_INPUT_BUF_SIZE * sizeof(*q->deint_input),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->deint_input == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_q;
	}

	/* Allocate memory for Deinterleaver output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_de_o%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->deint_output = rte_zmalloc_socket(NULL,
			DEINT_OUTPUT_BUF_SIZE * sizeof(*q->deint_output),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->deint_output == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_q;
	}

	/* Allocate memory for Adapter output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_ada_o%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->adapter_output = rte_zmalloc_socket(NULL,
			ADAPTER_OUTPUT_BUF_SIZE * sizeof(*q->adapter_output),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->adapter_output == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_q;
	}

	/* Create ring for packets waiting to be dequeued. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->processed_pkts = rte_ring_create(name, queue_conf->queue_size,
			queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (q->processed_pkts == NULL) {
		rte_bbdev_log(ERR, "Failed to create ring for %s", name);
		ret = -rte_errno;
		goto free_q;
	}

	q->type = queue_conf->op_type;

	dev->data->queues[q_id].queue_private = q;
	rte_bbdev_log_debug("setup device queue %s", name);
	return 0;

free_q:
	rte_ring_free(q->processed_pkts);
	rte_free(q->enc_out);
	rte_free(q->enc_in);
	rte_free(q->ag);
	rte_free(q->code_block);
	rte_free(q->deint_input);
	rte_free(q->deint_output);
	rte_free(q->adapter_output);
	rte_free(q);

	return ret;
}

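/*
 * Queues are configured through the public API, which lands in q_setup()
 * above. A minimal sketch (socket and op_type values are illustrative):
 *
 *	struct rte_bbdev_queue_conf conf = {
 *		.socket = 0,
 *		.queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
 *		.op_type = RTE_BBDEV_OP_TURBO_ENC,
 *	};
 *	rte_bbdev_queue_configure(dev_id, 0, &conf);
 */
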
static const struct rte_bbdev_ops pmd_ops = {
	.info_get = info_get,
	.queue_setup = q_setup,
	.queue_release = q_release
};

#ifdef RTE_BBDEV_SDK_AVX2
#ifdef RTE_LIBRTE_BBDEV_DEBUG
/* Checks if the encoder input buffer is correct.
 * Returns 0 if it's valid, -1 otherwise.
 */
static inline int
is_enc_input_valid(const uint16_t k, const int32_t k_idx,
		const uint16_t in_length)
{
	if (k_idx < 0) {
		rte_bbdev_log(ERR, "K Index is invalid");
		return -1;
	}

	if (in_length - (k >> 3) < 0) {
		rte_bbdev_log(ERR,
				"Mismatch between input length (%u bytes) and K (%u bits)",
				in_length, k);
		return -1;
	}

	if (k > RTE_BBDEV_TURBO_MAX_CB_SIZE) {
		rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
				k, RTE_BBDEV_TURBO_MAX_CB_SIZE);
		return -1;
	}

	return 0;
}

/* Checks if the decoder input buffer is correct.
 * Returns 0 if it's valid, -1 otherwise.
 */
static inline int
is_dec_input_valid(int32_t k_idx, int16_t kw, int16_t in_length)
{
	if (k_idx < 0) {
		rte_bbdev_log(ERR, "K index is invalid");
		return -1;
	}

	if (in_length < kw) {
		rte_bbdev_log(ERR,
				"Mismatch between input length (%u) and kw (%u)",
				in_length, kw);
		return -1;
	}

	if (kw > RTE_BBDEV_TURBO_MAX_KW) {
		rte_bbdev_log(ERR, "Input length (%u) is too big, max: %d",
				kw, RTE_BBDEV_TURBO_MAX_KW);
		return -1;
	}

	return 0;
}
#endif
#endif

static inline void
process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
		uint8_t r, uint8_t c, uint16_t k, uint16_t ncb,
		uint32_t e, struct rte_mbuf *m_in, struct rte_mbuf *m_out_head,
		struct rte_mbuf *m_out, uint16_t in_offset, uint16_t out_offset,
		uint16_t in_length, struct rte_bbdev_stats *q_stats)
{
#ifdef RTE_BBDEV_SDK_AVX2
#ifdef RTE_LIBRTE_BBDEV_DEBUG
	int ret;
#else
	RTE_SET_USED(in_length);
#endif
	int16_t k_idx;
	uint16_t m;
	uint8_t *in, *out0, *out1, *out2, *tmp_out, *rm_out;
	uint64_t first_3_bytes = 0;
	struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
	struct bblib_crc_request crc_req;
	struct bblib_crc_response crc_resp;
	struct bblib_turbo_encoder_request turbo_req;
	struct bblib_turbo_encoder_response turbo_resp;
	struct bblib_rate_match_dl_request rm_req;
	struct bblib_rate_match_dl_response rm_resp;
#ifdef RTE_BBDEV_OFFLOAD_COST
	uint64_t start_time;
#else
	RTE_SET_USED(q_stats);
#endif

	k_idx = compute_idx(k);
	in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);

	/* CRC24A (for TB) */
	if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH) &&
			(enc->code_block_mode == 1)) {
#ifdef RTE_LIBRTE_BBDEV_DEBUG
		ret = is_enc_input_valid(k - 24, k_idx, in_length);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
#endif

		crc_req.data = in;
		crc_req.len = k - 24;
		/* Check if there is room for the CRC bits; if not, use
		 * the temporary buffer.
		 */
		if (mbuf_append(m_in, m_in, 3) == NULL) {
			rte_memcpy(q->enc_in, in, (k - 24) >> 3);
			in = q->enc_in;
		} else {
			/* Store the first 3 bytes of the next CB as they will
			 * be overwritten by the CRC bytes. If this is the last
			 * CB there is no point in storing the next 3 bytes and
			 * this if..else branch is skipped.
			 */
			first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
		}

		crc_resp.data = in;
#ifdef RTE_BBDEV_OFFLOAD_COST
		start_time = rte_rdtsc_precise();
#endif
		/* CRC24A generation */
		bblib_lte_crc24a_gen(&crc_req, &crc_resp);
#ifdef RTE_BBDEV_OFFLOAD_COST
		q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
#endif
	} else if (enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) {
		/* CRC24B */
#ifdef RTE_LIBRTE_BBDEV_DEBUG
		ret = is_enc_input_valid(k - 24, k_idx, in_length);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
#endif

		crc_req.data = in;
		crc_req.len = k - 24;
		/* Check if there is room for the CRC bits if this is the last
		 * CB in the TB. If not, use the temporary buffer.
		 */
		if ((c - r == 1) && (mbuf_append(m_in, m_in, 3) == NULL)) {
			rte_memcpy(q->enc_in, in, (k - 24) >> 3);
			in = q->enc_in;
		} else if (c - r > 1) {
			/* Store the first 3 bytes of the next CB as they will
			 * be overwritten by the CRC bytes. If this is the last
			 * CB there is no point in storing the next 3 bytes and
			 * this if..else branch is skipped.
			 */
			first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
		}

		crc_resp.data = in;
#ifdef RTE_BBDEV_OFFLOAD_COST
		start_time = rte_rdtsc_precise();
#endif
		/* CRC24B generation */
		bblib_lte_crc24b_gen(&crc_req, &crc_resp);
#ifdef RTE_BBDEV_OFFLOAD_COST
		q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
#endif
	}
#ifdef RTE_LIBRTE_BBDEV_DEBUG
	else {
		ret = is_enc_input_valid(k, k_idx, in_length);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
	}
#endif

	/* Turbo encoder */

	/* Each bit layer output from turbo encoder is (k+4) bits long, i.e.
	 * input length + 4 tail bits. That's (k/8) + 1 bytes after rounding up.
	 * So dst_data's length should be 3*(k/8) + 3 bytes.
	 * In the Rate-matching bypass case the output pointers passed to the
	 * encoder (out0, out1 and out2) can directly point to addresses of
	 * output from the turbo_enc entity.
	 */
	if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
		out0 = q->enc_out;
		out1 = RTE_PTR_ADD(out0, (k >> 3) + 1);
		out2 = RTE_PTR_ADD(out1, (k >> 3) + 1);
	} else {
		out0 = (uint8_t *)mbuf_append(m_out_head, m_out,
				(k >> 3) * 3 + 2);
		if (out0 == NULL) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			rte_bbdev_log(ERR,
					"Too little space in output mbuf");
			return;
		}
		enc->output.length += (k >> 3) * 3 + 2;
		/* rte_bbdev_op_data.offset can be different than the
		 * offset of the appended bytes
		 */
		out0 = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
		out1 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
				out_offset + (k >> 3) + 1);
		out2 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
				out_offset + 2 * ((k >> 3) + 1));
	}

	turbo_req.case_id = k_idx;
	turbo_req.input_win = in;
	turbo_req.length = k >> 3;
	turbo_resp.output_win_0 = out0;
	turbo_resp.output_win_1 = out1;
	turbo_resp.output_win_2 = out2;

#ifdef RTE_BBDEV_OFFLOAD_COST
	start_time = rte_rdtsc_precise();
#endif
	/* Turbo encoding */
	if (bblib_turbo_encoder(&turbo_req, &turbo_resp) != 0) {
		op->status |= 1 << RTE_BBDEV_DRV_ERROR;
		rte_bbdev_log(ERR, "Turbo Encoder failed");
		return;
	}
#ifdef RTE_BBDEV_OFFLOAD_COST
	q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
#endif

	/* Restore the first 3 bytes of the next CB if they were overwritten
	 * by the CRC.
	 */
	if (first_3_bytes != 0)
		*((uint64_t *)&in[(k - 32) >> 3]) = first_3_bytes;

	/* Rate-matching */
	if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
		uint8_t mask_id;
		/* Integer round up division by 8 */
		uint16_t out_len = (e + 7) >> 3;
		/* The mask array is indexed using E%8. E is an even number so
		 * there are only 4 possible values.
		 */
		const uint8_t mask_out[] = {0xFF, 0xC0, 0xF0, 0xFC};

		/* get output data starting address */
		rm_out = (uint8_t *)mbuf_append(m_out_head, m_out, out_len);
		if (rm_out == NULL) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			rte_bbdev_log(ERR,
					"Too little space in output mbuf");
			return;
		}
		/* rte_bbdev_op_data.offset can be different than the offset
		 * of the appended bytes
		 */
		rm_out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);

		/* index of current code block */
		rm_req.r = r;
		/* total number of code blocks */
		rm_req.C = c;
		/* For DL - 1, UL - 0 */
		rm_req.direction = 1;
		/* According to 3GPP TS 36.212 section 5.1.4.1.2, Nsoft, KMIMO
		 * and MDL_HARQ are used for the Ncb calculation. As Ncb is
		 * already known we can adjust those parameters.
		 */
		rm_req.Nsoft = ncb * rm_req.C;
		rm_req.KMIMO = 1;
		rm_req.MDL_HARQ = 1;
		/* According to 3GPP TS 36.212 section 5.1.4.1.2, Nl, Qm and G
		 * are used for the E calculation. As E is already known we can
		 * adjust those parameters.
		 */
		rm_req.NL = e;
		rm_req.Qm = 1;
		rm_req.G = rm_req.NL * rm_req.Qm * rm_req.C;

		rm_req.rvidx = enc->rv_index;
		rm_req.Kidx = k_idx - 1;
		rm_req.nLen = k + 4;
		rm_req.tin0 = out0;
		rm_req.tin1 = out1;
		rm_req.tin2 = out2;
		rm_resp.output = rm_out;
		rm_resp.OutputLen = out_len;
		if (enc->op_flags & RTE_BBDEV_TURBO_RV_INDEX_BYPASS)
			rm_req.bypass_rvidx = 1;
		else
			rm_req.bypass_rvidx = 0;

#ifdef RTE_BBDEV_OFFLOAD_COST
		start_time = rte_rdtsc_precise();
#endif
		/* Rate-Matching */
		if (bblib_rate_match_dl(&rm_req, &rm_resp) != 0) {
			op->status |= 1 << RTE_BBDEV_DRV_ERROR;
			rte_bbdev_log(ERR, "Rate matching failed");
			return;
		}
#ifdef RTE_BBDEV_OFFLOAD_COST
		q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
#endif

		/* SW fills an entire last byte even if E%8 != 0. Clear the
		 * superfluous data bits for consistency with the HW device.
		 */
		mask_id = (e & 7) >> 1;
		rm_out[out_len - 1] &= mask_out[mask_id];
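		/*
		 * e.g. e = 34: out_len = (34 + 7) >> 3 = 5 bytes, e % 8 = 2
		 * valid bits in the last byte, mask_id = 1, so mask_out[1] =
		 * 0xC0 keeps only the two most significant bits.
		 */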
		enc->output.length += rm_resp.OutputLen;
	} else {
		/* Rate matching is bypassed */

		/* Completing last byte of out0 (where 4 tail bits are stored)
		 * by moving the first 4 bits from out1
		 */
		tmp_out = (uint8_t *) --out1;
		*tmp_out = *tmp_out | ((*(tmp_out + 1) & 0xF0) >> 4);
		tmp_out++;
		/* Shifting out1 data by 4 bits to the left */
		for (m = 0; m < k >> 3; ++m) {
			uint8_t *first = tmp_out;
			uint8_t second = *(tmp_out + 1);
			*first = (*first << 4) | ((second & 0xF0) >> 4);
			tmp_out++;
		}
		/* Shifting out2 data by 8 bits to the left */
		for (m = 0; m < (k >> 3) + 1; ++m) {
			*tmp_out = *(tmp_out + 1);
			tmp_out++;
		}
	}
#else
	RTE_SET_USED(q);
	RTE_SET_USED(op);
	RTE_SET_USED(r);
	RTE_SET_USED(c);
	RTE_SET_USED(k);
	RTE_SET_USED(ncb);
	RTE_SET_USED(e);
	RTE_SET_USED(m_in);
	RTE_SET_USED(m_out_head);
	RTE_SET_USED(m_out);
	RTE_SET_USED(in_offset);
	RTE_SET_USED(out_offset);
	RTE_SET_USED(in_length);
	RTE_SET_USED(q_stats);
#endif
}

static inline void
process_ldpc_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
		uint32_t e, struct rte_mbuf *m_in, struct rte_mbuf *m_out_head,
		struct rte_mbuf *m_out, uint16_t in_offset, uint16_t out_offset,
		uint16_t seg_total_left, struct rte_bbdev_stats *q_stats)
{
#ifdef RTE_BBDEV_SDK_AVX512
	RTE_SET_USED(seg_total_left);
	uint8_t *in, *rm_out;
	struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
	struct bblib_ldpc_encoder_5gnr_request ldpc_req;
	struct bblib_ldpc_encoder_5gnr_response ldpc_resp;
	struct bblib_LDPC_ratematch_5gnr_request rm_req;
	struct bblib_LDPC_ratematch_5gnr_response rm_resp;
	struct bblib_crc_request crc_req;
	struct bblib_crc_response crc_resp;
	uint16_t msgLen, puntBits, parity_offset, out_len;
	uint16_t K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
	uint16_t in_length_in_bits = K - enc->n_filler;
	uint16_t in_length_in_bytes = (in_length_in_bits + 7) >> 3;
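	/*
	 * e.g. BG1 with Zc = 384: K = 22 * 384 = 8448 bits; with no filler
	 * bits the encoder consumes (8448 + 7) / 8 = 1056 input bytes.
	 */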
#ifdef RTE_BBDEV_OFFLOAD_COST
	uint64_t start_time = rte_rdtsc_precise();
#else
	RTE_SET_USED(q_stats);
#endif

	in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);

	/* Masking the filler bits explicitly */
	memset(q->enc_in + (in_length_in_bytes - 3), 0,
			((K + 7) >> 3) - (in_length_in_bytes - 3));

	if (enc->op_flags & RTE_BBDEV_LDPC_CRC_24A_ATTACH) {
		rte_memcpy(q->enc_in, in, in_length_in_bytes - 3);
		crc_req.data = q->enc_in;
		crc_req.len = in_length_in_bits - 24;
		crc_resp.data = q->enc_in;
		bblib_lte_crc24a_gen(&crc_req, &crc_resp);
	} else if (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH) {
		rte_memcpy(q->enc_in, in, in_length_in_bytes - 3);
		crc_req.data = q->enc_in;
		crc_req.len = in_length_in_bits - 24;
		crc_resp.data = q->enc_in;
		bblib_lte_crc24b_gen(&crc_req, &crc_resp);
	} else
		rte_memcpy(q->enc_in, in, in_length_in_bytes);

	/* LDPC Encoding */
	ldpc_req.Zc = enc->z_c;
	ldpc_req.baseGraph = enc->basegraph;
	/* Number of rows set to maximum */
	ldpc_req.nRows = ldpc_req.baseGraph == 1 ? 46 : 42;
	ldpc_req.numberCodeblocks = 1;
	ldpc_req.input[0] = (int8_t *) q->enc_in;
	ldpc_resp.output[0] = (int8_t *) q->enc_out;

	/* Bit reverse the input */
	bblib_bit_reverse(ldpc_req.input[0], in_length_in_bytes << 3);

	if (bblib_ldpc_encoder_5gnr(&ldpc_req, &ldpc_resp) != 0) {
		op->status |= 1 << RTE_BBDEV_DRV_ERROR;
		rte_bbdev_log(ERR, "LDPC Encoder failed");
		return;
	}

	/*
	 * Systematic + Parity : Recreating stream with filler bits, ideally
	 * the bit select could handle this in the RM SDK
	 */
	msgLen = (ldpc_req.baseGraph == 1 ? 22 : 10) * ldpc_req.Zc;
	puntBits = 2 * ldpc_req.Zc;
	parity_offset = msgLen - puntBits;
	ippsCopyBE_1u(((uint8_t *) ldpc_req.input[0]) + (puntBits / 8),
			puntBits % 8, q->adapter_output, 0, parity_offset);
	ippsCopyBE_1u(q->enc_out, 0, q->adapter_output + (parity_offset / 8),
			parity_offset % 8, ldpc_req.nRows * ldpc_req.Zc);

	out_len = (e + 7) >> 3;
	/* get output data starting address */
	rm_out = (uint8_t *)mbuf_append(m_out_head, m_out, out_len);
	if (rm_out == NULL) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR,
				"Too little space in output mbuf");
		return;
	}
	/*
	 * rte_bbdev_op_data.offset can be different than the offset
	 * of the appended bytes
	 */
	rm_out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);

	/* Rate-Matching */
	rm_req.E = e;
	rm_req.Ncb = enc->n_cb;
	rm_req.Qm = enc->q_m;
	rm_req.Zc = enc->z_c;
	rm_req.baseGraph = enc->basegraph;
	rm_req.input = q->adapter_output;
	rm_req.nLen = enc->n_filler;
	rm_req.nullIndex = parity_offset - enc->n_filler;
	rm_req.rvidx = enc->rv_index;
	rm_resp.output = q->deint_output;

	if (bblib_LDPC_ratematch_5gnr(&rm_req, &rm_resp) != 0) {
		op->status |= 1 << RTE_BBDEV_DRV_ERROR;
		rte_bbdev_log(ERR, "Rate matching failed");
		return;
	}

	/* RM SDK may provide non-zero bits on the last byte */
	if ((e % 8) != 0)
		q->deint_output[out_len - 1] &= (1 << (e % 8)) - 1;

	/* Bit reverse the output */
	bblib_bit_reverse((int8_t *) q->deint_output, out_len << 3);

	rte_memcpy(rm_out, q->deint_output, out_len);
	enc->output.length += out_len;

#ifdef RTE_BBDEV_OFFLOAD_COST
	q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
#endif
#else
	RTE_SET_USED(q);
	RTE_SET_USED(op);
	RTE_SET_USED(e);
	RTE_SET_USED(m_in);
	RTE_SET_USED(m_out_head);
	RTE_SET_USED(m_out);
	RTE_SET_USED(in_offset);
	RTE_SET_USED(out_offset);
	RTE_SET_USED(seg_total_left);
	RTE_SET_USED(q_stats);
#endif
}

static inline void
enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
		struct rte_bbdev_stats *queue_stats)
{
	uint8_t c, r, crc24_bits = 0;
	uint16_t k, ncb;
	uint32_t e;
	struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
	uint16_t in_offset = enc->input.offset;
	uint16_t out_offset = enc->output.offset;
	struct rte_mbuf *m_in = enc->input.data;
	struct rte_mbuf *m_out = enc->output.data;
	struct rte_mbuf *m_out_head = enc->output.data;
	uint32_t in_length, mbuf_total_left = enc->input.length;
	uint16_t seg_total_left;

	/* Clear op status */
	op->status = 0;

	if (mbuf_total_left > RTE_BBDEV_TURBO_MAX_TB_SIZE >> 3) {
		rte_bbdev_log(ERR, "TB size (%u) is too big, max: %d",
				mbuf_total_left, RTE_BBDEV_TURBO_MAX_TB_SIZE);
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if (m_in == NULL || m_out == NULL) {
		rte_bbdev_log(ERR, "Invalid mbuf pointer");
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) ||
			(enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH))
		crc24_bits = 24;

	if (enc->code_block_mode == 0) { /* For Transport Block mode */
		c = enc->tb_params.c;
		r = enc->tb_params.r;
	} else { /* For Code Block mode */
		c = 1;
		r = 0;
	}

	while (mbuf_total_left > 0 && r < c) {

		seg_total_left = rte_pktmbuf_data_len(m_in) - in_offset;

		if (enc->code_block_mode == 0) {
			k = (r < enc->tb_params.c_neg) ?
				enc->tb_params.k_neg : enc->tb_params.k_pos;
			ncb = (r < enc->tb_params.c_neg) ?
				enc->tb_params.ncb_neg : enc->tb_params.ncb_pos;
			e = (r < enc->tb_params.cab) ?
				enc->tb_params.ea : enc->tb_params.eb;
		} else {
			k = enc->cb_params.k;
			ncb = enc->cb_params.ncb;
			e = enc->cb_params.e;
		}

		process_enc_cb(q, op, r, c, k, ncb, e, m_in, m_out_head,
				m_out, in_offset, out_offset, seg_total_left,
				queue_stats);
		/* Update total_left */
		in_length = ((k - crc24_bits) >> 3);
		mbuf_total_left -= in_length;
		/* Update offsets for next CBs (if exist) */
		in_offset += (k - crc24_bits) >> 3;
		if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH)
			out_offset += e >> 3;
		else
			out_offset += (k >> 3) * 3 + 2;

		/* Update offsets */
		if (seg_total_left == in_length) {
			/* Go to the next mbuf */
			m_in = m_in->next;
			m_out = m_out->next;
			in_offset = 0;
			out_offset = 0;
		}
		r++;
	}

	/* check if all input data was processed */
	if (mbuf_total_left != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included CBs sizes");
	}
}

static inline void
enqueue_ldpc_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
		struct rte_bbdev_stats *queue_stats)
{
	uint8_t c, r, crc24_bits = 0;
	uint32_t e;
	struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
	uint16_t in_offset = enc->input.offset;
	uint16_t out_offset = enc->output.offset;
	struct rte_mbuf *m_in = enc->input.data;
	struct rte_mbuf *m_out = enc->output.data;
	struct rte_mbuf *m_out_head = enc->output.data;
	uint32_t in_length, mbuf_total_left = enc->input.length;

	uint16_t seg_total_left;

	/* Clear op status */
	op->status = 0;

	if (mbuf_total_left > RTE_BBDEV_TURBO_MAX_TB_SIZE >> 3) {
		rte_bbdev_log(ERR, "TB size (%u) is too big, max: %d",
				mbuf_total_left, RTE_BBDEV_TURBO_MAX_TB_SIZE);
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if (m_in == NULL || m_out == NULL) {
		rte_bbdev_log(ERR, "Invalid mbuf pointer");
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if ((enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH) ||
			(enc->op_flags & RTE_BBDEV_LDPC_CRC_24A_ATTACH))
		crc24_bits = 24;

	if (enc->code_block_mode == 0) { /* For Transport Block mode */
		c = enc->tb_params.c;
		r = enc->tb_params.r;
	} else { /* For Code Block mode */
		c = 1;
		r = 0;
	}

	while (mbuf_total_left > 0 && r < c) {

		seg_total_left = rte_pktmbuf_data_len(m_in) - in_offset;

		if (enc->code_block_mode == 0) {
			e = (r < enc->tb_params.cab) ?
				enc->tb_params.ea : enc->tb_params.eb;
		} else {
			e = enc->cb_params.e;
		}

		process_ldpc_enc_cb(q, op, e, m_in, m_out_head,
				m_out, in_offset, out_offset, seg_total_left,
				queue_stats);
		/* Update total_left */
		in_length = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
		in_length = ((in_length - crc24_bits - enc->n_filler) >> 3);
		mbuf_total_left -= in_length;
		/* Update offsets for next CBs (if exist) */
		in_offset += in_length;
		out_offset += (e + 7) >> 3;

		/* Update offsets */
		if (seg_total_left == in_length) {
			/* Go to the next mbuf */
			m_in = m_in->next;
			m_out = m_out->next;
			in_offset = 0;
			out_offset = 0;
		}
		r++;
	}

	/* check if all input data was processed */
	if (mbuf_total_left != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included CBs sizes %d",
				mbuf_total_left);
	}
}

static inline uint16_t
enqueue_enc_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_enc_op **ops,
		uint16_t nb_ops, struct rte_bbdev_stats *queue_stats)
{
	uint16_t i;
#ifdef RTE_BBDEV_OFFLOAD_COST
	queue_stats->acc_offload_cycles = 0;
#endif

	for (i = 0; i < nb_ops; ++i)
		enqueue_enc_one_op(q, ops[i], queue_stats);

	return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
			NULL);
}

static inline uint16_t
enqueue_ldpc_enc_all_ops(struct turbo_sw_queue *q,
		struct rte_bbdev_enc_op **ops,
		uint16_t nb_ops, struct rte_bbdev_stats *queue_stats)
{
	uint16_t i;
#ifdef RTE_BBDEV_OFFLOAD_COST
	queue_stats->acc_offload_cycles = 0;
#endif

	for (i = 0; i < nb_ops; ++i)
		enqueue_ldpc_enc_one_op(q, ops[i], queue_stats);

	return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
			NULL);
}

#ifdef RTE_BBDEV_SDK_AVX2
static inline void
move_padding_bytes(const uint8_t *in, uint8_t *out, uint16_t k,
		uint16_t ncb)
{
	uint16_t d = k + 4;
	uint16_t kpi = ncb / 3;
	uint16_t nd = kpi - d;

	rte_memcpy(&out[nd], in, d);
	rte_memcpy(&out[nd + kpi + 64], &in[kpi], d);
	rte_memcpy(&out[(nd - 1) + 2 * (kpi + 64)], &in[2 * kpi], d);
}
#endif

static inline void
process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
		uint8_t c, uint16_t k, uint16_t kw, struct rte_mbuf *m_in,
		struct rte_mbuf *m_out_head, struct rte_mbuf *m_out,
		uint16_t in_offset, uint16_t out_offset, bool check_crc_24b,
		uint16_t crc24_overlap, uint16_t in_length,
		struct rte_bbdev_stats *q_stats)
{
#ifdef RTE_BBDEV_SDK_AVX2
#ifdef RTE_LIBRTE_BBDEV_DEBUG
	int ret;
#else
	RTE_SET_USED(in_length);
#endif
	int32_t k_idx;
	int32_t iter_cnt;
	uint8_t *in, *out, *adapter_input;
	int32_t ncb, ncb_without_null;
	struct bblib_turbo_adapter_ul_response adapter_resp;
	struct bblib_turbo_adapter_ul_request adapter_req;
	struct bblib_turbo_decoder_request turbo_req;
	struct bblib_turbo_decoder_response turbo_resp;
	struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;
#ifdef RTE_BBDEV_OFFLOAD_COST
	uint64_t start_time;
#else
	RTE_SET_USED(q_stats);
#endif

	k_idx = compute_idx(k);

#ifdef RTE_LIBRTE_BBDEV_DEBUG
	ret = is_dec_input_valid(k_idx, kw, in_length);
	if (ret != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}
#endif

	in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);
	ncb = kw;
	ncb_without_null = (k + 4) * 3;

	if (check_bit(dec->op_flags, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE)) {
		struct bblib_deinterleave_ul_request deint_req;
		struct bblib_deinterleave_ul_response deint_resp;

		deint_req.circ_buffer = BBLIB_FULL_CIRCULAR_BUFFER;
		deint_req.pharqbuffer = in;
		deint_req.ncb = ncb;
		deint_resp.pinteleavebuffer = q->deint_output;

#ifdef RTE_BBDEV_OFFLOAD_COST
		start_time = rte_rdtsc_precise();
#endif
		/* Sub-block De-Interleaving */
		bblib_deinterleave_ul(&deint_req, &deint_resp);
#ifdef RTE_BBDEV_OFFLOAD_COST
		q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
#endif
	} else
		move_padding_bytes(in, q->deint_output, k, ncb);

	adapter_input = q->deint_output;

	if (dec->op_flags & RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN)
		adapter_req.isinverted = 1;
	else if (dec->op_flags & RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN)
		adapter_req.isinverted = 0;
	else {
		op->status |= 1 << RTE_BBDEV_DRV_ERROR;
		rte_bbdev_log(ERR, "LLR format wasn't specified");
		return;
	}

	adapter_req.ncb = ncb_without_null;
	adapter_req.pinteleavebuffer = adapter_input;
	adapter_resp.pharqout = q->adapter_output;

#ifdef RTE_BBDEV_OFFLOAD_COST
	start_time = rte_rdtsc_precise();
#endif
	/* Turbo decode adaptation */
	bblib_turbo_adapter_ul(&adapter_req, &adapter_resp);
#ifdef RTE_BBDEV_OFFLOAD_COST
	q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
#endif

	out = (uint8_t *)mbuf_append(m_out_head, m_out,
			((k - crc24_overlap) >> 3));
	if (out == NULL) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR, "Too little space in output mbuf");
		return;
	}
	/* rte_bbdev_op_data.offset can be different than the offset of the
	 * appended bytes
	 */
	out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);

	turbo_req.c = c + 1;
	turbo_req.input = (int8_t *)q->adapter_output;
	turbo_req.k = k;
	turbo_req.k_idx = k_idx;
	turbo_req.max_iter_num = dec->iter_max;
	turbo_req.early_term_disable = !check_bit(dec->op_flags,
			RTE_BBDEV_TURBO_EARLY_TERMINATION);
	turbo_resp.ag_buf = q->ag;
	turbo_resp.cb_buf = q->code_block;
	turbo_resp.output = out;

#ifdef RTE_BBDEV_OFFLOAD_COST
	start_time = rte_rdtsc_precise();
#endif
	/* Turbo decoding */
	iter_cnt = bblib_turbo_decoder(&turbo_req, &turbo_resp);
#ifdef RTE_BBDEV_OFFLOAD_COST
	q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
#endif
	dec->hard_output.length += (k >> 3);

	if (iter_cnt > 0) {
		/* Temporary solution for returned iter_count from SDK */
		iter_cnt = (iter_cnt - 1) >> 1;
		dec->iter_count = RTE_MAX(iter_cnt, dec->iter_count);
	} else {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR, "Turbo Decoder failed");
		return;
	}
#else
	RTE_SET_USED(q);
	RTE_SET_USED(op);
	RTE_SET_USED(c);
	RTE_SET_USED(k);
	RTE_SET_USED(kw);
	RTE_SET_USED(m_in);
	RTE_SET_USED(m_out_head);
	RTE_SET_USED(m_out);
	RTE_SET_USED(in_offset);
	RTE_SET_USED(out_offset);
	RTE_SET_USED(check_crc_24b);
	RTE_SET_USED(crc24_overlap);
	RTE_SET_USED(in_length);
	RTE_SET_USED(q_stats);
#endif
}

static inline void
process_ldpc_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
		uint8_t c, uint16_t out_length, uint32_t e,
		struct rte_mbuf *m_in,
		struct rte_mbuf *m_out_head, struct rte_mbuf *m_out,
		struct rte_mbuf *m_harq_in,
		struct rte_mbuf *m_harq_out_head, struct rte_mbuf *m_harq_out,
		uint16_t in_offset, uint16_t out_offset,
		uint16_t harq_in_offset, uint16_t harq_out_offset,
		bool check_crc_24b,
		uint16_t crc24_overlap, uint16_t in_length,
		struct rte_bbdev_stats *q_stats)
{
#ifdef RTE_BBDEV_SDK_AVX512
	RTE_SET_USED(in_length);
	RTE_SET_USED(c);
	uint8_t *in, *out, *harq_in, *harq_out, *adapter_input;
	struct bblib_rate_dematching_5gnr_request derm_req;
	struct bblib_rate_dematching_5gnr_response derm_resp;
	struct bblib_ldpc_decoder_5gnr_request dec_req;
	struct bblib_ldpc_decoder_5gnr_response dec_resp;
	struct bblib_crc_request crc_req;
	struct bblib_crc_response crc_resp;
	struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
	uint16_t K, parity_offset, sys_cols, outLenWithCrc;
	int16_t deRmOutSize, numRows;

	/* Compute some LDPC BG lengths */
	outLenWithCrc = out_length + (crc24_overlap >> 3);
	sys_cols = (dec->basegraph == 1) ? 22 : 10;
	K = sys_cols * dec->z_c;
	parity_offset = K - 2 * dec->z_c;

#ifdef RTE_BBDEV_OFFLOAD_COST
	uint64_t start_time = rte_rdtsc_precise();
#else
	RTE_SET_USED(q_stats);
#endif

	in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);

	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
		/*
		 * Single contiguous block from the first LLR of the
		 * circular buffer.
		 */
		harq_in = NULL;
		if (m_harq_in != NULL)
			harq_in = rte_pktmbuf_mtod_offset(m_harq_in,
					uint8_t *, harq_in_offset);
		if (harq_in == NULL) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			rte_bbdev_log(ERR, "No space in harq input mbuf");
			return;
		}
		uint16_t harq_in_length = RTE_MIN(
				dec->harq_combined_input.length,
				(uint32_t) dec->n_cb);
		memset(q->ag + harq_in_length, 0,
				dec->n_cb - harq_in_length);
		rte_memcpy(q->ag, harq_in, harq_in_length);
	}

	derm_req.p_in = (int8_t *) in;
	derm_req.p_harq = q->ag; /* This doesn't include the filler bits */
	derm_req.base_graph = dec->basegraph;
	derm_req.zc = dec->z_c;
	derm_req.ncb = dec->n_cb;
	derm_req.e = e;
	derm_req.k0 = 0; /* Actual output from SDK */
	derm_req.isretx = check_bit(dec->op_flags,
			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
	derm_req.rvid = dec->rv_index;
	derm_req.modulation_order = dec->q_m;
	derm_req.start_null_index = parity_offset - dec->n_filler;
	derm_req.num_of_null = dec->n_filler;

	bblib_rate_dematching_5gnr(&derm_req, &derm_resp);

	/* Compute RM out size and number of rows */
	deRmOutSize = RTE_MIN(
			derm_req.k0 + derm_req.e -
			((derm_req.k0 < derm_req.start_null_index) ?
					0 : dec->n_filler),
			dec->n_cb - dec->n_filler);
	if (m_harq_in != NULL)
		deRmOutSize = RTE_MAX(deRmOutSize,
				RTE_MIN(dec->n_cb - dec->n_filler,
						m_harq_in->data_len));
	numRows = ((deRmOutSize + dec->n_filler + dec->z_c - 1) / dec->z_c)
			- sys_cols + 2;
	numRows = RTE_MAX(4, numRows);
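	/*
	 * e.g. BG1, Zc = 384, n_cb = 66 * 384 = 25344, no filler bits and a
	 * fully transmitted circular buffer: deRmOutSize = 25344, giving
	 * numRows = 25344/384 - 22 + 2 = 46, the full BG1 parity set.
	 */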
	/* get output data starting address */
	out = (uint8_t *)mbuf_append(m_out_head, m_out, out_length);
	if (out == NULL) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR,
				"Too little space in LDPC decoder output mbuf");
		return;
	}

	/* rte_bbdev_op_data.offset can be different than the offset
	 * of the appended bytes
	 */
	out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
	adapter_input = q->enc_out;

	dec_req.Zc = dec->z_c;
	dec_req.baseGraph = dec->basegraph;
	dec_req.nRows = numRows;
	dec_req.numChannelLlrs = deRmOutSize;
	dec_req.varNodes = derm_req.p_harq;
	dec_req.numFillerBits = dec->n_filler;
	dec_req.maxIterations = dec->iter_max;
	dec_req.enableEarlyTermination = check_bit(dec->op_flags,
			RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
	dec_resp.varNodes = (int16_t *) q->adapter_output;
	dec_resp.compactedMessageBytes = q->enc_out;

	bblib_ldpc_decoder_5gnr(&dec_req, &dec_resp);

	dec->iter_count = RTE_MAX(dec_resp.iterationAtTermination,
			dec->iter_count);
	if (!dec_resp.parityPassedAtTermination)
		op->status |= 1 << RTE_BBDEV_SYNDROME_ERROR;

	bblib_bit_reverse((int8_t *) q->enc_out, outLenWithCrc << 3);

	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK) ||
			check_bit(dec->op_flags,
					RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK)) {
		crc_req.data = adapter_input;
		crc_req.len = K - dec->n_filler - 24;
		crc_resp.check_passed = false;
		crc_resp.data = adapter_input;
		if (check_crc_24b)
			bblib_lte_crc24b_check(&crc_req, &crc_resp);
		else
			bblib_lte_crc24a_check(&crc_req, &crc_resp);
		if (!crc_resp.check_passed)
			op->status |= 1 << RTE_BBDEV_CRC_ERROR;
	}

#ifdef RTE_BBDEV_OFFLOAD_COST
	q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
#endif
	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
		harq_out = NULL;
		if (m_harq_out != NULL) {
			/* Initialize HARQ data length since we overwrite */
			m_harq_out->data_len = 0;
			/* Check there is enough space
			 * in the HARQ outbound buffer
			 */
			harq_out = (uint8_t *)mbuf_append(m_harq_out_head,
					m_harq_out, deRmOutSize);
		}
		if (harq_out == NULL) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			rte_bbdev_log(ERR, "No space in HARQ output mbuf");
			return;
		}
		/* get output data starting address and overwrite the data */
		harq_out = rte_pktmbuf_mtod_offset(m_harq_out, uint8_t *,
				harq_out_offset);
		rte_memcpy(harq_out, derm_req.p_harq, deRmOutSize);
		dec->harq_combined_output.length += deRmOutSize;
	}

	rte_memcpy(out, adapter_input, out_length);
	dec->hard_output.length += out_length;
#else
	RTE_SET_USED(q);
	RTE_SET_USED(op);
	RTE_SET_USED(c);
	RTE_SET_USED(out_length);
	RTE_SET_USED(e);
	RTE_SET_USED(m_in);
	RTE_SET_USED(m_out_head);
	RTE_SET_USED(m_out);
	RTE_SET_USED(m_harq_in);
	RTE_SET_USED(m_harq_out_head);
	RTE_SET_USED(m_harq_out);
	RTE_SET_USED(harq_in_offset);
	RTE_SET_USED(harq_out_offset);
	RTE_SET_USED(in_offset);
	RTE_SET_USED(out_offset);
	RTE_SET_USED(check_crc_24b);
	RTE_SET_USED(crc24_overlap);
	RTE_SET_USED(in_length);
	RTE_SET_USED(q_stats);
#endif
}

static inline void
enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
		struct rte_bbdev_stats *queue_stats)
{
	uint8_t c, r = 0;
	uint16_t kw, k = 0;
	uint16_t crc24_overlap = 0;
	struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;
	struct rte_mbuf *m_in = dec->input.data;
	struct rte_mbuf *m_out = dec->hard_output.data;
	struct rte_mbuf *m_out_head = dec->hard_output.data;
	uint16_t in_offset = dec->input.offset;
	uint16_t out_offset = dec->hard_output.offset;
	uint32_t mbuf_total_left = dec->input.length;
	uint16_t seg_total_left;

	/* Clear op status */
	op->status = 0;

	if (m_in == NULL || m_out == NULL) {
		rte_bbdev_log(ERR, "Invalid mbuf pointer");
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if (dec->code_block_mode == 0) { /* For Transport Block mode */
		c = dec->tb_params.c;
	} else { /* For Code Block mode */
		k = dec->cb_params.k;
		c = 1;
	}

	if ((c > 1) && !check_bit(dec->op_flags,
			RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP))
		crc24_overlap = 24;

	while (mbuf_total_left > 0) {
		if (dec->code_block_mode == 0)
			k = (r < dec->tb_params.c_neg) ?
				dec->tb_params.k_neg : dec->tb_params.k_pos;

		seg_total_left = rte_pktmbuf_data_len(m_in) - in_offset;

		/* Calculates circular buffer size (Kw).
		 * According to 3GPP TS 36.212 section 5.1.4.2
		 *	Kw = 3 * Kpi,
		 * where:
		 *	Kpi = nCol * nRow
		 * and nCol is 32, while nRow is the smallest value satisfying
		 *	D <= nCol * nRow,
		 * where D is the size of each output from the turbo encoder
		 * block (k + 4).
		 */
		kw = RTE_ALIGN_CEIL(k + 4, RTE_BBDEV_TURBO_C_SUBBLOCK) * 3;
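		/*
		 * e.g. k = 40: D = 44, rounded up to a multiple of the 32
		 * subblock columns this gives Kpi = 64, so kw = 3 * 64 = 192.
		 */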
		process_dec_cb(q, op, c, k, kw, m_in, m_out_head, m_out,
				in_offset, out_offset, check_bit(dec->op_flags,
				RTE_BBDEV_TURBO_CRC_TYPE_24B), crc24_overlap,
				seg_total_left, queue_stats);

		/* To keep the CRC24 attached to the end of the Code Block, use
		 * the RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP flag, as the CRC is
		 * removed by default once verified.
		 */

		mbuf_total_left -= kw;

		/* Update offsets */
		if (seg_total_left == kw) {
			/* Go to the next mbuf */
			m_in = m_in->next;
			m_out = m_out->next;
			in_offset = 0;
			out_offset = 0;
		} else {
			/* Update offsets for next CBs (if exist) */
			in_offset += kw;
			out_offset += ((k - crc24_overlap) >> 3);
		}
		r++;
	}
}

static inline void
enqueue_ldpc_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
		struct rte_bbdev_stats *queue_stats)
{
	uint8_t c, r = 0;
	uint32_t e;
	uint16_t out_length, crc24_overlap = 0;
	struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
	struct rte_mbuf *m_in = dec->input.data;
	struct rte_mbuf *m_harq_in = dec->harq_combined_input.data;
	struct rte_mbuf *m_harq_out = dec->harq_combined_output.data;
	struct rte_mbuf *m_harq_out_head = dec->harq_combined_output.data;
	struct rte_mbuf *m_out = dec->hard_output.data;
	struct rte_mbuf *m_out_head = dec->hard_output.data;
	uint16_t in_offset = dec->input.offset;
	uint16_t harq_in_offset = dec->harq_combined_input.offset;
	uint16_t harq_out_offset = dec->harq_combined_output.offset;
	uint16_t out_offset = dec->hard_output.offset;
	uint32_t mbuf_total_left = dec->input.length;
	uint16_t seg_total_left;

	/* Clear op status */
	op->status = 0;

	if (m_in == NULL || m_out == NULL) {
		rte_bbdev_log(ERR, "Invalid mbuf pointer");
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if (dec->code_block_mode == 0) { /* For Transport Block mode */
		c = dec->tb_params.c;
		e = dec->tb_params.ea;
	} else { /* For Code Block mode */
		c = 1;
		e = dec->cb_params.e;
	}

	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
		crc24_overlap = 24;

	out_length = (dec->basegraph == 1 ? 22 : 10) * dec->z_c; /* K */
	out_length = ((out_length - crc24_overlap - dec->n_filler) >> 3);

	while (mbuf_total_left > 0) {
		if (dec->code_block_mode == 0)
			e = (r < dec->tb_params.cab) ?
				dec->tb_params.ea : dec->tb_params.eb;
		/* Special case handling when overusing mbuf */
		if (e < RTE_BBDEV_LDPC_E_MAX_MBUF)
			seg_total_left = rte_pktmbuf_data_len(m_in)
					- in_offset;
		else
			seg_total_left = e;

		process_ldpc_dec_cb(q, op, c, out_length, e,
				m_in, m_out_head, m_out,
				m_harq_in, m_harq_out_head, m_harq_out,
				in_offset, out_offset, harq_in_offset,
				harq_out_offset,
				check_bit(dec->op_flags,
						RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK),
				crc24_overlap,
				seg_total_left, queue_stats);

		/* To keep the CRC24 attached to the end of the Code Block, use
		 * the RTE_BBDEV_LDPC_DEC_TB_CRC_24B_KEEP flag, as the CRC is
		 * removed by default once verified.
		 */

		mbuf_total_left -= e;

		/* Update offsets */
		if (seg_total_left == e) {
			/* Go to the next mbuf */
			m_in = m_in->next;
			m_out = m_out->next;
			if (m_harq_in != NULL)
				m_harq_in = m_harq_in->next;
			if (m_harq_out != NULL)
				m_harq_out = m_harq_out->next;
			in_offset = 0;
			out_offset = 0;
			harq_in_offset = 0;
			harq_out_offset = 0;
		} else {
			/* Update offsets for next CBs (if exist) */
			in_offset += e;
			out_offset += out_length;
		}
		r++;
	}
}

static inline uint16_t
enqueue_dec_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_dec_op **ops,
		uint16_t nb_ops, struct rte_bbdev_stats *queue_stats)
{
	uint16_t i;
#ifdef RTE_BBDEV_OFFLOAD_COST
	queue_stats->acc_offload_cycles = 0;
#endif

	for (i = 0; i < nb_ops; ++i)
		enqueue_dec_one_op(q, ops[i], queue_stats);

	return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
			NULL);
}

static inline uint16_t
enqueue_ldpc_dec_all_ops(struct turbo_sw_queue *q,
		struct rte_bbdev_dec_op **ops,
		uint16_t nb_ops, struct rte_bbdev_stats *queue_stats)
{
	uint16_t i;
#ifdef RTE_BBDEV_OFFLOAD_COST
	queue_stats->acc_offload_cycles = 0;
#endif

	for (i = 0; i < nb_ops; ++i)
		enqueue_ldpc_dec_one_op(q, ops[i], queue_stats);

	return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
			NULL);
}

/* Enqueue encode burst */
static uint16_t
enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	void *queue = q_data->queue_private;
	struct turbo_sw_queue *q = queue;
	uint16_t nb_enqueued = 0;

	nb_enqueued = enqueue_enc_all_ops(q, ops, nb_ops, &q_data->queue_stats);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Enqueue LDPC encode burst */
static uint16_t
enqueue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	void *queue = q_data->queue_private;
	struct turbo_sw_queue *q = queue;
	uint16_t nb_enqueued = 0;

	nb_enqueued = enqueue_ldpc_enc_all_ops(
			q, ops, nb_ops, &q_data->queue_stats);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Enqueue decode burst */
static uint16_t
enqueue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	void *queue = q_data->queue_private;
	struct turbo_sw_queue *q = queue;
	uint16_t nb_enqueued = 0;

	nb_enqueued = enqueue_dec_all_ops(q, ops, nb_ops, &q_data->queue_stats);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Enqueue LDPC decode burst */
static uint16_t
enqueue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	void *queue = q_data->queue_private;
	struct turbo_sw_queue *q = queue;
	uint16_t nb_enqueued = 0;

	nb_enqueued = enqueue_ldpc_dec_all_ops(q, ops, nb_ops,
			&q_data->queue_stats);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Dequeue decode burst */
static uint16_t
dequeue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	struct turbo_sw_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

/* Dequeue encode burst */
static uint16_t
dequeue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	struct turbo_sw_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

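/*
 * Application-side data path, a minimal sketch using the public bbdev API
 * (burst size and polling policy are illustrative):
 *
 *	uint16_t n = rte_bbdev_enqueue_enc_ops(dev_id, q_id, ops, burst);
 *	uint16_t done = 0;
 *	while (done < n)
 *		done += rte_bbdev_dequeue_enc_ops(dev_id, q_id,
 *				ops_deq + done, n - done);
 */
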
/* Parse 16bit integer from string argument */
static inline int
parse_u16_arg(const char *key, const char *value, void *extra_args)
{
	uint16_t *u16 = extra_args;
	unsigned long result;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;
	errno = 0;
	result = strtoul(value, NULL, 0);
	if ((result >= (1 << 16)) || (errno != 0)) {
		rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key);
		return -ERANGE;
	}
	*u16 = (uint16_t)result;
	return 0;
}

/* Parse parameters used to create device */
static int
parse_turbo_sw_params(struct turbo_sw_params *params, const char *input_args)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (params == NULL)
		return -EINVAL;
	if (input_args) {
		kvlist = rte_kvargs_parse(input_args, turbo_sw_valid_params);
		if (kvlist == NULL)
			return -EFAULT;

		ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[0],
				&parse_u16_arg, &params->queues_num);
		if (ret < 0)
			goto exit;

		ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[1],
				&parse_u16_arg, &params->socket_id);
		if (ret < 0)
			goto exit;

		if (params->socket_id >= RTE_MAX_NUMA_NODES) {
			rte_bbdev_log(ERR, "Invalid socket, must be < %u",
					RTE_MAX_NUMA_NODES);
			ret = -EINVAL;
			goto exit;
		}
	}

exit:
	if (kvlist)
		rte_kvargs_free(kvlist);

	return ret;
}

/* Create device */
static int
turbo_sw_bbdev_create(struct rte_vdev_device *vdev,
		struct turbo_sw_params *init_params)
{
	struct rte_bbdev *bbdev;
	const char *name = rte_vdev_device_name(vdev);

	bbdev = rte_bbdev_allocate(name);
	if (bbdev == NULL)
		return -ENODEV;

	bbdev->data->dev_private = rte_zmalloc_socket(name,
			sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE,
			init_params->socket_id);
	if (bbdev->data->dev_private == NULL) {
		rte_bbdev_release(bbdev);
		return -ENOMEM;
	}

	bbdev->dev_ops = &pmd_ops;
	bbdev->device = &vdev->device;
	bbdev->data->socket_id = init_params->socket_id;
	bbdev->intr_handle = NULL;

	/* register rx/tx burst functions for data path */
	bbdev->dequeue_enc_ops = dequeue_enc_ops;
	bbdev->dequeue_dec_ops = dequeue_dec_ops;
	bbdev->enqueue_enc_ops = enqueue_enc_ops;
	bbdev->enqueue_dec_ops = enqueue_dec_ops;
	bbdev->dequeue_ldpc_enc_ops = dequeue_enc_ops;
	bbdev->dequeue_ldpc_dec_ops = dequeue_dec_ops;
	bbdev->enqueue_ldpc_enc_ops = enqueue_ldpc_enc_ops;
	bbdev->enqueue_ldpc_dec_ops = enqueue_ldpc_dec_ops;
	((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues =
			init_params->queues_num;

	return 0;
}

/* Initialise device */
static int
turbo_sw_bbdev_probe(struct rte_vdev_device *vdev)
{
	struct turbo_sw_params init_params = {
		rte_socket_id(),
		RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
	};
	const char *name;
	const char *input_args;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);
	parse_turbo_sw_params(&init_params, input_args);

	rte_bbdev_log_debug(
			"Initialising %s on NUMA node %d with max queues: %d",
			name, init_params.socket_id, init_params.queues_num);

	return turbo_sw_bbdev_create(vdev, &init_params);
}

/* Uninitialise device */
static int
turbo_sw_bbdev_remove(struct rte_vdev_device *vdev)
{
	struct rte_bbdev *bbdev;
	const char *name;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	bbdev = rte_bbdev_get_named_dev(name);
	if (bbdev == NULL)
		return -EINVAL;

	rte_free(bbdev->data->dev_private);

	return rte_bbdev_release(bbdev);
}

static struct rte_vdev_driver bbdev_turbo_sw_pmd_drv = {
	.probe = turbo_sw_bbdev_probe,
	.remove = turbo_sw_bbdev_remove
};

RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_turbo_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
	TURBO_SW_MAX_NB_QUEUES_ARG"=<int> "
	TURBO_SW_SOCKET_ID_ARG"=<int>");
RTE_PMD_REGISTER_ALIAS(DRIVER_NAME, turbo_sw);
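
/*
 * Example devargs (illustrative values; see the turbo_sw documentation for
 * the SDK build requirements):
 *
 *	--vdev="baseband_turbo_sw,max_nb_queues=8,socket_id=0"
 *
 * The alias registered above also allows plain "turbo_sw" as device name.
 */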