1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
10 #include <rte_common.h>
12 #include <rte_launch.h>
13 #include <rte_bbdev.h>
14 #include <rte_cycles.h>
15 #include <rte_lcore.h>
16 #include <rte_malloc.h>
17 #include <rte_random.h>
18 #include <rte_hexdump.h>
19 #include <rte_interrupts.h>
22 #include "test_bbdev_vector.h"
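/* Map SOCKET_ID_ANY to socket 0 when indexing per-socket resources */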
24 #define GET_SOCKET(socket_id) (((socket_id) == SOCKET_ID_ANY) ? 0 : (socket_id))
26 #define MAX_QUEUES RTE_MAX_LCORE
27 #define TEST_REPETITIONS 100
28 #define WAIT_OFFLOAD_US 1000
30 #ifdef RTE_BASEBAND_FPGA_LTE_FEC
31 #include <fpga_lte_fec.h>
32 #define FPGA_LTE_PF_DRIVER_NAME ("intel_fpga_lte_fec_pf")
33 #define FPGA_LTE_VF_DRIVER_NAME ("intel_fpga_lte_fec_vf")
34 #define VF_UL_4G_QUEUE_VALUE 4
35 #define VF_DL_4G_QUEUE_VALUE 4
36 #define UL_4G_BANDWIDTH 3
37 #define DL_4G_BANDWIDTH 3
38 #define UL_4G_LOAD_BALANCE 128
39 #define DL_4G_LOAD_BALANCE 128
40 #define FLR_4G_TIMEOUT 610
43 #ifdef RTE_BASEBAND_FPGA_5GNR_FEC
44 #include <rte_pmd_fpga_5gnr_fec.h>
45 #define FPGA_5GNR_PF_DRIVER_NAME ("intel_fpga_5gnr_fec_pf")
46 #define FPGA_5GNR_VF_DRIVER_NAME ("intel_fpga_5gnr_fec_vf")
47 #define VF_UL_5G_QUEUE_VALUE 4
48 #define VF_DL_5G_QUEUE_VALUE 4
49 #define UL_5G_BANDWIDTH 3
50 #define DL_5G_BANDWIDTH 3
51 #define UL_5G_LOAD_BALANCE 128
52 #define DL_5G_LOAD_BALANCE 128
53 #define FLR_5G_TIMEOUT 610
56 #ifdef RTE_BASEBAND_ACC100
57 #include <rte_acc100_cfg.h>
58 #define ACC100PF_DRIVER_NAME ("intel_acc100_pf")
59 #define ACC100VF_DRIVER_NAME ("intel_acc100_vf")
60 #define ACC100_QMGR_NUM_AQS 16
61 #define ACC100_QMGR_NUM_QGS 2
62 #define ACC100_QMGR_AQ_DEPTH 5
63 #define ACC100_QMGR_INVALID_IDX -1
64 #define ACC100_QMGR_RR 1
65 #define ACC100_QOS_GBR 0
68 #define OPS_CACHE_SIZE 256U
69 #define OPS_POOL_SIZE_MIN 511U /* 0.5K per queue */
73 #define INVALID_OPAQUE -1
75 #define INVALID_QUEUE_ID -1
76 /* Increment for next code block in external HARQ memory */
77 #define HARQ_INCR 32768
78 /* Headroom for filler LLRs insertion in HARQ buffer */
79 #define FILLER_HEADROOM 1024
80 /* Constants for K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */
81 #define N_ZC_1 66 /* N = 66 Zc for BG 1 */
82 #define N_ZC_2 50 /* N = 50 Zc for BG 2 */
83 #define K0_1_1 17 /* K0 fraction numerator for rv 1 and BG 1 */
84 #define K0_1_2 13 /* K0 fraction numerator for rv 1 and BG 2 */
85 #define K0_2_1 33 /* K0 fraction numerator for rv 2 and BG 1 */
86 #define K0_2_2 25 /* K0 fraction numerator for rv 2 and BG 2 */
87 #define K0_3_1 56 /* K0 fraction numerator for rv 3 and BG 1 */
88 #define K0_3_2 43 /* K0 fraction numerator for rv 3 and BG 2 */
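/*
 * A sketch of the resulting formula (see get_k0() below): for rv_index > 0,
 * k0 = floor(K0_x_y * Ncb / N) * Zc with N = N_ZC_x * Zc, which reduces to
 * K0_x_y * Zc when Ncb spans the full circular buffer (Ncb == N).
 */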
90 static struct test_bbdev_vector test_vector;
92 /* Switch between polled (PMD) and interrupt mode for the throughput test case */
93 static bool intr_enabled;
95 /* LLR arithmetic representation for numerical conversion */
96 static int ldpc_llr_decimals;
97 static int ldpc_llr_size;
98 /* Keep track of the LDPC decoder device capability flag */
99 static uint32_t ldpc_cap_flags;
101 /* Represents tested active devices */
102 static struct active_device {
103 const char *driver_name;
105 uint16_t supported_ops;
106 uint16_t queue_ids[MAX_QUEUES];
108 struct rte_mempool *ops_mempool;
109 struct rte_mempool *in_mbuf_pool;
110 struct rte_mempool *hard_out_mbuf_pool;
111 struct rte_mempool *soft_out_mbuf_pool;
112 struct rte_mempool *harq_in_mbuf_pool;
113 struct rte_mempool *harq_out_mbuf_pool;
114 } active_devs[RTE_BBDEV_MAX_DEVS];
116 static uint8_t nb_active_devs;
118 /* Data buffers used by BBDEV ops */
119 struct test_buffers {
120 struct rte_bbdev_op_data *inputs;
121 struct rte_bbdev_op_data *hard_outputs;
122 struct rte_bbdev_op_data *soft_outputs;
123 struct rte_bbdev_op_data *harq_inputs;
124 struct rte_bbdev_op_data *harq_outputs;
127 /* Operation parameters specific to a given test case */
128 struct test_op_params {
129 struct rte_mempool *mp;
130 struct rte_bbdev_dec_op *ref_dec_op;
131 struct rte_bbdev_enc_op *ref_enc_op;
133 uint16_t num_to_process;
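/* Data buffers kept per socket and per queue so each queue can use NUMA-local memory */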
137 struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
140 /* Contains per lcore params */
141 struct thread_params {
151 rte_atomic16_t nb_dequeued;
152 rte_atomic16_t processing_status;
153 rte_atomic16_t burst_sz;
154 struct test_op_params *op_params;
155 struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
156 struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
159 #ifdef RTE_BBDEV_OFFLOAD_COST
160 /* Stores time statistics */
161 struct test_time_stats {
162 /* Stores software enqueue total working time */
163 uint64_t enq_sw_total_time;
164 /* Stores minimum value of software enqueue working time */
165 uint64_t enq_sw_min_time;
166 /* Stores maximum value of software enqueue working time */
167 uint64_t enq_sw_max_time;
168 /* Stores accelerator enqueue total working time */
169 uint64_t enq_acc_total_time;
170 /* Stores minimum value of accelerator enqueue working time */
171 uint64_t enq_acc_min_time;
172 /* Stores maximum value of accelerator enqueue working time */
173 uint64_t enq_acc_max_time;
174 /* Stores dequeue total working time */
175 uint64_t deq_total_time;
176 /* Stores minimum value of dequeue working time */
177 uint64_t deq_min_time;
178 /* Stores maximum value of dequeue working time */
179 uint64_t deq_max_time;
183 typedef int (test_case_function)(struct active_device *ad,
184 struct test_op_params *op_params);
187 mbuf_reset(struct rte_mbuf *m)
197 /* Return a non-zero value when the given flag is set in the bitmap */
199 check_bit(uint32_t bitmap, uint32_t bitmask)
201 return bitmap & bitmask;
205 set_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
207 ad->supported_ops |= (1 << op_type);
211 is_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
213 return ad->supported_ops & (1 << op_type);
217 flags_match(uint32_t flags_req, uint32_t flags_present)
219 return (flags_req & flags_present) == flags_req;
223 clear_soft_out_cap(uint32_t *op_flags)
225 *op_flags &= ~RTE_BBDEV_TURBO_SOFT_OUTPUT;
226 *op_flags &= ~RTE_BBDEV_TURBO_POS_LLR_1_BIT_SOFT_OUT;
227 *op_flags &= ~RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT;
230 /* Convert all the test vector op data entries to big-endian
231  * format. Used when the device expects its input in big-endian
232  * format.
235 convert_op_data_to_be(void)
237 struct op_data_entries *op;
238 enum op_data_type type;
239 uint8_t nb_segs, *rem_data, temp;
241 int complete, rem, i, j;
243 for (type = DATA_INPUT; type < DATA_NUM_TYPES; ++type) {
244 nb_segs = test_vector.entries[type].nb_segments;
245 op = &test_vector.entries[type];
247 /* Invert byte endianness for all the segments */
248 for (i = 0; i < nb_segs; ++i) {
249 len = op->segments[i].length;
250 data = op->segments[i].addr;
252 /* Swap complete u32 bytes */
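/* e.g. the byte sequence 01 02 03 04 becomes 04 03 02 01 */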
254 for (j = 0; j < complete; j++)
255 data[j] = rte_bswap32(data[j]);
257 /* Swap any remaining bytes */
259 rem_data = (uint8_t *)&data[j];
260 for (j = 0; j < rem/2; j++) {
262 rem_data[j] = rem_data[rem - j - 1];
263 rem_data[rem - j - 1] = temp;
270 check_dev_cap(const struct rte_bbdev_info *dev_info)
273 unsigned int nb_inputs, nb_soft_outputs, nb_hard_outputs,
274 nb_harq_inputs, nb_harq_outputs;
275 const struct rte_bbdev_op_cap *op_cap = dev_info->drv.capabilities;
276 uint8_t dev_data_endianness = dev_info->drv.data_endianness;
278 nb_inputs = test_vector.entries[DATA_INPUT].nb_segments;
279 nb_soft_outputs = test_vector.entries[DATA_SOFT_OUTPUT].nb_segments;
280 nb_hard_outputs = test_vector.entries[DATA_HARD_OUTPUT].nb_segments;
281 nb_harq_inputs = test_vector.entries[DATA_HARQ_INPUT].nb_segments;
282 nb_harq_outputs = test_vector.entries[DATA_HARQ_OUTPUT].nb_segments;
284 for (i = 0; op_cap->type != RTE_BBDEV_OP_NONE; ++i, ++op_cap) {
285 if (op_cap->type != test_vector.op_type)
288 if (dev_data_endianness == RTE_BIG_ENDIAN)
289 convert_op_data_to_be();
291 if (op_cap->type == RTE_BBDEV_OP_TURBO_DEC) {
292 const struct rte_bbdev_op_cap_turbo_dec *cap =
293 &op_cap->cap.turbo_dec;
294 /* Ignore lack of soft output capability, just skip
295 * checking if soft output is valid.
297 if ((test_vector.turbo_dec.op_flags &
298 RTE_BBDEV_TURBO_SOFT_OUTPUT) &&
299 !(cap->capability_flags &
300 RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
302 "INFO: Device \"%s\" does not support soft output - soft output flags will be ignored.\n",
305 &test_vector.turbo_dec.op_flags);
308 if (!flags_match(test_vector.turbo_dec.op_flags,
309 cap->capability_flags))
311 if (nb_inputs > cap->num_buffers_src) {
312 printf("Too many inputs defined: %u, max: %u\n",
313 nb_inputs, cap->num_buffers_src);
316 if (nb_soft_outputs > cap->num_buffers_soft_out &&
317 (test_vector.turbo_dec.op_flags &
318 RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
320 "Too many soft outputs defined: %u, max: %u\n",
322 cap->num_buffers_soft_out);
325 if (nb_hard_outputs > cap->num_buffers_hard_out) {
327 "Too many hard outputs defined: %u, max: %u\n",
329 cap->num_buffers_hard_out);
332 if (intr_enabled && !(cap->capability_flags &
333 RTE_BBDEV_TURBO_DEC_INTERRUPTS)) {
335 "Dequeue interrupts are not supported!\n");
340 } else if (op_cap->type == RTE_BBDEV_OP_TURBO_ENC) {
341 const struct rte_bbdev_op_cap_turbo_enc *cap =
342 &op_cap->cap.turbo_enc;
344 if (!flags_match(test_vector.turbo_enc.op_flags,
345 cap->capability_flags))
347 if (nb_inputs > cap->num_buffers_src) {
348 printf("Too many inputs defined: %u, max: %u\n",
349 nb_inputs, cap->num_buffers_src);
352 if (nb_hard_outputs > cap->num_buffers_dst) {
354 "Too many hard outputs defined: %u, max: %u\n",
355 nb_hard_outputs, cap->num_buffers_dst);
358 if (intr_enabled && !(cap->capability_flags &
359 RTE_BBDEV_TURBO_ENC_INTERRUPTS)) {
361 "Dequeue interrupts are not supported!\n");
366 } else if (op_cap->type == RTE_BBDEV_OP_LDPC_ENC) {
367 const struct rte_bbdev_op_cap_ldpc_enc *cap =
368 &op_cap->cap.ldpc_enc;
370 if (!flags_match(test_vector.ldpc_enc.op_flags,
371 cap->capability_flags)) {
372 printf("Flag Mismatch\n");
375 if (nb_inputs > cap->num_buffers_src) {
376 printf("Too many inputs defined: %u, max: %u\n",
377 nb_inputs, cap->num_buffers_src);
380 if (nb_hard_outputs > cap->num_buffers_dst) {
382 "Too many hard outputs defined: %u, max: %u\n",
383 nb_hard_outputs, cap->num_buffers_dst);
386 if (intr_enabled && !(cap->capability_flags &
387 RTE_BBDEV_LDPC_ENC_INTERRUPTS)) {
389 "Dequeue interrupts are not supported!\n");
394 } else if (op_cap->type == RTE_BBDEV_OP_LDPC_DEC) {
395 const struct rte_bbdev_op_cap_ldpc_dec *cap =
396 &op_cap->cap.ldpc_dec;
398 if (!flags_match(test_vector.ldpc_dec.op_flags,
399 cap->capability_flags)) {
400 printf("Flag Mismatch\n");
403 if (nb_inputs > cap->num_buffers_src) {
404 printf("Too many inputs defined: %u, max: %u\n",
405 nb_inputs, cap->num_buffers_src);
408 if (nb_hard_outputs > cap->num_buffers_hard_out) {
410 "Too many hard outputs defined: %u, max: %u\n",
412 cap->num_buffers_hard_out);
415 if (nb_harq_inputs > cap->num_buffers_hard_out) {
417 "Too many HARQ inputs defined: %u, max: %u\n",
419 cap->num_buffers_hard_out);
422 if (nb_harq_outputs > cap->num_buffers_hard_out) {
424 "Too many HARQ outputs defined: %u, max: %u\n",
426 cap->num_buffers_hard_out);
429 if (intr_enabled && !(cap->capability_flags &
430 RTE_BBDEV_LDPC_DEC_INTERRUPTS)) {
432 "Dequeue interrupts are not supported!\n");
435 if (intr_enabled && (test_vector.ldpc_dec.op_flags &
436 (RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
437 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
438 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK
440 printf("Skip loop-back with interrupt\n");
447 if ((i == 0) && (test_vector.op_type == RTE_BBDEV_OP_NONE))
448 return TEST_SUCCESS; /* Special case for NULL device */
453 /* Calculate a mempool size (power of two minus one) not smaller than val */
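/* e.g. optimal_mempool_size(511) == 511 and optimal_mempool_size(1000) == 1023 */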
455 optimal_mempool_size(unsigned int val)
457 return rte_align32pow2(val + 1) - 1;
460 /* Allocate an mbuf mempool for input or output data buffers */
461 static struct rte_mempool *
462 create_mbuf_pool(struct op_data_entries *entries, uint8_t dev_id,
463 int socket_id, unsigned int mbuf_pool_size,
464 const char *op_type_str)
467 uint32_t max_seg_sz = 0;
468 char pool_name[RTE_MEMPOOL_NAMESIZE];
470 /* find max input segment size */
471 for (i = 0; i < entries->nb_segments; ++i)
472 if (entries->segments[i].length > max_seg_sz)
473 max_seg_sz = entries->segments[i].length;
475 snprintf(pool_name, sizeof(pool_name), "%s_pool_%u", op_type_str,
477 return rte_pktmbuf_pool_create(pool_name, mbuf_pool_size, 0, 0,
478 RTE_MAX(max_seg_sz + RTE_PKTMBUF_HEADROOM
480 (unsigned int)RTE_MBUF_DEFAULT_BUF_SIZE), socket_id);
484 create_mempools(struct active_device *ad, int socket_id,
485 enum rte_bbdev_op_type org_op_type, uint16_t num_ops)
487 struct rte_mempool *mp;
488 unsigned int ops_pool_size, mbuf_pool_size = 0;
489 char pool_name[RTE_MEMPOOL_NAMESIZE];
490 const char *op_type_str;
491 enum rte_bbdev_op_type op_type = org_op_type;
493 struct op_data_entries *in = &test_vector.entries[DATA_INPUT];
494 struct op_data_entries *hard_out =
495 &test_vector.entries[DATA_HARD_OUTPUT];
496 struct op_data_entries *soft_out =
497 &test_vector.entries[DATA_SOFT_OUTPUT];
498 struct op_data_entries *harq_in =
499 &test_vector.entries[DATA_HARQ_INPUT];
500 struct op_data_entries *harq_out =
501 &test_vector.entries[DATA_HARQ_OUTPUT];
503 /* allocate ops mempool */
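/*
 * The pool must cover both the worst-case number of in-flight ops
 * (nb_queues * num_ops) and the per-lcore mempool caches
 * (roughly 1.5 * rte_lcore_count() * OPS_CACHE_SIZE), plus one reference op.
 */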
504 ops_pool_size = optimal_mempool_size(RTE_MAX(
505 /* Ops used plus 1 reference op */
506 RTE_MAX((unsigned int)(ad->nb_queues * num_ops + 1),
507 /* Minimal cache size plus 1 reference op */
508 (unsigned int)(1.5 * rte_lcore_count() *
509 OPS_CACHE_SIZE + 1)),
512 if (org_op_type == RTE_BBDEV_OP_NONE)
513 op_type = RTE_BBDEV_OP_TURBO_ENC;
515 op_type_str = rte_bbdev_op_type_str(op_type);
516 TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
518 snprintf(pool_name, sizeof(pool_name), "%s_pool_%u", op_type_str,
520 mp = rte_bbdev_op_pool_create(pool_name, op_type,
521 ops_pool_size, OPS_CACHE_SIZE, socket_id);
522 TEST_ASSERT_NOT_NULL(mp,
523 "ERROR Failed to create %u items ops pool for dev %u on socket %u.",
527 ad->ops_mempool = mp;
529 /* Do not create input and output mbufs for the BaseBand Null Device */
530 if (org_op_type == RTE_BBDEV_OP_NONE)
534 if (in->nb_segments > 0) {
535 mbuf_pool_size = optimal_mempool_size(ops_pool_size *
537 mp = create_mbuf_pool(in, ad->dev_id, socket_id,
538 mbuf_pool_size, "in");
539 TEST_ASSERT_NOT_NULL(mp,
540 "ERROR Failed to create %u items input pktmbuf pool for dev %u on socket %u.",
544 ad->in_mbuf_pool = mp;
548 if (hard_out->nb_segments > 0) {
549 mbuf_pool_size = optimal_mempool_size(ops_pool_size *
550 hard_out->nb_segments);
551 mp = create_mbuf_pool(hard_out, ad->dev_id, socket_id,
554 TEST_ASSERT_NOT_NULL(mp,
555 "ERROR Failed to create %u items hard output pktmbuf pool for dev %u on socket %u.",
559 ad->hard_out_mbuf_pool = mp;
563 if (soft_out->nb_segments > 0) {
564 mbuf_pool_size = optimal_mempool_size(ops_pool_size *
565 soft_out->nb_segments);
566 mp = create_mbuf_pool(soft_out, ad->dev_id, socket_id,
569 TEST_ASSERT_NOT_NULL(mp,
570 "ERROR Failed to create %uB soft output pktmbuf pool for dev %u on socket %u.",
574 ad->soft_out_mbuf_pool = mp;
578 if (harq_in->nb_segments > 0) {
579 mbuf_pool_size = optimal_mempool_size(ops_pool_size *
580 harq_in->nb_segments);
581 mp = create_mbuf_pool(harq_in, ad->dev_id, socket_id,
584 TEST_ASSERT_NOT_NULL(mp,
585 "ERROR Failed to create %uB harq input pktmbuf pool for dev %u on socket %u.",
589 ad->harq_in_mbuf_pool = mp;
593 if (harq_out->nb_segments > 0) {
594 mbuf_pool_size = optimal_mempool_size(ops_pool_size *
595 harq_out->nb_segments);
596 mp = create_mbuf_pool(harq_out, ad->dev_id, socket_id,
599 TEST_ASSERT_NOT_NULL(mp,
600 "ERROR Failed to create %uB harq output pktmbuf pool for dev %u on socket %u.",
604 ad->harq_out_mbuf_pool = mp;
611 add_bbdev_dev(uint8_t dev_id, struct rte_bbdev_info *info,
612 struct test_bbdev_vector *vector)
615 unsigned int queue_id;
616 struct rte_bbdev_queue_conf qconf;
617 struct active_device *ad = &active_devs[nb_active_devs];
618 unsigned int nb_queues;
619 enum rte_bbdev_op_type op_type = vector->op_type;
621 /* Configure the FPGA LTE FEC device with PF & VF values
622  * when the '-i' flag is set and an FPGA device is used
624 #ifdef RTE_BASEBAND_FPGA_LTE_FEC
625 if ((get_init_device() == true) &&
626 (!strcmp(info->drv.driver_name, FPGA_LTE_PF_DRIVER_NAME))) {
627 struct rte_fpga_lte_fec_conf conf;
630 printf("Configure FPGA LTE FEC Driver %s with default values\n",
631 info->drv.driver_name);
633 /* clear default configuration before initialization */
634 memset(&conf, 0, sizeof(struct rte_fpga_lte_fec_conf));
637 * true if PF is used for data plane
640 conf.pf_mode_en = true;
642 for (i = 0; i < FPGA_LTE_FEC_NUM_VFS; ++i) {
643 /* Number of UL queues per VF (fpga supports 8 VFs) */
644 conf.vf_ul_queues_number[i] = VF_UL_4G_QUEUE_VALUE;
645 /* Number of DL queues per VF (fpga supports 8 VFs) */
646 conf.vf_dl_queues_number[i] = VF_DL_4G_QUEUE_VALUE;
649 /* UL bandwidth. Needed for the scheduling algorithm */
650 conf.ul_bandwidth = UL_4G_BANDWIDTH;
652 conf.dl_bandwidth = DL_4G_BANDWIDTH;
654 /* UL & DL load balance factor (128) */
655 conf.ul_load_balance = UL_4G_LOAD_BALANCE;
656 conf.dl_load_balance = DL_4G_LOAD_BALANCE;
658 /* FLR timeout value */
659 conf.flr_time_out = FLR_4G_TIMEOUT;
661 /* setup FPGA PF with configuration information */
662 ret = rte_fpga_lte_fec_configure(info->dev_name, &conf);
663 TEST_ASSERT_SUCCESS(ret,
664 "Failed to configure 4G FPGA PF for bbdev %s",
668 #ifdef RTE_BASEBAND_FPGA_5GNR_FEC
669 if ((get_init_device() == true) &&
670 (!strcmp(info->drv.driver_name, FPGA_5GNR_PF_DRIVER_NAME))) {
671 struct rte_fpga_5gnr_fec_conf conf;
674 printf("Configure FPGA 5GNR FEC Driver %s with default values\n",
675 info->drv.driver_name);
677 /* clear default configuration before initialization */
678 memset(&conf, 0, sizeof(struct rte_fpga_5gnr_fec_conf));
681 * true if PF is used for data plane
684 conf.pf_mode_en = true;
686 for (i = 0; i < FPGA_5GNR_FEC_NUM_VFS; ++i) {
687 /* Number of UL queues per VF (fpga supports 8 VFs) */
688 conf.vf_ul_queues_number[i] = VF_UL_5G_QUEUE_VALUE;
689 /* Number of DL queues per VF (fpga supports 8 VFs) */
690 conf.vf_dl_queues_number[i] = VF_DL_5G_QUEUE_VALUE;
693 /* UL bandwidth. Needed for the scheduling algorithm */
694 conf.ul_bandwidth = UL_5G_BANDWIDTH;
696 conf.dl_bandwidth = DL_5G_BANDWIDTH;
698 /* UL & DL load balance factor (128) */
699 conf.ul_load_balance = UL_5G_LOAD_BALANCE;
700 conf.dl_load_balance = DL_5G_LOAD_BALANCE;
702 /* FLR timeout value */
703 conf.flr_time_out = FLR_5G_TIMEOUT;
705 /* setup FPGA PF with configuration information */
706 ret = rte_fpga_5gnr_fec_configure(info->dev_name, &conf);
707 TEST_ASSERT_SUCCESS(ret,
708 "Failed to configure 5G FPGA PF for bbdev %s",
712 #ifdef RTE_BASEBAND_ACC100
713 if ((get_init_device() == true) &&
714 (!strcmp(info->drv.driver_name, ACC100PF_DRIVER_NAME))) {
715 struct rte_acc100_conf conf;
718 printf("Configure ACC100 FEC Driver %s with default values\n",
719 info->drv.driver_name);
721 /* clear default configuration before initialization */
722 memset(&conf, 0, sizeof(struct rte_acc100_conf));
724 /* Always set in PF mode for built-in configuration */
725 conf.pf_mode_en = true;
726 for (i = 0; i < RTE_ACC100_NUM_VFS; ++i) {
727 conf.arb_dl_4g[i].gbr_threshold1 = ACC100_QOS_GBR;
728 conf.arb_dl_4g[i].gbr_threshold2 = ACC100_QOS_GBR;
729 conf.arb_dl_4g[i].round_robin_weight = ACC100_QMGR_RR;
730 conf.arb_ul_4g[i].gbr_threshold1 = ACC100_QOS_GBR;
731 conf.arb_ul_4g[i].gbr_threshold2 = ACC100_QOS_GBR;
732 conf.arb_ul_4g[i].round_robin_weight = ACC100_QMGR_RR;
733 conf.arb_dl_5g[i].gbr_threshold1 = ACC100_QOS_GBR;
734 conf.arb_dl_5g[i].gbr_threshold2 = ACC100_QOS_GBR;
735 conf.arb_dl_5g[i].round_robin_weight = ACC100_QMGR_RR;
736 conf.arb_ul_5g[i].gbr_threshold1 = ACC100_QOS_GBR;
737 conf.arb_ul_5g[i].gbr_threshold2 = ACC100_QOS_GBR;
738 conf.arb_ul_5g[i].round_robin_weight = ACC100_QMGR_RR;
741 conf.input_pos_llr_1_bit = true;
742 conf.output_pos_llr_1_bit = true;
743 conf.num_vf_bundles = 1; /**< Number of VF bundles to setup */
745 conf.q_ul_4g.num_qgroups = ACC100_QMGR_NUM_QGS;
746 conf.q_ul_4g.first_qgroup_index = ACC100_QMGR_INVALID_IDX;
747 conf.q_ul_4g.num_aqs_per_groups = ACC100_QMGR_NUM_AQS;
748 conf.q_ul_4g.aq_depth_log2 = ACC100_QMGR_AQ_DEPTH;
749 conf.q_dl_4g.num_qgroups = ACC100_QMGR_NUM_QGS;
750 conf.q_dl_4g.first_qgroup_index = ACC100_QMGR_INVALID_IDX;
751 conf.q_dl_4g.num_aqs_per_groups = ACC100_QMGR_NUM_AQS;
752 conf.q_dl_4g.aq_depth_log2 = ACC100_QMGR_AQ_DEPTH;
753 conf.q_ul_5g.num_qgroups = ACC100_QMGR_NUM_QGS;
754 conf.q_ul_5g.first_qgroup_index = ACC100_QMGR_INVALID_IDX;
755 conf.q_ul_5g.num_aqs_per_groups = ACC100_QMGR_NUM_AQS;
756 conf.q_ul_5g.aq_depth_log2 = ACC100_QMGR_AQ_DEPTH;
757 conf.q_dl_5g.num_qgroups = ACC100_QMGR_NUM_QGS;
758 conf.q_dl_5g.first_qgroup_index = ACC100_QMGR_INVALID_IDX;
759 conf.q_dl_5g.num_aqs_per_groups = ACC100_QMGR_NUM_AQS;
760 conf.q_dl_5g.aq_depth_log2 = ACC100_QMGR_AQ_DEPTH;
762 /* setup PF with configuration information */
763 ret = rte_acc100_configure(info->dev_name, &conf);
764 TEST_ASSERT_SUCCESS(ret,
765 "Failed to configure ACC100 PF for bbdev %s",
769 /* Refresh the device info now that the device is configured */
770 rte_bbdev_info_get(dev_id, info);
771 nb_queues = RTE_MIN(rte_lcore_count(), info->drv.max_num_queues);
772 nb_queues = RTE_MIN(nb_queues, (unsigned int) MAX_QUEUES);
775 ret = rte_bbdev_setup_queues(dev_id, nb_queues, info->socket_id);
777 printf("rte_bbdev_setup_queues(%u, %u, %d) ret %i\n",
778 dev_id, nb_queues, info->socket_id, ret);
782 /* configure interrupts if needed */
784 ret = rte_bbdev_intr_enable(dev_id);
786 printf("rte_bbdev_intr_enable(%u) ret %i\n", dev_id,
792 /* setup device queues */
793 qconf.socket = info->socket_id;
794 qconf.queue_size = info->drv.default_queue_conf.queue_size;
796 qconf.deferred_start = 0;
797 qconf.op_type = op_type;
799 for (queue_id = 0; queue_id < nb_queues; ++queue_id) {
800 ret = rte_bbdev_queue_configure(dev_id, queue_id, &qconf);
803 "Allocated all queues (id=%u) at prio%u on dev%u\n",
804 queue_id, qconf.priority, dev_id);
806 ret = rte_bbdev_queue_configure(ad->dev_id, queue_id,
810 printf("All queues on dev %u allocated: %u\n",
814 ad->queue_ids[queue_id] = queue_id;
816 TEST_ASSERT(queue_id != 0,
817 "ERROR Failed to configure any queues on dev %u",
819 ad->nb_queues = queue_id;
821 set_avail_op(ad, op_type);
827 add_active_device(uint8_t dev_id, struct rte_bbdev_info *info,
828 struct test_bbdev_vector *vector)
832 active_devs[nb_active_devs].driver_name = info->drv.driver_name;
833 active_devs[nb_active_devs].dev_id = dev_id;
835 ret = add_bbdev_dev(dev_id, info, vector);
836 if (ret == TEST_SUCCESS)
842 populate_active_devices(void)
846 uint8_t nb_devs_added = 0;
847 struct rte_bbdev_info info;
849 RTE_BBDEV_FOREACH(dev_id) {
850 rte_bbdev_info_get(dev_id, &info);
852 if (check_dev_cap(&info)) {
854 "Device %d (%s) does not support specified capabilities\n",
855 dev_id, info.dev_name);
859 ret = add_active_device(dev_id, &info, &test_vector);
861 printf("Adding active bbdev %s skipped\n",
868 return nb_devs_added;
872 read_test_vector(void)
876 memset(&test_vector, 0, sizeof(test_vector));
877 printf("Test vector file = %s\n", get_vector_filename());
878 ret = test_bbdev_vector_read(get_vector_filename(), &test_vector);
879 TEST_ASSERT_SUCCESS(ret, "Failed to parse file %s\n",
880 get_vector_filename());
886 testsuite_setup(void)
888 TEST_ASSERT_SUCCESS(read_test_vector(), "Test suite setup failed\n");
890 if (populate_active_devices() == 0) {
891 printf("No suitable devices found!\n");
899 interrupt_testsuite_setup(void)
901 TEST_ASSERT_SUCCESS(read_test_vector(), "Test suite setup failed\n");
903 /* Enable interrupts */
906 /* Special case for NULL device (RTE_BBDEV_OP_NONE) */
907 if (populate_active_devices() == 0 ||
908 test_vector.op_type == RTE_BBDEV_OP_NONE) {
909 intr_enabled = false;
910 printf("No suitable devices found!\n");
918 testsuite_teardown(void)
922 /* Unconfigure devices */
923 RTE_BBDEV_FOREACH(dev_id)
924 rte_bbdev_close(dev_id);
926 /* Clear active devices structs. */
927 memset(active_devs, 0, sizeof(active_devs));
930 /* Disable interrupts */
931 intr_enabled = false;
939 for (i = 0; i < nb_active_devs; i++) {
940 dev_id = active_devs[i].dev_id;
941 /* reset bbdev stats */
942 TEST_ASSERT_SUCCESS(rte_bbdev_stats_reset(dev_id),
943 "Failed to reset stats of bbdev %u", dev_id);
944 /* start the device */
945 TEST_ASSERT_SUCCESS(rte_bbdev_start(dev_id),
946 "Failed to start bbdev %u", dev_id);
956 struct rte_bbdev_stats stats;
958 for (i = 0; i < nb_active_devs; i++) {
959 dev_id = active_devs[i].dev_id;
960 /* read stats and print */
961 rte_bbdev_stats_get(dev_id, &stats);
962 /* Stop the device */
963 rte_bbdev_stop(dev_id);
968 init_op_data_objs(struct rte_bbdev_op_data *bufs,
969 struct op_data_entries *ref_entries,
970 struct rte_mempool *mbuf_pool, const uint16_t n,
971 enum op_data_type op_type, uint16_t min_alignment)
975 bool large_input = false;
977 for (i = 0; i < n; ++i) {
979 struct op_data_buf *seg = &ref_entries->segments[0];
980 struct rte_mbuf *m_head = rte_pktmbuf_alloc(mbuf_pool);
981 TEST_ASSERT_NOT_NULL(m_head,
982 "Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
983 op_type, n * ref_entries->nb_segments,
986 if (seg->length > RTE_BBDEV_LDPC_E_MAX_MBUF) {
988 * Special case when DPDK mbuf cannot handle
989 * the required input size
991 printf("Warning: Larger input size than DPDK mbuf %d\n",
995 bufs[i].data = m_head;
999 if ((op_type == DATA_INPUT) || (op_type == DATA_HARQ_INPUT)) {
1000 if ((op_type == DATA_INPUT) && large_input) {
1001 /* Back the mbuf with an oversized external buffer, since the input exceeds the mbuf data room */
1002 data = rte_malloc(NULL, seg->length, 0);
1003 TEST_ASSERT_NOT_NULL(data,
1004 "rte malloc failed with %u bytes",
1006 memcpy(data, seg->addr, seg->length);
1007 m_head->buf_addr = data;
1008 m_head->buf_iova = rte_malloc_virt2iova(data);
1009 m_head->data_off = 0;
1010 m_head->data_len = seg->length;
1012 data = rte_pktmbuf_append(m_head, seg->length);
1013 TEST_ASSERT_NOT_NULL(data,
1014 "Couldn't append %u bytes to mbuf from %d data type mbuf pool",
1015 seg->length, op_type);
1017 TEST_ASSERT(data == RTE_PTR_ALIGN(
1018 data, min_alignment),
1019 "Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
1020 data, min_alignment);
1021 rte_memcpy(data, seg->addr, seg->length);
1024 bufs[i].length += seg->length;
1026 for (j = 1; j < ref_entries->nb_segments; ++j) {
1027 struct rte_mbuf *m_tail =
1028 rte_pktmbuf_alloc(mbuf_pool);
1029 TEST_ASSERT_NOT_NULL(m_tail,
1030 "Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
1032 n * ref_entries->nb_segments,
1036 data = rte_pktmbuf_append(m_tail, seg->length);
1037 TEST_ASSERT_NOT_NULL(data,
1038 "Couldn't append %u bytes to mbuf from %d data type mbuf pool",
1039 seg->length, op_type);
1041 TEST_ASSERT(data == RTE_PTR_ALIGN(data,
1043 "Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
1044 data, min_alignment);
1045 rte_memcpy(data, seg->addr, seg->length);
1046 bufs[i].length += seg->length;
1048 ret = rte_pktmbuf_chain(m_head, m_tail);
1049 TEST_ASSERT_SUCCESS(ret,
1050 "Couldn't chain mbufs from %d data type mbuf pool",
1055 /* allocate chained-mbuf for output buffer */
1056 for (j = 1; j < ref_entries->nb_segments; ++j) {
1057 struct rte_mbuf *m_tail =
1058 rte_pktmbuf_alloc(mbuf_pool);
1059 TEST_ASSERT_NOT_NULL(m_tail,
1060 "Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
1062 n * ref_entries->nb_segments,
1065 ret = rte_pktmbuf_chain(m_head, m_tail);
1066 TEST_ASSERT_SUCCESS(ret,
1067 "Couldn't chain mbufs from %d data type mbuf pool",
1077 allocate_buffers_on_socket(struct rte_bbdev_op_data **buffers, const int len,
1082 *buffers = rte_zmalloc_socket(NULL, len, 0, socket);
1083 if (*buffers == NULL) {
1084 printf("WARNING: Failed to allocate op_data on socket %d\n",
1086 /* try to allocate memory on other detected sockets */
1087 for (i = 0; i < socket; i++) {
1088 *buffers = rte_zmalloc_socket(NULL, len, 0, i);
1089 if (*buffers != NULL)
1094 return (*buffers == NULL) ? TEST_FAILED : TEST_SUCCESS;
1098 limit_input_llr_val_range(struct rte_bbdev_op_data *input_ops,
1099 const uint16_t n, const int8_t max_llr_modulus)
1101 uint16_t i, byte_idx;
1103 for (i = 0; i < n; ++i) {
1104 struct rte_mbuf *m = input_ops[i].data;
1106 int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
1107 input_ops[i].offset);
1108 for (byte_idx = 0; byte_idx < rte_pktmbuf_data_len(m);
1110 llr[byte_idx] = round((double)max_llr_modulus *
1111 llr[byte_idx] / INT8_MAX);
1119  * Filler bits may have to be inserted into the HARQ input
1120  * when the device's HARQ memory layout requires them
1123 ldpc_add_filler(struct rte_bbdev_op_data *input_ops,
1124 const uint16_t n, struct test_op_params *op_params)
1126 struct rte_bbdev_op_ldpc_dec dec = op_params->ref_dec_op->ldpc_dec;
1128 if (input_ops == NULL)
1130 /* No need to add filler if not required by device */
1131 if (!(ldpc_cap_flags &
1132 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS))
1134 /* No need to add filler for loopback operation */
1135 if (dec.op_flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)
1138 uint16_t i, j, parity_offset;
1139 for (i = 0; i < n; ++i) {
1140 struct rte_mbuf *m = input_ops[i].data;
1141 int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
1142 input_ops[i].offset);
1143 parity_offset = (dec.basegraph == 1 ? 20 : 8)
1144 * dec.z_c - dec.n_filler;
1145 uint16_t new_hin_size = input_ops[i].length + dec.n_filler;
1146 m->data_len = new_hin_size;
1147 input_ops[i].length = new_hin_size;
1148 for (j = new_hin_size - 1; j >= parity_offset + dec.n_filler;
1150 llr[j] = llr[j - dec.n_filler];
1151 uint16_t llr_max_pre_scaling = (1 << (ldpc_llr_size - 1)) - 1;
1152 for (j = 0; j < dec.n_filler; j++)
1153 llr[parity_offset + j] = llr_max_pre_scaling;
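/*
 * Scale input LLRs from the test vector's 8-bit representation to the device
 * fixed-point format (llr_size bits with llr_decimals fractional bits),
 * saturating to the representable range.
 */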
1158 ldpc_input_llr_scaling(struct rte_bbdev_op_data *input_ops,
1159 const uint16_t n, const int8_t llr_size,
1160 const int8_t llr_decimals)
1162 if (input_ops == NULL)
1165 uint16_t i, byte_idx;
1167 int16_t llr_max, llr_min, llr_tmp;
1168 llr_max = (1 << (llr_size - 1)) - 1;
1170 for (i = 0; i < n; ++i) {
1171 struct rte_mbuf *m = input_ops[i].data;
1173 int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
1174 input_ops[i].offset);
1175 for (byte_idx = 0; byte_idx < rte_pktmbuf_data_len(m);
1178 llr_tmp = llr[byte_idx];
1179 if (llr_decimals == 4)
1181 else if (llr_decimals == 2)
1183 else if (llr_decimals == 0)
1185 llr_tmp = RTE_MIN(llr_max,
1186 RTE_MAX(llr_min, llr_tmp));
1187 llr[byte_idx] = (int8_t) llr_tmp;
1198 fill_queue_buffers(struct test_op_params *op_params,
1199 struct rte_mempool *in_mp, struct rte_mempool *hard_out_mp,
1200 struct rte_mempool *soft_out_mp,
1201 struct rte_mempool *harq_in_mp, struct rte_mempool *harq_out_mp,
1203 const struct rte_bbdev_op_cap *capabilities,
1204 uint16_t min_alignment, const int socket_id)
1207 enum op_data_type type;
1208 const uint16_t n = op_params->num_to_process;
1210 struct rte_mempool *mbuf_pools[DATA_NUM_TYPES] = {
1218 struct rte_bbdev_op_data **queue_ops[DATA_NUM_TYPES] = {
1219 &op_params->q_bufs[socket_id][queue_id].inputs,
1220 &op_params->q_bufs[socket_id][queue_id].soft_outputs,
1221 &op_params->q_bufs[socket_id][queue_id].hard_outputs,
1222 &op_params->q_bufs[socket_id][queue_id].harq_inputs,
1223 &op_params->q_bufs[socket_id][queue_id].harq_outputs,
1226 for (type = DATA_INPUT; type < DATA_NUM_TYPES; ++type) {
1227 struct op_data_entries *ref_entries =
1228 &test_vector.entries[type];
1229 if (ref_entries->nb_segments == 0)
1232 ret = allocate_buffers_on_socket(queue_ops[type],
1233 n * sizeof(struct rte_bbdev_op_data),
1235 TEST_ASSERT_SUCCESS(ret,
1236 "Couldn't allocate memory for rte_bbdev_op_data structs");
1238 ret = init_op_data_objs(*queue_ops[type], ref_entries,
1239 mbuf_pools[type], n, type, min_alignment);
1240 TEST_ASSERT_SUCCESS(ret,
1241 "Couldn't init rte_bbdev_op_data structs");
1244 if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
1245 limit_input_llr_val_range(*queue_ops[DATA_INPUT], n,
1246 capabilities->cap.turbo_dec.max_llr_modulus);
1248 if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
1249 bool loopback = op_params->ref_dec_op->ldpc_dec.op_flags &
1250 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK;
1251 bool llr_comp = op_params->ref_dec_op->ldpc_dec.op_flags &
1252 RTE_BBDEV_LDPC_LLR_COMPRESSION;
1253 bool harq_comp = op_params->ref_dec_op->ldpc_dec.op_flags &
1254 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
1255 ldpc_llr_decimals = capabilities->cap.ldpc_dec.llr_decimals;
1256 ldpc_llr_size = capabilities->cap.ldpc_dec.llr_size;
1257 ldpc_cap_flags = capabilities->cap.ldpc_dec.capability_flags;
1258 if (!loopback && !llr_comp)
1259 ldpc_input_llr_scaling(*queue_ops[DATA_INPUT], n,
1260 ldpc_llr_size, ldpc_llr_decimals);
1261 if (!loopback && !harq_comp)
1262 ldpc_input_llr_scaling(*queue_ops[DATA_HARQ_INPUT], n,
1263 ldpc_llr_size, ldpc_llr_decimals);
1265 ldpc_add_filler(*queue_ops[DATA_HARQ_INPUT], n,
1273 free_buffers(struct active_device *ad, struct test_op_params *op_params)
1277 rte_mempool_free(ad->ops_mempool);
1278 rte_mempool_free(ad->in_mbuf_pool);
1279 rte_mempool_free(ad->hard_out_mbuf_pool);
1280 rte_mempool_free(ad->soft_out_mbuf_pool);
1281 rte_mempool_free(ad->harq_in_mbuf_pool);
1282 rte_mempool_free(ad->harq_out_mbuf_pool);
1284 for (i = 0; i < rte_lcore_count(); ++i) {
1285 for (j = 0; j < RTE_MAX_NUMA_NODES; ++j) {
1286 rte_free(op_params->q_bufs[j][i].inputs);
1287 rte_free(op_params->q_bufs[j][i].hard_outputs);
1288 rte_free(op_params->q_bufs[j][i].soft_outputs);
1289 rte_free(op_params->q_bufs[j][i].harq_inputs);
1290 rte_free(op_params->q_bufs[j][i].harq_outputs);
1296 copy_reference_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
1297 unsigned int start_idx,
1298 struct rte_bbdev_op_data *inputs,
1299 struct rte_bbdev_op_data *hard_outputs,
1300 struct rte_bbdev_op_data *soft_outputs,
1301 struct rte_bbdev_dec_op *ref_op)
1304 struct rte_bbdev_op_turbo_dec *turbo_dec = &ref_op->turbo_dec;
1306 for (i = 0; i < n; ++i) {
1307 if (turbo_dec->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
1308 ops[i]->turbo_dec.tb_params.ea =
1309 turbo_dec->tb_params.ea;
1310 ops[i]->turbo_dec.tb_params.eb =
1311 turbo_dec->tb_params.eb;
1312 ops[i]->turbo_dec.tb_params.k_pos =
1313 turbo_dec->tb_params.k_pos;
1314 ops[i]->turbo_dec.tb_params.k_neg =
1315 turbo_dec->tb_params.k_neg;
1316 ops[i]->turbo_dec.tb_params.c =
1317 turbo_dec->tb_params.c;
1318 ops[i]->turbo_dec.tb_params.c_neg =
1319 turbo_dec->tb_params.c_neg;
1320 ops[i]->turbo_dec.tb_params.cab =
1321 turbo_dec->tb_params.cab;
1322 ops[i]->turbo_dec.tb_params.r =
1323 turbo_dec->tb_params.r;
1325 ops[i]->turbo_dec.cb_params.e = turbo_dec->cb_params.e;
1326 ops[i]->turbo_dec.cb_params.k = turbo_dec->cb_params.k;
1329 ops[i]->turbo_dec.ext_scale = turbo_dec->ext_scale;
1330 ops[i]->turbo_dec.iter_max = turbo_dec->iter_max;
1331 ops[i]->turbo_dec.iter_min = turbo_dec->iter_min;
1332 ops[i]->turbo_dec.op_flags = turbo_dec->op_flags;
1333 ops[i]->turbo_dec.rv_index = turbo_dec->rv_index;
1334 ops[i]->turbo_dec.num_maps = turbo_dec->num_maps;
1335 ops[i]->turbo_dec.code_block_mode = turbo_dec->code_block_mode;
1337 ops[i]->turbo_dec.hard_output = hard_outputs[start_idx + i];
1338 ops[i]->turbo_dec.input = inputs[start_idx + i];
1339 if (soft_outputs != NULL)
1340 ops[i]->turbo_dec.soft_output =
1341 soft_outputs[start_idx + i];
1346 copy_reference_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
1347 unsigned int start_idx,
1348 struct rte_bbdev_op_data *inputs,
1349 struct rte_bbdev_op_data *outputs,
1350 struct rte_bbdev_enc_op *ref_op)
1353 struct rte_bbdev_op_turbo_enc *turbo_enc = &ref_op->turbo_enc;
1354 for (i = 0; i < n; ++i) {
1355 if (turbo_enc->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
1356 ops[i]->turbo_enc.tb_params.ea =
1357 turbo_enc->tb_params.ea;
1358 ops[i]->turbo_enc.tb_params.eb =
1359 turbo_enc->tb_params.eb;
1360 ops[i]->turbo_enc.tb_params.k_pos =
1361 turbo_enc->tb_params.k_pos;
1362 ops[i]->turbo_enc.tb_params.k_neg =
1363 turbo_enc->tb_params.k_neg;
1364 ops[i]->turbo_enc.tb_params.c =
1365 turbo_enc->tb_params.c;
1366 ops[i]->turbo_enc.tb_params.c_neg =
1367 turbo_enc->tb_params.c_neg;
1368 ops[i]->turbo_enc.tb_params.cab =
1369 turbo_enc->tb_params.cab;
1370 ops[i]->turbo_enc.tb_params.ncb_pos =
1371 turbo_enc->tb_params.ncb_pos;
1372 ops[i]->turbo_enc.tb_params.ncb_neg =
1373 turbo_enc->tb_params.ncb_neg;
1374 ops[i]->turbo_enc.tb_params.r = turbo_enc->tb_params.r;
1376 ops[i]->turbo_enc.cb_params.e = turbo_enc->cb_params.e;
1377 ops[i]->turbo_enc.cb_params.k = turbo_enc->cb_params.k;
1378 ops[i]->turbo_enc.cb_params.ncb =
1379 turbo_enc->cb_params.ncb;
1381 ops[i]->turbo_enc.rv_index = turbo_enc->rv_index;
1382 ops[i]->turbo_enc.op_flags = turbo_enc->op_flags;
1383 ops[i]->turbo_enc.code_block_mode = turbo_enc->code_block_mode;
1385 ops[i]->turbo_enc.output = outputs[start_idx + i];
1386 ops[i]->turbo_enc.input = inputs[start_idx + i];
1391 /* Return a random number drawn from a normal distribution
1392  * with mean 0 and variance 1
1393  * (Marsaglia polar method)
1398 double S, Z, U1, U2, u, v, fac;
1401 U1 = (double)rand() / RAND_MAX;
1402 U2 = (double)rand() / RAND_MAX;
1406 } while (S >= 1 || S == 0);
1407 fac = sqrt(-2. * log(S) / S);
1408 Z = (n % 2) ? u * fac : v * fac;
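/*
 * max* operator: log(exp(A) + exp(B)), computed as
 * max(A, B) + log1p(exp(-|A - B|)) and approximated by max(A, B)
 * when the two inputs are far apart.
 */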
1412 static inline double
1413 maxstar(double A, double B)
1415 if (fabs(A - B) > 5)
1416 return RTE_MAX(A, B);
1418 return RTE_MAX(A, B) + log1p(exp(-fabs(A - B)));
1422  * Generate Qm LLRs for Qm == 8 (256QAM)
1423  * Modulation, AWGN and LLR estimation using the max-log approximation
1426 gen_qm8_llr(int8_t *llrs, uint32_t i, double N0, double llr_max)
1431 double I, Q, p0, p1, llr_, b[qm], log_syml_prob[qam];
1432 /* 5.1.4 of TS38.211 */
1433 const double symbols_I[256] = {
1434 5, 5, 7, 7, 5, 5, 7, 7, 3, 3, 1, 1, 3, 3, 1, 1, 5,
1435 5, 7, 7, 5, 5, 7, 7, 3, 3, 1, 1, 3, 3, 1, 1, 11,
1436 11, 9, 9, 11, 11, 9, 9, 13, 13, 15, 15, 13, 13,
1437 15, 15, 11, 11, 9, 9, 11, 11, 9, 9, 13, 13, 15,
1438 15, 13, 13, 15, 15, 5, 5, 7, 7, 5, 5, 7, 7, 3, 3,
1439 1, 1, 3, 3, 1, 1, 5, 5, 7, 7, 5, 5, 7, 7, 3, 3, 1,
1440 1, 3, 3, 1, 1, 11, 11, 9, 9, 11, 11, 9, 9, 13, 13,
1441 15, 15, 13, 13, 15, 15, 11, 11, 9, 9, 11, 11, 9, 9,
1442 13, 13, 15, 15, 13, 13, 15, 15, -5, -5, -7, -7, -5,
1443 -5, -7, -7, -3, -3, -1, -1, -3, -3, -1, -1, -5, -5,
1444 -7, -7, -5, -5, -7, -7, -3, -3, -1, -1, -3, -3,
1445 -1, -1, -11, -11, -9, -9, -11, -11, -9, -9, -13,
1446 -13, -15, -15, -13, -13, -15, -15, -11, -11, -9,
1447 -9, -11, -11, -9, -9, -13, -13, -15, -15, -13,
1448 -13, -15, -15, -5, -5, -7, -7, -5, -5, -7, -7, -3,
1449 -3, -1, -1, -3, -3, -1, -1, -5, -5, -7, -7, -5, -5,
1450 -7, -7, -3, -3, -1, -1, -3, -3, -1, -1, -11, -11,
1451 -9, -9, -11, -11, -9, -9, -13, -13, -15, -15, -13,
1452 -13, -15, -15, -11, -11, -9, -9, -11, -11, -9, -9,
1453 -13, -13, -15, -15, -13, -13, -15, -15};
1454 const double symbols_Q[256] = {
1455 5, 7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1, 11,
1456 9, 11, 9, 13, 15, 13, 15, 11, 9, 11, 9, 13, 15, 13,
1457 15, 5, 7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1,
1458 11, 9, 11, 9, 13, 15, 13, 15, 11, 9, 11, 9, 13,
1459 15, 13, 15, -5, -7, -5, -7, -3, -1, -3, -1, -5,
1460 -7, -5, -7, -3, -1, -3, -1, -11, -9, -11, -9, -13,
1461 -15, -13, -15, -11, -9, -11, -9, -13, -15, -13,
1462 -15, -5, -7, -5, -7, -3, -1, -3, -1, -5, -7, -5,
1463 -7, -3, -1, -3, -1, -11, -9, -11, -9, -13, -15,
1464 -13, -15, -11, -9, -11, -9, -13, -15, -13, -15, 5,
1465 7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1, 11,
1466 9, 11, 9, 13, 15, 13, 15, 11, 9, 11, 9, 13, 15,
1467 13, 15, 5, 7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1,
1468 3, 1, 11, 9, 11, 9, 13, 15, 13, 15, 11, 9, 11, 9,
1469 13, 15, 13, 15, -5, -7, -5, -7, -3, -1, -3, -1,
1470 -5, -7, -5, -7, -3, -1, -3, -1, -11, -9, -11, -9,
1471 -13, -15, -13, -15, -11, -9, -11, -9, -13, -15,
1472 -13, -15, -5, -7, -5, -7, -3, -1, -3, -1, -5, -7,
1473 -5, -7, -3, -1, -3, -1, -11, -9, -11, -9, -13, -15,
1474 -13, -15, -11, -9, -11, -9, -13, -15, -13, -15};
1475 /* Average constellation point energy */
1477 for (k = 0; k < qm; k++)
1478 b[k] = llrs[qm * i + k] < 0 ? 1.0 : 0.0;
1479 /* 5.1.4 of TS38.211 */
1480 I = (1 - 2 * b[0]) * (8 - (1 - 2 * b[2]) *
1481 (4 - (1 - 2 * b[4]) * (2 - (1 - 2 * b[6]))));
1482 Q = (1 - 2 * b[1]) * (8 - (1 - 2 * b[3]) *
1483 (4 - (1 - 2 * b[5]) * (2 - (1 - 2 * b[7]))));
1485 I += sqrt(N0 / 2) * randn(0);
1486 Q += sqrt(N0 / 2) * randn(1);
1488 * Calculate the log of the probability that each of
1489 * the constellation points was transmitted
1491 for (m = 0; m < qam; m++)
1492 log_syml_prob[m] = -(pow(I - symbols_I[m], 2.0)
1493 + pow(Q - symbols_Q[m], 2.0)) / N0;
1494 /* Calculate an LLR for each of the qm bits in the set */
1495 for (k = 0; k < qm; k++) {
1498 /* For each constellation point */
1499 for (m = 0; m < qam; m++) {
1500 if ((m >> (qm - k - 1)) & 1)
1501 p1 = maxstar(p1, log_syml_prob[m]);
1503 p0 = maxstar(p0, log_syml_prob[m]);
1505 /* Calculate the LLR */
1507 llr_ *= (1 << ldpc_llr_decimals);
1511 if (llr_ < -llr_max)
1513 llrs[qm * i + k] = (int8_t) llr_;
1519  * Generate Qm LLRs for Qm == 6 (64QAM)
1520  * Modulation, AWGN and LLR estimation using the max-log approximation
1523 gen_qm6_llr(int8_t *llrs, uint32_t i, double N0, double llr_max)
1528 double I, Q, p0, p1, llr_, b[qm], log_syml_prob[qam];
1529 /* 5.1.4 of TS38.211 */
1530 const double symbols_I[64] = {
1531 3, 3, 1, 1, 3, 3, 1, 1, 5, 5, 7, 7, 5, 5, 7, 7,
1532 3, 3, 1, 1, 3, 3, 1, 1, 5, 5, 7, 7, 5, 5, 7, 7,
1533 -3, -3, -1, -1, -3, -3, -1, -1, -5, -5, -7, -7,
1534 -5, -5, -7, -7, -3, -3, -1, -1, -3, -3, -1, -1,
1535 -5, -5, -7, -7, -5, -5, -7, -7};
1536 const double symbols_Q[64] = {
1537 3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7,
1538 -3, -1, -3, -1, -5, -7, -5, -7, -3, -1, -3, -1,
1539 -5, -7, -5, -7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1,
1540 5, 7, 5, 7, -3, -1, -3, -1, -5, -7, -5, -7,
1541 -3, -1, -3, -1, -5, -7, -5, -7};
1542 /* Average constellation point energy */
1544 for (k = 0; k < qm; k++)
1545 b[k] = llrs[qm * i + k] < 0 ? 1.0 : 0.0;
1546 /* 5.1.4 of TS38.211 */
1547 I = (1 - 2 * b[0])*(4 - (1 - 2 * b[2]) * (2 - (1 - 2 * b[4])));
1548 Q = (1 - 2 * b[1])*(4 - (1 - 2 * b[3]) * (2 - (1 - 2 * b[5])));
1550 I += sqrt(N0 / 2) * randn(0);
1551 Q += sqrt(N0 / 2) * randn(1);
1553 * Calculate the log of the probability that each of
1554 * the constellation points was transmitted
1556 for (m = 0; m < qam; m++)
1557 log_syml_prob[m] = -(pow(I - symbols_I[m], 2.0)
1558 + pow(Q - symbols_Q[m], 2.0)) / N0;
1559 /* Calculate an LLR for each of the qm bits in the set */
1560 for (k = 0; k < qm; k++) {
1563 /* For each constellation point */
1564 for (m = 0; m < qam; m++) {
1565 if ((m >> (qm - k - 1)) & 1)
1566 p1 = maxstar(p1, log_syml_prob[m]);
1568 p0 = maxstar(p0, log_syml_prob[m]);
1570 /* Calculate the LLR */
1572 llr_ *= (1 << ldpc_llr_decimals);
1576 if (llr_ < -llr_max)
1578 llrs[qm * i + k] = (int8_t) llr_;
1583  * Generate Qm LLRs for Qm == 4 (16QAM)
1584  * Modulation, AWGN and LLR estimation using the max-log approximation
1587 gen_qm4_llr(int8_t *llrs, uint32_t i, double N0, double llr_max)
1592 double I, Q, p0, p1, llr_, b[qm], log_syml_prob[qam];
1593 /* 5.1.4 of TS38.211 */
1594 const double symbols_I[16] = {1, 1, 3, 3, 1, 1, 3, 3,
1595 -1, -1, -3, -3, -1, -1, -3, -3};
1596 const double symbols_Q[16] = {1, 3, 1, 3, -1, -3, -1, -3,
1597 1, 3, 1, 3, -1, -3, -1, -3};
1598 /* Average constellation point energy */
1600 for (k = 0; k < qm; k++)
1601 b[k] = llrs[qm * i + k] < 0 ? 1.0 : 0.0;
1602 /* 5.1.4 of TS38.211 */
1603 I = (1 - 2 * b[0]) * (2 - (1 - 2 * b[2]));
1604 Q = (1 - 2 * b[1]) * (2 - (1 - 2 * b[3]));
1606 I += sqrt(N0 / 2) * randn(0);
1607 Q += sqrt(N0 / 2) * randn(1);
1609 * Calculate the log of the probability that each of
1610 * the constellation points was transmitted
1612 for (m = 0; m < qam; m++)
1613 log_syml_prob[m] = -(pow(I - symbols_I[m], 2.0)
1614 + pow(Q - symbols_Q[m], 2.0)) / N0;
1615 /* Calculate an LLR for each of the qm bits in the set */
1616 for (k = 0; k < qm; k++) {
1619 /* For each constellation point */
1620 for (m = 0; m < qam; m++) {
1621 if ((m >> (qm - k - 1)) & 1)
1622 p1 = maxstar(p1, log_syml_prob[m]);
1624 p0 = maxstar(p0, log_syml_prob[m]);
1626 /* Calculate the LLR */
1628 llr_ *= (1 << ldpc_llr_decimals);
1632 if (llr_ < -llr_max)
1634 llrs[qm * i + k] = (int8_t) llr_;
1639 gen_qm2_llr(int8_t *llrs, uint32_t j, double N0, double llr_max)
1642 double coeff = 2.0 * sqrt(N0);
1644 /* Ignore the rare quasi-null LLRs present in vectors so they are not saturated */
1645 if (llrs[j] < 8 && llrs[j] > -8)
1648 /* Note don't change sign here */
1650 b1 = ((llrs[j] > 0 ? 2.0 : -2.0)
1652 b = b1 * (1 << ldpc_llr_decimals);
1658 llrs[j] = (int8_t) b;
1661 /* Generate input LLRs for a given SNR */
1663 generate_llr_input(uint16_t n, struct rte_bbdev_op_data *inputs,
1664 struct rte_bbdev_dec_op *ref_op)
1668 uint32_t i, j, e, range;
1671 e = ref_op->ldpc_dec.cb_params.e;
1672 qm = ref_op->ldpc_dec.q_m;
1673 llr_max = (1 << (ldpc_llr_size - 1)) - 1;
1675 N0 = 1.0 / pow(10.0, get_snr() / 10.0);
1677 for (i = 0; i < n; ++i) {
1679 int8_t *llrs = rte_pktmbuf_mtod_offset(m, int8_t *, 0);
1681 for (j = 0; j < range; ++j)
1682 gen_qm8_llr(llrs, j, N0, llr_max);
1683 } else if (qm == 6) {
1684 for (j = 0; j < range; ++j)
1685 gen_qm6_llr(llrs, j, N0, llr_max);
1686 } else if (qm == 4) {
1687 for (j = 0; j < range; ++j)
1688 gen_qm4_llr(llrs, j, N0, llr_max);
1690 for (j = 0; j < e; ++j)
1691 gen_qm2_llr(llrs, j, N0, llr_max);
1697 copy_reference_ldpc_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
1698 unsigned int start_idx,
1699 struct rte_bbdev_op_data *inputs,
1700 struct rte_bbdev_op_data *hard_outputs,
1701 struct rte_bbdev_op_data *soft_outputs,
1702 struct rte_bbdev_op_data *harq_inputs,
1703 struct rte_bbdev_op_data *harq_outputs,
1704 struct rte_bbdev_dec_op *ref_op)
1707 struct rte_bbdev_op_ldpc_dec *ldpc_dec = &ref_op->ldpc_dec;
1709 for (i = 0; i < n; ++i) {
1710 if (ldpc_dec->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
1711 ops[i]->ldpc_dec.tb_params.ea =
1712 ldpc_dec->tb_params.ea;
1713 ops[i]->ldpc_dec.tb_params.eb =
1714 ldpc_dec->tb_params.eb;
1715 ops[i]->ldpc_dec.tb_params.c =
1716 ldpc_dec->tb_params.c;
1717 ops[i]->ldpc_dec.tb_params.cab =
1718 ldpc_dec->tb_params.cab;
1719 ops[i]->ldpc_dec.tb_params.r =
1720 ldpc_dec->tb_params.r;
1722 ops[i]->ldpc_dec.cb_params.e = ldpc_dec->cb_params.e;
1725 ops[i]->ldpc_dec.basegraph = ldpc_dec->basegraph;
1726 ops[i]->ldpc_dec.z_c = ldpc_dec->z_c;
1727 ops[i]->ldpc_dec.q_m = ldpc_dec->q_m;
1728 ops[i]->ldpc_dec.n_filler = ldpc_dec->n_filler;
1729 ops[i]->ldpc_dec.n_cb = ldpc_dec->n_cb;
1730 ops[i]->ldpc_dec.iter_max = ldpc_dec->iter_max;
1731 ops[i]->ldpc_dec.rv_index = ldpc_dec->rv_index;
1732 ops[i]->ldpc_dec.op_flags = ldpc_dec->op_flags;
1733 ops[i]->ldpc_dec.code_block_mode = ldpc_dec->code_block_mode;
1735 if (hard_outputs != NULL)
1736 ops[i]->ldpc_dec.hard_output =
1737 hard_outputs[start_idx + i];
1739 ops[i]->ldpc_dec.input =
1740 inputs[start_idx + i];
1741 if (soft_outputs != NULL)
1742 ops[i]->ldpc_dec.soft_output =
1743 soft_outputs[start_idx + i];
1744 if (harq_inputs != NULL)
1745 ops[i]->ldpc_dec.harq_combined_input =
1746 harq_inputs[start_idx + i];
1747 if (harq_outputs != NULL)
1748 ops[i]->ldpc_dec.harq_combined_output =
1749 harq_outputs[start_idx + i];
1755 copy_reference_ldpc_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
1756 unsigned int start_idx,
1757 struct rte_bbdev_op_data *inputs,
1758 struct rte_bbdev_op_data *outputs,
1759 struct rte_bbdev_enc_op *ref_op)
1762 struct rte_bbdev_op_ldpc_enc *ldpc_enc = &ref_op->ldpc_enc;
1763 for (i = 0; i < n; ++i) {
1764 if (ldpc_enc->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
1765 ops[i]->ldpc_enc.tb_params.ea = ldpc_enc->tb_params.ea;
1766 ops[i]->ldpc_enc.tb_params.eb = ldpc_enc->tb_params.eb;
1767 ops[i]->ldpc_enc.tb_params.cab =
1768 ldpc_enc->tb_params.cab;
1769 ops[i]->ldpc_enc.tb_params.c = ldpc_enc->tb_params.c;
1770 ops[i]->ldpc_enc.tb_params.r = ldpc_enc->tb_params.r;
1772 ops[i]->ldpc_enc.cb_params.e = ldpc_enc->cb_params.e;
1774 ops[i]->ldpc_enc.basegraph = ldpc_enc->basegraph;
1775 ops[i]->ldpc_enc.z_c = ldpc_enc->z_c;
1776 ops[i]->ldpc_enc.q_m = ldpc_enc->q_m;
1777 ops[i]->ldpc_enc.n_filler = ldpc_enc->n_filler;
1778 ops[i]->ldpc_enc.n_cb = ldpc_enc->n_cb;
1779 ops[i]->ldpc_enc.rv_index = ldpc_enc->rv_index;
1780 ops[i]->ldpc_enc.op_flags = ldpc_enc->op_flags;
1781 ops[i]->ldpc_enc.code_block_mode = ldpc_enc->code_block_mode;
1782 ops[i]->ldpc_enc.output = outputs[start_idx + i];
1783 ops[i]->ldpc_enc.input = inputs[start_idx + i];
1788 check_dec_status_and_ordering(struct rte_bbdev_dec_op *op,
1789 unsigned int order_idx, const int expected_status)
1791 int status = op->status;
1792 /* Ignore parity-mismatch false alarms when the iteration count is large */
1793 if (get_iter_max() >= 10) {
1794 if (!(expected_status & (1 << RTE_BBDEV_SYNDROME_ERROR)) &&
1795 (status & (1 << RTE_BBDEV_SYNDROME_ERROR))) {
1796 printf("WARNING: Ignore Syndrome Check mismatch\n");
1797 status -= (1 << RTE_BBDEV_SYNDROME_ERROR);
1799 if ((expected_status & (1 << RTE_BBDEV_SYNDROME_ERROR)) &&
1800 !(status & (1 << RTE_BBDEV_SYNDROME_ERROR))) {
1801 printf("WARNING: Ignore Syndrome Check mismatch\n");
1802 status += (1 << RTE_BBDEV_SYNDROME_ERROR);
1806 TEST_ASSERT(status == expected_status,
1807 "op_status (%d) != expected_status (%d)",
1808 op->status, expected_status);
1810 TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
1811 "Ordering error, expected %p, got %p",
1812 (void *)(uintptr_t)order_idx, op->opaque_data);
1814 return TEST_SUCCESS;
1818 check_enc_status_and_ordering(struct rte_bbdev_enc_op *op,
1819 unsigned int order_idx, const int expected_status)
1821 TEST_ASSERT(op->status == expected_status,
1822 "op_status (%d) != expected_status (%d)",
1823 op->status, expected_status);
1825 if (op->opaque_data != (void *)(uintptr_t)INVALID_OPAQUE)
1826 TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
1827 "Ordering error, expected %p, got %p",
1828 (void *)(uintptr_t)order_idx, op->opaque_data);
1830 return TEST_SUCCESS;
1834 validate_op_chain(struct rte_bbdev_op_data *op,
1835 struct op_data_entries *orig_op)
1838 struct rte_mbuf *m = op->data;
1839 uint8_t nb_dst_segments = orig_op->nb_segments;
1840 uint32_t total_data_size = 0;
1842 TEST_ASSERT(nb_dst_segments == m->nb_segs,
1843 "Number of segments differ in original (%u) and filled (%u) op",
1844 nb_dst_segments, m->nb_segs);
1846 /* Validate each mbuf segment length */
1847 for (i = 0; i < nb_dst_segments; ++i) {
1848 /* Apply offset to the first mbuf segment */
1849 uint16_t offset = (i == 0) ? op->offset : 0;
1850 uint16_t data_len = rte_pktmbuf_data_len(m) - offset;
1851 total_data_size += orig_op->segments[i].length;
1853 TEST_ASSERT(orig_op->segments[i].length == data_len,
1854 "Length of segment differ in original (%u) and filled (%u) op",
1855 orig_op->segments[i].length, data_len);
1856 TEST_ASSERT_BUFFERS_ARE_EQUAL(orig_op->segments[i].addr,
1857 rte_pktmbuf_mtod_offset(m, uint32_t *, offset),
1859 "Output buffers (CB=%u) are not equal", i);
1863 /* Validate total mbuf pkt length */
1864 uint32_t pkt_len = rte_pktmbuf_pkt_len(op->data) - op->offset;
1865 TEST_ASSERT(total_data_size == pkt_len,
1866 "Length of data differ in original (%u) and filled (%u) op",
1867 total_data_size, pkt_len);
1869 return TEST_SUCCESS;
1873 * Compute K0 for a given configuration for HARQ output length computation
1874 * As per definition in 3GPP 38.212 Table 5.4.2.1-2
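 * For example, for BG1 and rv_index 2 this gives
 * k0 = floor((K0_2_1 * Ncb) / (N_ZC_1 * Zc)) * Zc, which reduces to
 * K0_2_1 * Zc when Ncb equals the full circular buffer size.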
1876 static inline uint16_t
1877 get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
1881 uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;
1884 return (bg == 1 ? K0_1_1 : K0_1_2) * z_c;
1885 else if (rv_index == 2)
1886 return (bg == 1 ? K0_2_1 : K0_2_2) * z_c;
1888 return (bg == 1 ? K0_3_1 : K0_3_2) * z_c;
1890 /* LBRM case - includes a division by N */
1892 return (((bg == 1 ? K0_1_1 : K0_1_2) * n_cb)
1894 else if (rv_index == 2)
1895 return (((bg == 1 ? K0_2_1 : K0_2_2) * n_cb)
1898 return (((bg == 1 ? K0_3_1 : K0_3_2) * n_cb)
1902 /* HARQ output length including the Filler bits */
1903 static inline uint16_t
1904 compute_harq_len(struct rte_bbdev_op_ldpc_dec *ops_ld)
1907 uint8_t max_rv = (ops_ld->rv_index == 1) ? 3 : ops_ld->rv_index;
1908 k0 = get_k0(ops_ld->n_cb, ops_ld->z_c, ops_ld->basegraph, max_rv);
1909 /* Compute RM out size and number of rows */
1910 uint16_t parity_offset = (ops_ld->basegraph == 1 ? 20 : 8)
1911 * ops_ld->z_c - ops_ld->n_filler;
1912 uint16_t deRmOutSize = RTE_MIN(
1913 k0 + ops_ld->cb_params.e +
1914 ((k0 > parity_offset) ?
1915 0 : ops_ld->n_filler),
1917 uint16_t numRows = ((deRmOutSize + ops_ld->z_c - 1)
1919 uint16_t harq_output_len = numRows * ops_ld->z_c;
1920 return harq_output_len;
1924 validate_op_harq_chain(struct rte_bbdev_op_data *op,
1925 struct op_data_entries *orig_op,
1926 struct rte_bbdev_op_ldpc_dec *ops_ld)
1930 struct rte_mbuf *m = op->data;
1931 uint8_t nb_dst_segments = orig_op->nb_segments;
1932 uint32_t total_data_size = 0;
1933 int8_t *harq_orig, *harq_out, abs_harq_origin;
1934 uint32_t byte_error = 0, cum_error = 0, error;
1935 int16_t llr_max = (1 << (ldpc_llr_size - ldpc_llr_decimals)) - 1;
1936 int16_t llr_max_pre_scaling = (1 << (ldpc_llr_size - 1)) - 1;
1937 uint16_t parity_offset;
1939 TEST_ASSERT(nb_dst_segments == m->nb_segs,
1940 "Number of segments differ in original (%u) and filled (%u) op",
1941 nb_dst_segments, m->nb_segs);
1943 /* Validate each mbuf segment length */
1944 for (i = 0; i < nb_dst_segments; ++i) {
1945 /* Apply offset to the first mbuf segment */
1946 uint16_t offset = (i == 0) ? op->offset : 0;
1947 uint16_t data_len = rte_pktmbuf_data_len(m) - offset;
1948 total_data_size += orig_op->segments[i].length;
1950 TEST_ASSERT(orig_op->segments[i].length <
1951 (uint32_t)(data_len + 64),
1952 "Length of segment differ in original (%u) and filled (%u) op",
1953 orig_op->segments[i].length, data_len);
1954 harq_orig = (int8_t *) orig_op->segments[i].addr;
1955 harq_out = rte_pktmbuf_mtod_offset(m, int8_t *, offset);
1957 if (!(ldpc_cap_flags &
1958 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS
1959 ) || (ops_ld->op_flags &
1960 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
1961 data_len -= ops_ld->z_c;
1962 parity_offset = data_len;
1964 /* Compute RM out size and number of rows */
1965 parity_offset = (ops_ld->basegraph == 1 ? 20 : 8)
1966 * ops_ld->z_c - ops_ld->n_filler;
1967 uint16_t deRmOutSize = compute_harq_len(ops_ld) -
1969 if (data_len > deRmOutSize)
1970 data_len = deRmOutSize;
1971 if (data_len > orig_op->segments[i].length)
1972 data_len = orig_op->segments[i].length;
1975 * HARQ output can have minor differences
1976 * due to integer representation and related scaling
1978 for (j = 0, jj = 0; j < data_len; j++, jj++) {
1979 if (j == parity_offset) {
1980 /* Special Handling of the filler bits */
1981 for (k = 0; k < ops_ld->n_filler; k++) {
1983 llr_max_pre_scaling) {
1984 printf("HARQ Filler issue %d: %d %d\n",
1992 if (!(ops_ld->op_flags &
1993 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
1994 if (ldpc_llr_decimals > 1)
1995 harq_out[jj] = (harq_out[jj] + 1)
1996 >> (ldpc_llr_decimals - 1);
1997 /* Saturate to the S7 range */
1998 if (harq_orig[j] > llr_max)
1999 harq_orig[j] = llr_max;
2000 if (harq_orig[j] < -llr_max)
2001 harq_orig[j] = -llr_max;
2003 if (harq_orig[j] != harq_out[jj]) {
2004 error = (harq_orig[j] > harq_out[jj]) ?
2005 harq_orig[j] - harq_out[jj] :
2006 harq_out[jj] - harq_orig[j];
2007 abs_harq_origin = harq_orig[j] > 0 ?
2010 /* Residual quantization error */
2011 if ((error > 8 && (abs_harq_origin <
2014 printf("HARQ mismatch %d: exp %d act %d => %d\n",
2016 harq_out[jj], error);
2026 TEST_ASSERT(byte_error <= 1,
2027 "HARQ output mismatch (%d) %d",
2028 byte_error, cum_error);
2030 /* Validate total mbuf pkt length */
2031 uint32_t pkt_len = rte_pktmbuf_pkt_len(op->data) - op->offset;
2032 TEST_ASSERT(total_data_size < pkt_len + 64,
2033 "Length of data differ in original (%u) and filled (%u) op",
2034 total_data_size, pkt_len);
2036 return TEST_SUCCESS;
2040 validate_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
2041 struct rte_bbdev_dec_op *ref_op, const int vector_mask)
2045 struct op_data_entries *hard_data_orig =
2046 &test_vector.entries[DATA_HARD_OUTPUT];
2047 struct op_data_entries *soft_data_orig =
2048 &test_vector.entries[DATA_SOFT_OUTPUT];
2049 struct rte_bbdev_op_turbo_dec *ops_td;
2050 struct rte_bbdev_op_data *hard_output;
2051 struct rte_bbdev_op_data *soft_output;
2052 struct rte_bbdev_op_turbo_dec *ref_td = &ref_op->turbo_dec;
2054 for (i = 0; i < n; ++i) {
2055 ops_td = &ops[i]->turbo_dec;
2056 hard_output = &ops_td->hard_output;
2057 soft_output = &ops_td->soft_output;
2059 if (vector_mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT)
2060 TEST_ASSERT(ops_td->iter_count <= ref_td->iter_count,
2061 "Returned iter_count (%d) > expected iter_count (%d)",
2062 ops_td->iter_count, ref_td->iter_count);
2063 ret = check_dec_status_and_ordering(ops[i], i, ref_op->status);
2064 TEST_ASSERT_SUCCESS(ret,
2065 "Checking status and ordering for decoder failed");
2067 TEST_ASSERT_SUCCESS(validate_op_chain(hard_output,
2069 "Hard output buffers (CB=%u) are not equal",
2072 if (ref_op->turbo_dec.op_flags & RTE_BBDEV_TURBO_SOFT_OUTPUT)
2073 TEST_ASSERT_SUCCESS(validate_op_chain(soft_output,
2075 "Soft output buffers (CB=%u) are not equal",
2079 return TEST_SUCCESS;
2082 /* Check Number of code blocks errors */
2084 validate_ldpc_bler(struct rte_bbdev_dec_op **ops, const uint16_t n)
2087 struct op_data_entries *hard_data_orig =
2088 &test_vector.entries[DATA_HARD_OUTPUT];
2089 struct rte_bbdev_op_ldpc_dec *ops_td;
2090 struct rte_bbdev_op_data *hard_output;
2094 for (i = 0; i < n; ++i) {
2095 ops_td = &ops[i]->ldpc_dec;
2096 hard_output = &ops_td->hard_output;
2097 m = hard_output->data;
2098 if (memcmp(rte_pktmbuf_mtod_offset(m, uint32_t *, 0),
2099 hard_data_orig->segments[0].addr,
2100 hard_data_orig->segments[0].length))
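/* Validate LDPC decoder ops: check status, ordering and iteration count,
 * then compare the hard, soft and HARQ combined outputs against the
 * reference vector where the corresponding op flags are enabled.
 */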
2107 validate_ldpc_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
2108 struct rte_bbdev_dec_op *ref_op, const int vector_mask)
2112 struct op_data_entries *hard_data_orig =
2113 &test_vector.entries[DATA_HARD_OUTPUT];
2114 struct op_data_entries *soft_data_orig =
2115 &test_vector.entries[DATA_SOFT_OUTPUT];
2116 struct op_data_entries *harq_data_orig =
2117 &test_vector.entries[DATA_HARQ_OUTPUT];
2118 struct rte_bbdev_op_ldpc_dec *ops_td;
2119 struct rte_bbdev_op_data *hard_output;
2120 struct rte_bbdev_op_data *harq_output;
2121 struct rte_bbdev_op_data *soft_output;
2122 struct rte_bbdev_op_ldpc_dec *ref_td = &ref_op->ldpc_dec;
2124 for (i = 0; i < n; ++i) {
2125 ops_td = &ops[i]->ldpc_dec;
2126 hard_output = &ops_td->hard_output;
2127 harq_output = &ops_td->harq_combined_output;
2128 soft_output = &ops_td->soft_output;
2130 ret = check_dec_status_and_ordering(ops[i], i, ref_op->status);
2131 TEST_ASSERT_SUCCESS(ret,
2132 "Checking status and ordering for decoder failed");
2133 if (vector_mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT)
2134 TEST_ASSERT(ops_td->iter_count <= ref_td->iter_count,
2135 "Returned iter_count (%d) > expected iter_count (%d)",
2136 ops_td->iter_count, ref_td->iter_count);
2138 * We can ignore output data when the decoding failed to
2139 * converge or for loop-back cases
2141 if (!check_bit(ops[i]->ldpc_dec.op_flags,
2142 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK
2144 ops[i]->status & (1 << RTE_BBDEV_SYNDROME_ERROR
2146 TEST_ASSERT_SUCCESS(validate_op_chain(hard_output,
2148 "Hard output buffers (CB=%u) are not equal",
2151 if (ref_op->ldpc_dec.op_flags & RTE_BBDEV_LDPC_SOFT_OUT_ENABLE)
2152 TEST_ASSERT_SUCCESS(validate_op_chain(soft_output,
2154 "Soft output buffers (CB=%u) are not equal",
2156 if (ref_op->ldpc_dec.op_flags &
2157 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE) {
2158 TEST_ASSERT_SUCCESS(validate_op_harq_chain(harq_output,
2159 harq_data_orig, ops_td),
2160 "HARQ output buffers (CB=%u) are not equal",
2163 if (ref_op->ldpc_dec.op_flags &
2164 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)
2165 TEST_ASSERT_SUCCESS(validate_op_harq_chain(harq_output,
2166 harq_data_orig, ops_td),
2167 "HARQ output buffers (CB=%u) are not equal",
2172 return TEST_SUCCESS;
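/* Validate turbo encoder ops against the reference hard output. */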
2177 validate_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
2178 struct rte_bbdev_enc_op *ref_op)
2182 struct op_data_entries *hard_data_orig =
2183 &test_vector.entries[DATA_HARD_OUTPUT];
2185 for (i = 0; i < n; ++i) {
2186 ret = check_enc_status_and_ordering(ops[i], i, ref_op->status);
2187 TEST_ASSERT_SUCCESS(ret,
2188 "Checking status and ordering for encoder failed");
2189 TEST_ASSERT_SUCCESS(validate_op_chain(
2190 &ops[i]->turbo_enc.output,
2192 "Output buffers (CB=%u) are not equal",
2196 return TEST_SUCCESS;
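/* Validate LDPC encoder ops against the reference hard output. */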
2200 validate_ldpc_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
2201 struct rte_bbdev_enc_op *ref_op)
2205 struct op_data_entries *hard_data_orig =
2206 &test_vector.entries[DATA_HARD_OUTPUT];
2208 for (i = 0; i < n; ++i) {
2209 ret = check_enc_status_and_ordering(ops[i], i, ref_op->status);
2210 TEST_ASSERT_SUCCESS(ret,
2211 "Checking status and ordering for encoder failed");
2212 TEST_ASSERT_SUCCESS(validate_op_chain(
2213 &ops[i]->ldpc_enc.output,
2215 "Output buffers (CB=%u) are not equal",
2219 return TEST_SUCCESS;
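/* Build the reference turbo decode op from the test vector and accumulate
 * the total input length over all input segments.
 */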
2223 create_reference_dec_op(struct rte_bbdev_dec_op *op)
2226 struct op_data_entries *entry;
2228 op->turbo_dec = test_vector.turbo_dec;
2229 entry = &test_vector.entries[DATA_INPUT];
2230 for (i = 0; i < entry->nb_segments; ++i)
2231 op->turbo_dec.input.length +=
2232 entry->segments[i].length;
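/* Build the reference LDPC decode op from the test vector, including the
 * HARQ combined input length when HARQ combining is enabled.
 */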
2236 create_reference_ldpc_dec_op(struct rte_bbdev_dec_op *op)
2239 struct op_data_entries *entry;
2241 op->ldpc_dec = test_vector.ldpc_dec;
2242 entry = &test_vector.entries[DATA_INPUT];
2243 for (i = 0; i < entry->nb_segments; ++i)
2244 op->ldpc_dec.input.length +=
2245 entry->segments[i].length;
2246 if (test_vector.ldpc_dec.op_flags &
2247 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE) {
2248 entry = &test_vector.entries[DATA_HARQ_INPUT];
2249 for (i = 0; i < entry->nb_segments; ++i)
2250 op->ldpc_dec.harq_combined_input.length +=
2251 entry->segments[i].length;
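/* Build the reference turbo encode op from the test vector. */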
2257 create_reference_enc_op(struct rte_bbdev_enc_op *op)
2260 struct op_data_entries *entry;
2262 op->turbo_enc = test_vector.turbo_enc;
2263 entry = &test_vector.entries[DATA_INPUT];
2264 for (i = 0; i < entry->nb_segments; ++i)
2265 op->turbo_enc.input.length +=
2266 entry->segments[i].length;
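/* Build the reference LDPC encode op from the test vector. */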
2270 create_reference_ldpc_enc_op(struct rte_bbdev_enc_op *op)
2273 struct op_data_entries *entry;
2275 op->ldpc_enc = test_vector.ldpc_enc;
2276 entry = &test_vector.entries[DATA_INPUT];
2277 for (i = 0; i < entry->nb_segments; ++i)
2278 op->ldpc_enc.input.length +=
2279 entry->segments[i].length;
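/* Transport block size in bits for a turbo decode op: K- for a single code
 * block, otherwise the sum of K-/K+ over the remaining c - r code blocks.
 */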
2283 calc_dec_TB_size(struct rte_bbdev_dec_op *op)
2286 uint32_t c, r, tb_size = 0;
2288 if (op->turbo_dec.code_block_mode == RTE_BBDEV_CODE_BLOCK) {
2289 tb_size = op->turbo_dec.tb_params.k_neg;
2291 c = op->turbo_dec.tb_params.c;
2292 r = op->turbo_dec.tb_params.r;
2293 for (i = 0; i < c-r; i++)
2294 tb_size += (r < op->turbo_dec.tb_params.c_neg) ?
2295 op->turbo_dec.tb_params.k_neg :
2296 op->turbo_dec.tb_params.k_pos;
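/* Transport block size in bits for an LDPC decode op: each code block
 * carries sys_cols * Zc - n_filler bits, with 22 systematic columns for
 * base graph 1 and 10 for base graph 2.
 */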
2302 calc_ldpc_dec_TB_size(struct rte_bbdev_dec_op *op)
2305 uint32_t c, r, tb_size = 0;
2306 uint16_t sys_cols = (op->ldpc_dec.basegraph == 1) ? 22 : 10;
2308 if (op->ldpc_dec.code_block_mode == RTE_BBDEV_CODE_BLOCK) {
2309 tb_size = sys_cols * op->ldpc_dec.z_c - op->ldpc_dec.n_filler;
2311 c = op->ldpc_dec.tb_params.c;
2312 r = op->ldpc_dec.tb_params.r;
2313 for (i = 0; i < c-r; i++)
2314 tb_size += sys_cols * op->ldpc_dec.z_c
2315 - op->ldpc_dec.n_filler;
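/* Transport block size in bits for a turbo encode op (same K-/K+
 * accounting as the decoder variant).
 */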
2321 calc_enc_TB_size(struct rte_bbdev_enc_op *op)
2324 uint32_t c, r, tb_size = 0;
2326 if (op->turbo_enc.code_block_mode == RTE_BBDEV_CODE_BLOCK) {
2327 tb_size = op->turbo_enc.tb_params.k_neg;
2329 c = op->turbo_enc.tb_params.c;
2330 r = op->turbo_enc.tb_params.r;
2331 for (i = 0; i < c-r; i++)
2332 tb_size += (r < op->turbo_enc.tb_params.c_neg) ?
2333 op->turbo_enc.tb_params.k_neg :
2334 op->turbo_enc.tb_params.k_pos;
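/* Transport block size in bits for an LDPC encode op, mirroring the LDPC
 * decoder computation.
 */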
2340 calc_ldpc_enc_TB_size(struct rte_bbdev_enc_op *op)
2343 uint32_t c, r, tb_size = 0;
2344 uint16_t sys_cols = (op->ldpc_enc.basegraph == 1) ? 22 : 10;
2346 if (op->ldpc_enc.code_block_mode == RTE_BBDEV_CODE_BLOCK) {
2347 tb_size = sys_cols * op->ldpc_enc.z_c - op->ldpc_enc.n_filler;
2349 c = op->ldpc_enc.tb_params.c;
2350 r = op->ldpc_enc.tb_params.r;
2351 for (i = 0; i < c-r; i++)
2352 tb_size += sys_cols * op->ldpc_enc.z_c
2353 - op->ldpc_enc.n_filler;
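/* Allocate a single reference op from the mempool and fill in the common
 * test parameters (burst size, number of ops, lcores, vector mask, status).
 */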
2360 init_test_op_params(struct test_op_params *op_params,
2361 enum rte_bbdev_op_type op_type, const int expected_status,
2362 const int vector_mask, struct rte_mempool *ops_mp,
2363 uint16_t burst_sz, uint16_t num_to_process, uint16_t num_lcores)
2366 if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
2367 op_type == RTE_BBDEV_OP_LDPC_DEC)
2368 ret = rte_bbdev_dec_op_alloc_bulk(ops_mp,
2369 &op_params->ref_dec_op, 1);
2371 ret = rte_bbdev_enc_op_alloc_bulk(ops_mp,
2372 &op_params->ref_enc_op, 1);
2374 TEST_ASSERT_SUCCESS(ret, "rte_bbdev_op_alloc_bulk() failed");
2376 op_params->mp = ops_mp;
2377 op_params->burst_sz = burst_sz;
2378 op_params->num_to_process = num_to_process;
2379 op_params->num_lcores = num_lcores;
2380 op_params->vector_mask = vector_mask;
2381 if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
2382 op_type == RTE_BBDEV_OP_LDPC_DEC)
2383 op_params->ref_dec_op->status = expected_status;
2384 else if (op_type == RTE_BBDEV_OP_TURBO_ENC
2385 || op_type == RTE_BBDEV_OP_LDPC_ENC)
2386 op_params->ref_enc_op->status = expected_status;
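/* Set up mempools, the reference op and the queue buffers for one device,
 * run the given test case function, then release the device resources.
 */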
2391 run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id,
2392 struct test_op_params *op_params)
2394 int t_ret, f_ret, socket_id = SOCKET_ID_ANY;
2396 struct active_device *ad;
2397 unsigned int burst_sz = get_burst_sz();
2398 enum rte_bbdev_op_type op_type = test_vector.op_type;
2399 const struct rte_bbdev_op_cap *capabilities = NULL;
2401 ad = &active_devs[dev_id];
2403 /* Check if device supports op_type */
2404 if (!is_avail_op(ad, test_vector.op_type))
2405 return TEST_SUCCESS;
2407 struct rte_bbdev_info info;
2408 rte_bbdev_info_get(ad->dev_id, &info);
2409 socket_id = GET_SOCKET(info.socket_id);
2411 f_ret = create_mempools(ad, socket_id, op_type,
2413 if (f_ret != TEST_SUCCESS) {
2414 printf("Couldn't create mempools");
2417 if (op_type == RTE_BBDEV_OP_NONE)
2418 op_type = RTE_BBDEV_OP_TURBO_ENC;
2420 f_ret = init_test_op_params(op_params, test_vector.op_type,
2421 test_vector.expected_status,
2427 if (f_ret != TEST_SUCCESS) {
2428 printf("Couldn't init test op params");
2433 /* Find capabilities */
2434 const struct rte_bbdev_op_cap *cap = info.drv.capabilities;
2435 for (i = 0; i < RTE_BBDEV_OP_TYPE_COUNT; i++) {
2436 if (cap->type == test_vector.op_type) {
2442 TEST_ASSERT_NOT_NULL(capabilities,
2443 "Couldn't find capabilities");
2445 if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
2446 create_reference_dec_op(op_params->ref_dec_op);
2447 } else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
2448 create_reference_enc_op(op_params->ref_enc_op);
2449 else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
2450 create_reference_ldpc_enc_op(op_params->ref_enc_op);
2451 else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
2452 create_reference_ldpc_dec_op(op_params->ref_dec_op);
2454 for (i = 0; i < ad->nb_queues; ++i) {
2455 f_ret = fill_queue_buffers(op_params,
2457 ad->hard_out_mbuf_pool,
2458 ad->soft_out_mbuf_pool,
2459 ad->harq_in_mbuf_pool,
2460 ad->harq_out_mbuf_pool,
2463 info.drv.min_alignment,
2465 if (f_ret != TEST_SUCCESS) {
2466 printf("Couldn't init queue buffers");
2471 /* Run test case function */
2472 t_ret = test_case_func(ad, op_params);
2474 /* Free active device resources and return */
2475 free_buffers(ad, op_params);
2479 free_buffers(ad, op_params);
2483 /* Run given test function per active device per supported op type
2487 run_test_case(test_case_function *test_case_func)
2492 /* Alloc op_params */
2493 struct test_op_params *op_params = rte_zmalloc(NULL,
2494 sizeof(struct test_op_params), RTE_CACHE_LINE_SIZE);
2495 TEST_ASSERT_NOT_NULL(op_params, "Failed to alloc %zuB for op_params",
2496 RTE_ALIGN(sizeof(struct test_op_params),
2497 RTE_CACHE_LINE_SIZE));
2499 /* For each device run test case function */
2500 for (dev = 0; dev < nb_active_devs; ++dev)
2501 ret |= run_test_case_on_device(test_case_func, dev, op_params);
2503 rte_free(op_params);
2509 /* Push back the HARQ output from DDR to host */
2511 retrieve_harq_ddr(uint16_t dev_id, uint16_t queue_id,
2512 struct rte_bbdev_dec_op **ops,
2516 int save_status, ret;
2517 uint32_t harq_offset = (uint32_t) queue_id * HARQ_INCR * MAX_OPS;
2518 struct rte_bbdev_dec_op *ops_deq[MAX_BURST];
2519 uint32_t flags = ops[0]->ldpc_dec.op_flags;
2520 bool loopback = flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK;
2521 bool mem_out = flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
2522 bool hc_out = flags & RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE;
2523 bool h_comp = flags & RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
2524 for (j = 0; j < n; ++j) {
2525 if ((loopback && mem_out) || hc_out) {
2526 save_status = ops[j]->status;
2527 ops[j]->ldpc_dec.op_flags =
2528 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK |
2529 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE;
2531 ops[j]->ldpc_dec.op_flags |=
2532 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
2533 ops[j]->ldpc_dec.harq_combined_input.offset =
2535 ops[j]->ldpc_dec.harq_combined_output.offset = 0;
2536 harq_offset += HARQ_INCR;
2538 ops[j]->ldpc_dec.harq_combined_input.length =
2539 ops[j]->ldpc_dec.harq_combined_output.length;
2540 rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
2544 ret = rte_bbdev_dequeue_ldpc_dec_ops(
2547 ops[j]->ldpc_dec.op_flags = flags;
2548 ops[j]->status = save_status;
2554 /* Preload the HARQ input into the device HARQ memory via loopback
2555 * operations and adjust the HARQ input/output offsets accordingly. */
2558 preload_harq_ddr(uint16_t dev_id, uint16_t queue_id,
2559 struct rte_bbdev_dec_op **ops, const uint16_t n,
2564 uint32_t harq_offset = (uint32_t) queue_id * HARQ_INCR * MAX_OPS;
2565 struct rte_bbdev_op_data save_hc_in[MAX_OPS], save_hc_out[MAX_OPS];
2566 struct rte_bbdev_dec_op *ops_deq[MAX_OPS];
2567 uint32_t flags = ops[0]->ldpc_dec.op_flags;
2568 bool mem_in = flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE;
2569 bool hc_in = flags & RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE;
2570 bool mem_out = flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
2571 bool hc_out = flags & RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE;
2572 bool h_comp = flags & RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
2573 if ((mem_in || hc_in) && preload) {
2574 for (j = 0; j < n; ++j) {
2575 save_hc_in[j] = ops[j]->ldpc_dec.harq_combined_input;
2576 save_hc_out[j] = ops[j]->ldpc_dec.harq_combined_output;
2577 ops[j]->ldpc_dec.op_flags =
2578 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK |
2579 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
2581 ops[j]->ldpc_dec.op_flags |=
2582 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
2583 ops[j]->ldpc_dec.harq_combined_output.offset =
2585 ops[j]->ldpc_dec.harq_combined_input.offset = 0;
2586 harq_offset += HARQ_INCR;
2588 rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id, &ops[0], n);
2591 deq += rte_bbdev_dequeue_ldpc_dec_ops(
2592 dev_id, queue_id, &ops_deq[deq],
2594 /* Restore the operations */
2595 for (j = 0; j < n; ++j) {
2596 ops[j]->ldpc_dec.op_flags = flags;
2597 ops[j]->ldpc_dec.harq_combined_input = save_hc_in[j];
2598 ops[j]->ldpc_dec.harq_combined_output = save_hc_out[j];
2601 harq_offset = (uint32_t) queue_id * HARQ_INCR * MAX_OPS;
2602 for (j = 0; j < n; ++j) {
2603 /* Adjust HARQ offset when we reach external DDR */
2604 if (mem_in || hc_in)
2605 ops[j]->ldpc_dec.harq_combined_input.offset
2607 if (mem_out || hc_out)
2608 ops[j]->ldpc_dec.harq_combined_output.offset
2610 harq_offset += HARQ_INCR;
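/* Interrupt callback: dequeue completed ops for the matching queue,
 * validate them once all have been received and accumulate the
 * per-thread throughput statistics.
 */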
2615 dequeue_event_callback(uint16_t dev_id,
2616 enum rte_bbdev_event_type event, void *cb_arg,
2621 uint64_t total_time;
2622 uint16_t deq, burst_sz, num_ops;
2623 uint16_t queue_id = *(uint16_t *) ret_param;
2624 struct rte_bbdev_info info;
2626 struct thread_params *tp = cb_arg;
2628 /* Find matching thread params using queue_id */
2629 for (i = 0; i < MAX_QUEUES; ++i, ++tp)
2630 if (tp->queue_id == queue_id)
2633 if (i == MAX_QUEUES) {
2634 printf("%s: Queue_id from interrupt details was not found!\n",
2639 if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {
2640 rte_atomic16_set(&tp->processing_status, TEST_FAILED);
2642 "Dequeue interrupt handler called for incorrect event!\n");
2646 burst_sz = rte_atomic16_read(&tp->burst_sz);
2647 num_ops = tp->op_params->num_to_process;
2649 if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
2650 deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
2652 rte_atomic16_read(&tp->nb_dequeued)],
2654 else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
2655 deq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
2657 rte_atomic16_read(&tp->nb_dequeued)],
2659 else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
2660 deq = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
2662 rte_atomic16_read(&tp->nb_dequeued)],
2664 else /*RTE_BBDEV_OP_TURBO_ENC*/
2665 deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
2667 rte_atomic16_read(&tp->nb_dequeued)],
2670 if (deq < burst_sz) {
2672 "After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n",
2674 rte_atomic16_set(&tp->processing_status, TEST_FAILED);
2678 if (rte_atomic16_read(&tp->nb_dequeued) + deq < num_ops) {
2679 rte_atomic16_add(&tp->nb_dequeued, deq);
2683 total_time = rte_rdtsc_precise() - tp->start_time;
2685 rte_bbdev_info_get(dev_id, &info);
2689 if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
2690 struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
2691 ret = validate_dec_op(tp->dec_ops, num_ops, ref_op,
2692 tp->op_params->vector_mask);
2693 /* get the max of iter_count for all dequeued ops */
2694 for (i = 0; i < num_ops; ++i)
2695 tp->iter_count = RTE_MAX(
2696 tp->dec_ops[i]->turbo_dec.iter_count,
2698 rte_bbdev_dec_op_free_bulk(tp->dec_ops, deq);
2699 } else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC) {
2700 struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
2701 ret = validate_enc_op(tp->enc_ops, num_ops, ref_op);
2702 rte_bbdev_enc_op_free_bulk(tp->enc_ops, deq);
2703 } else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC) {
2704 struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
2705 ret = validate_ldpc_enc_op(tp->enc_ops, num_ops, ref_op);
2706 rte_bbdev_enc_op_free_bulk(tp->enc_ops, deq);
2707 } else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
2708 struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
2709 ret = validate_ldpc_dec_op(tp->dec_ops, num_ops, ref_op,
2710 tp->op_params->vector_mask);
2711 rte_bbdev_dec_op_free_bulk(tp->dec_ops, deq);
2715 printf("Buffers validation failed\n");
2716 rte_atomic16_set(&tp->processing_status, TEST_FAILED);
2719 switch (test_vector.op_type) {
2720 case RTE_BBDEV_OP_TURBO_DEC:
2721 tb_len_bits = calc_dec_TB_size(tp->op_params->ref_dec_op);
2723 case RTE_BBDEV_OP_TURBO_ENC:
2724 tb_len_bits = calc_enc_TB_size(tp->op_params->ref_enc_op);
2726 case RTE_BBDEV_OP_LDPC_DEC:
2727 tb_len_bits = calc_ldpc_dec_TB_size(tp->op_params->ref_dec_op);
2729 case RTE_BBDEV_OP_LDPC_ENC:
2730 tb_len_bits = calc_ldpc_enc_TB_size(tp->op_params->ref_enc_op);
2732 case RTE_BBDEV_OP_NONE:
2736 printf("Unknown op type: %d\n", test_vector.op_type);
2737 rte_atomic16_set(&tp->processing_status, TEST_FAILED);
2741 tp->ops_per_sec += ((double)num_ops) /
2742 ((double)total_time / (double)rte_get_tsc_hz());
2743 tp->mbps += (((double)(num_ops * tb_len_bits)) / 1000000.0) /
2744 ((double)total_time / (double)rte_get_tsc_hz());
2746 rte_atomic16_add(&tp->nb_dequeued, deq);
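/* Interrupt-mode throughput worker for the LDPC decoder. */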
2750 throughput_intr_lcore_ldpc_dec(void *arg)
2752 struct thread_params *tp = arg;
2753 unsigned int enqueued;
2754 const uint16_t queue_id = tp->queue_id;
2755 const uint16_t burst_sz = tp->op_params->burst_sz;
2756 const uint16_t num_to_process = tp->op_params->num_to_process;
2757 struct rte_bbdev_dec_op *ops[num_to_process];
2758 struct test_buffers *bufs = NULL;
2759 struct rte_bbdev_info info;
2761 struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
2762 uint16_t num_to_enq, enq;
2764 bool loopback = check_bit(ref_op->ldpc_dec.op_flags,
2765 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK);
2766 bool hc_out = check_bit(ref_op->ldpc_dec.op_flags,
2767 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);
2769 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
2770 "BURST_SIZE should be <= %u", MAX_BURST);
2772 TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
2773 "Failed to enable interrupts for dev: %u, queue_id: %u",
2774 tp->dev_id, queue_id);
2776 rte_bbdev_info_get(tp->dev_id, &info);
2778 TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
2779 "NUM_OPS cannot exceed %u for this device",
2780 info.drv.queue_size_lim);
2782 bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
2784 rte_atomic16_clear(&tp->processing_status);
2785 rte_atomic16_clear(&tp->nb_dequeued);
2787 while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
2790 ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
2792 TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
2794 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
2795 copy_reference_ldpc_dec_op(ops, num_to_process, 0, bufs->inputs,
2796 bufs->hard_outputs, bufs->soft_outputs,
2797 bufs->harq_inputs, bufs->harq_outputs, ref_op);
2799 /* Set counter to validate the ordering */
2800 for (j = 0; j < num_to_process; ++j)
2801 ops[j]->opaque_data = (void *)(uintptr_t)j;
2803 for (j = 0; j < TEST_REPETITIONS; ++j) {
2804 for (i = 0; i < num_to_process; ++i) {
2807 ops[i]->ldpc_dec.hard_output.data);
2808 if (hc_out || loopback)
2810 ops[i]->ldpc_dec.harq_combined_output.data);
2813 tp->start_time = rte_rdtsc_precise();
2814 for (enqueued = 0; enqueued < num_to_process;) {
2815 num_to_enq = burst_sz;
2817 if (unlikely(num_to_process - enqueued < num_to_enq))
2818 num_to_enq = num_to_process - enqueued;
2822 enq += rte_bbdev_enqueue_ldpc_dec_ops(
2824 queue_id, &ops[enqueued],
2826 } while (unlikely(num_to_enq != enq));
2829 /* Record in the thread's burst_sz the number of descriptors just
2830 * enqueued, so that the proper number of descriptors is dequeued in
2831 * the callback function - needed for the last batch when the number
2832 * of operations is not a multiple of the burst size.
2833 */
2836 rte_atomic16_set(&tp->burst_sz, num_to_enq);
2838 /* Wait until processing of the previous batch is completed. */
2841 while (rte_atomic16_read(&tp->nb_dequeued) !=
2845 if (j != TEST_REPETITIONS - 1)
2846 rte_atomic16_clear(&tp->nb_dequeued);
2849 return TEST_SUCCESS;
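/* Interrupt-mode throughput worker for the turbo decoder. */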
2853 throughput_intr_lcore_dec(void *arg)
2855 struct thread_params *tp = arg;
2856 unsigned int enqueued;
2857 const uint16_t queue_id = tp->queue_id;
2858 const uint16_t burst_sz = tp->op_params->burst_sz;
2859 const uint16_t num_to_process = tp->op_params->num_to_process;
2860 struct rte_bbdev_dec_op *ops[num_to_process];
2861 struct test_buffers *bufs = NULL;
2862 struct rte_bbdev_info info;
2864 uint16_t num_to_enq, enq;
2866 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
2867 "BURST_SIZE should be <= %u", MAX_BURST);
2869 TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
2870 "Failed to enable interrupts for dev: %u, queue_id: %u",
2871 tp->dev_id, queue_id);
2873 rte_bbdev_info_get(tp->dev_id, &info);
2875 TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
2876 "NUM_OPS cannot exceed %u for this device",
2877 info.drv.queue_size_lim);
2879 bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
2881 rte_atomic16_clear(&tp->processing_status);
2882 rte_atomic16_clear(&tp->nb_dequeued);
2884 while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
2887 ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
2889 TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
2891 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
2892 copy_reference_dec_op(ops, num_to_process, 0, bufs->inputs,
2893 bufs->hard_outputs, bufs->soft_outputs,
2894 tp->op_params->ref_dec_op);
2896 /* Set counter to validate the ordering */
2897 for (j = 0; j < num_to_process; ++j)
2898 ops[j]->opaque_data = (void *)(uintptr_t)j;
2900 for (j = 0; j < TEST_REPETITIONS; ++j) {
2901 for (i = 0; i < num_to_process; ++i)
2902 rte_pktmbuf_reset(ops[i]->turbo_dec.hard_output.data);
2904 tp->start_time = rte_rdtsc_precise();
2905 for (enqueued = 0; enqueued < num_to_process;) {
2906 num_to_enq = burst_sz;
2908 if (unlikely(num_to_process - enqueued < num_to_enq))
2909 num_to_enq = num_to_process - enqueued;
2913 enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
2914 queue_id, &ops[enqueued],
2916 } while (unlikely(num_to_enq != enq));
2919 /* Record in the thread's burst_sz the number of descriptors just
2920 * enqueued, so that the proper number of descriptors is dequeued in
2921 * the callback function - needed for the last batch when the number
2922 * of operations is not a multiple of the burst size.
2923 */
2926 rte_atomic16_set(&tp->burst_sz, num_to_enq);
2928 /* Wait until processing of the previous batch is completed. */
2931 while (rte_atomic16_read(&tp->nb_dequeued) !=
2935 if (j != TEST_REPETITIONS - 1)
2936 rte_atomic16_clear(&tp->nb_dequeued);
2939 return TEST_SUCCESS;
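/* Interrupt-mode throughput worker for the turbo encoder. */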
2943 throughput_intr_lcore_enc(void *arg)
2945 struct thread_params *tp = arg;
2946 unsigned int enqueued;
2947 const uint16_t queue_id = tp->queue_id;
2948 const uint16_t burst_sz = tp->op_params->burst_sz;
2949 const uint16_t num_to_process = tp->op_params->num_to_process;
2950 struct rte_bbdev_enc_op *ops[num_to_process];
2951 struct test_buffers *bufs = NULL;
2952 struct rte_bbdev_info info;
2954 uint16_t num_to_enq, enq;
2956 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
2957 "BURST_SIZE should be <= %u", MAX_BURST);
2959 TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
2960 "Failed to enable interrupts for dev: %u, queue_id: %u",
2961 tp->dev_id, queue_id);
2963 rte_bbdev_info_get(tp->dev_id, &info);
2965 TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
2966 "NUM_OPS cannot exceed %u for this device",
2967 info.drv.queue_size_lim);
2969 bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
2971 rte_atomic16_clear(&tp->processing_status);
2972 rte_atomic16_clear(&tp->nb_dequeued);
2974 while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
2977 ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
2979 TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
2981 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
2982 copy_reference_enc_op(ops, num_to_process, 0, bufs->inputs,
2983 bufs->hard_outputs, tp->op_params->ref_enc_op);
2985 /* Set counter to validate the ordering */
2986 for (j = 0; j < num_to_process; ++j)
2987 ops[j]->opaque_data = (void *)(uintptr_t)j;
2989 for (j = 0; j < TEST_REPETITIONS; ++j) {
2990 for (i = 0; i < num_to_process; ++i)
2991 rte_pktmbuf_reset(ops[i]->turbo_enc.output.data);
2993 tp->start_time = rte_rdtsc_precise();
2994 for (enqueued = 0; enqueued < num_to_process;) {
2995 num_to_enq = burst_sz;
2997 if (unlikely(num_to_process - enqueued < num_to_enq))
2998 num_to_enq = num_to_process - enqueued;
3002 enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
3003 queue_id, &ops[enqueued],
3005 } while (unlikely(enq != num_to_enq));
3008 /* Record in the thread's burst_sz the number of descriptors just
3009 * enqueued, so that the proper number of descriptors is dequeued in
3010 * the callback function - needed for the last batch when the number
3011 * of operations is not a multiple of the burst size.
3012 */
3015 rte_atomic16_set(&tp->burst_sz, num_to_enq);
3017 /* Wait until processing of the previous batch is completed. */
3020 while (rte_atomic16_read(&tp->nb_dequeued) !=
3024 if (j != TEST_REPETITIONS - 1)
3025 rte_atomic16_clear(&tp->nb_dequeued);
3028 return TEST_SUCCESS;
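/* Interrupt-mode throughput worker for the LDPC encoder. */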
3033 throughput_intr_lcore_ldpc_enc(void *arg)
3035 struct thread_params *tp = arg;
3036 unsigned int enqueued;
3037 const uint16_t queue_id = tp->queue_id;
3038 const uint16_t burst_sz = tp->op_params->burst_sz;
3039 const uint16_t num_to_process = tp->op_params->num_to_process;
3040 struct rte_bbdev_enc_op *ops[num_to_process];
3041 struct test_buffers *bufs = NULL;
3042 struct rte_bbdev_info info;
3044 uint16_t num_to_enq, enq;
3046 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
3047 "BURST_SIZE should be <= %u", MAX_BURST);
3049 TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
3050 "Failed to enable interrupts for dev: %u, queue_id: %u",
3051 tp->dev_id, queue_id);
3053 rte_bbdev_info_get(tp->dev_id, &info);
3055 TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
3056 "NUM_OPS cannot exceed %u for this device",
3057 info.drv.queue_size_lim);
3059 bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
3061 rte_atomic16_clear(&tp->processing_status);
3062 rte_atomic16_clear(&tp->nb_dequeued);
3064 while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
3067 ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
3069 TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
3071 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
3072 copy_reference_ldpc_enc_op(ops, num_to_process, 0,
3073 bufs->inputs, bufs->hard_outputs,
3074 tp->op_params->ref_enc_op);
3076 /* Set counter to validate the ordering */
3077 for (j = 0; j < num_to_process; ++j)
3078 ops[j]->opaque_data = (void *)(uintptr_t)j;
3080 for (j = 0; j < TEST_REPETITIONS; ++j) {
3081 for (i = 0; i < num_to_process; ++i)
3082 rte_pktmbuf_reset(ops[i]->turbo_enc.output.data);
3084 tp->start_time = rte_rdtsc_precise();
3085 for (enqueued = 0; enqueued < num_to_process;) {
3086 num_to_enq = burst_sz;
3088 if (unlikely(num_to_process - enqueued < num_to_enq))
3089 num_to_enq = num_to_process - enqueued;
3093 enq += rte_bbdev_enqueue_ldpc_enc_ops(
3095 queue_id, &ops[enqueued],
3097 } while (unlikely(enq != num_to_enq));
3100 /* Record in the thread's burst_sz the number of descriptors just
3101 * enqueued, so that the proper number of descriptors is dequeued in
3102 * the callback function - needed for the last batch when the number
3103 * of operations is not a multiple of the burst size.
3104 */
3107 rte_atomic16_set(&tp->burst_sz, num_to_enq);
3109 /* Wait until processing of the previous batch is completed. */
3112 while (rte_atomic16_read(&tp->nb_dequeued) !=
3116 if (j != TEST_REPETITIONS - 1)
3117 rte_atomic16_clear(&tp->nb_dequeued);
3120 return TEST_SUCCESS;
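/* PMD (polling) throughput worker for the turbo decoder. */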
3124 throughput_pmd_lcore_dec(void *arg)
3126 struct thread_params *tp = arg;
3128 uint64_t total_time = 0, start_time;
3129 const uint16_t queue_id = tp->queue_id;
3130 const uint16_t burst_sz = tp->op_params->burst_sz;
3131 const uint16_t num_ops = tp->op_params->num_to_process;
3132 struct rte_bbdev_dec_op *ops_enq[num_ops];
3133 struct rte_bbdev_dec_op *ops_deq[num_ops];
3134 struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
3135 struct test_buffers *bufs = NULL;
3137 struct rte_bbdev_info info;
3138 uint16_t num_to_enq;
3140 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
3141 "BURST_SIZE should be <= %u", MAX_BURST);
3143 rte_bbdev_info_get(tp->dev_id, &info);
3145 TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
3146 "NUM_OPS cannot exceed %u for this device",
3147 info.drv.queue_size_lim);
3149 bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
3151 while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
3154 ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
3155 TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
3157 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
3158 copy_reference_dec_op(ops_enq, num_ops, 0, bufs->inputs,
3159 bufs->hard_outputs, bufs->soft_outputs, ref_op);
3161 /* Set counter to validate the ordering */
3162 for (j = 0; j < num_ops; ++j)
3163 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
3165 for (i = 0; i < TEST_REPETITIONS; ++i) {
3167 for (j = 0; j < num_ops; ++j)
3168 mbuf_reset(ops_enq[j]->turbo_dec.hard_output.data);
3170 start_time = rte_rdtsc_precise();
3172 for (enq = 0, deq = 0; enq < num_ops;) {
3173 num_to_enq = burst_sz;
3175 if (unlikely(num_ops - enq < num_to_enq))
3176 num_to_enq = num_ops - enq;
3178 enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
3179 queue_id, &ops_enq[enq], num_to_enq);
3181 deq += rte_bbdev_dequeue_dec_ops(tp->dev_id,
3182 queue_id, &ops_deq[deq], enq - deq);
3185 /* dequeue the remaining */
3187 deq += rte_bbdev_dequeue_dec_ops(tp->dev_id,
3188 queue_id, &ops_deq[deq], enq - deq);
3191 total_time += rte_rdtsc_precise() - start_time;
3195 /* get the max of iter_count for all dequeued ops */
3196 for (i = 0; i < num_ops; ++i) {
3197 tp->iter_count = RTE_MAX(ops_enq[i]->turbo_dec.iter_count,
3201 if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
3202 ret = validate_dec_op(ops_deq, num_ops, ref_op,
3203 tp->op_params->vector_mask);
3204 TEST_ASSERT_SUCCESS(ret, "Validation failed!");
3207 rte_bbdev_dec_op_free_bulk(ops_enq, num_ops);
3209 double tb_len_bits = calc_dec_TB_size(ref_op);
3211 tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
3212 ((double)total_time / (double)rte_get_tsc_hz());
3213 tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits)) /
3214 1000000.0) / ((double)total_time /
3215 (double)rte_get_tsc_hz());
3217 return TEST_SUCCESS;
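/* BLER worker for the LDPC decoder: enables early termination, generates
 * LLR input and runs a single pass to measure the block error rate,
 * average iteration count and throughput.
 */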
3221 bler_pmd_lcore_ldpc_dec(void *arg)
3223 struct thread_params *tp = arg;
3225 uint64_t total_time = 0, start_time;
3226 const uint16_t queue_id = tp->queue_id;
3227 const uint16_t burst_sz = tp->op_params->burst_sz;
3228 const uint16_t num_ops = tp->op_params->num_to_process;
3229 struct rte_bbdev_dec_op *ops_enq[num_ops];
3230 struct rte_bbdev_dec_op *ops_deq[num_ops];
3231 struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
3232 struct test_buffers *bufs = NULL;
3234 float parity_bler = 0;
3235 struct rte_bbdev_info info;
3236 uint16_t num_to_enq;
3237 bool extDdr = check_bit(ldpc_cap_flags,
3238 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE);
3239 bool loopback = check_bit(ref_op->ldpc_dec.op_flags,
3240 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK);
3241 bool hc_out = check_bit(ref_op->ldpc_dec.op_flags,
3242 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);
3244 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
3245 "BURST_SIZE should be <= %u", MAX_BURST);
3247 rte_bbdev_info_get(tp->dev_id, &info);
3249 TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
3250 "NUM_OPS cannot exceed %u for this device",
3251 info.drv.queue_size_lim);
3253 bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
3255 while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
3258 ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
3259 TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
3261 /* For BLER tests we need to enable early termination */
3262 if (!check_bit(ref_op->ldpc_dec.op_flags,
3263 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE))
3264 ref_op->ldpc_dec.op_flags +=
3265 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
3266 ref_op->ldpc_dec.iter_max = get_iter_max();
3267 ref_op->ldpc_dec.iter_count = ref_op->ldpc_dec.iter_max;
3269 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
3270 copy_reference_ldpc_dec_op(ops_enq, num_ops, 0, bufs->inputs,
3271 bufs->hard_outputs, bufs->soft_outputs,
3272 bufs->harq_inputs, bufs->harq_outputs, ref_op);
3273 generate_llr_input(num_ops, bufs->inputs, ref_op);
3275 /* Set counter to validate the ordering */
3276 for (j = 0; j < num_ops; ++j)
3277 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
3279 for (i = 0; i < 1; ++i) { /* Could add more iterations */
3280 for (j = 0; j < num_ops; ++j) {
3283 ops_enq[j]->ldpc_dec.hard_output.data);
3284 if (hc_out || loopback)
3286 ops_enq[j]->ldpc_dec.harq_combined_output.data);
3289 preload_harq_ddr(tp->dev_id, queue_id, ops_enq,
3291 start_time = rte_rdtsc_precise();
3293 for (enq = 0, deq = 0; enq < num_ops;) {
3294 num_to_enq = burst_sz;
3296 if (unlikely(num_ops - enq < num_to_enq))
3297 num_to_enq = num_ops - enq;
3299 enq += rte_bbdev_enqueue_ldpc_dec_ops(tp->dev_id,
3300 queue_id, &ops_enq[enq], num_to_enq);
3302 deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
3303 queue_id, &ops_deq[deq], enq - deq);
3306 /* dequeue the remaining */
3308 deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
3309 queue_id, &ops_deq[deq], enq - deq);
3312 total_time += rte_rdtsc_precise() - start_time;
3316 tp->iter_average = 0;
3317 /* get the max of iter_count for all dequeued ops */
3318 for (i = 0; i < num_ops; ++i) {
3319 tp->iter_count = RTE_MAX(ops_enq[i]->ldpc_dec.iter_count,
3321 tp->iter_average += (double) ops_enq[i]->ldpc_dec.iter_count;
3322 if (ops_enq[i]->status & (1 << RTE_BBDEV_SYNDROME_ERROR))
3326 parity_bler /= num_ops; /* This one is based on SYND */
3327 tp->iter_average /= num_ops;
3328 tp->bler = (double) validate_ldpc_bler(ops_deq, num_ops) / num_ops;
3330 if (test_vector.op_type != RTE_BBDEV_OP_NONE
3334 ret = validate_ldpc_dec_op(ops_deq, num_ops, ref_op,
3335 tp->op_params->vector_mask);
3336 TEST_ASSERT_SUCCESS(ret, "Validation failed!");
3339 rte_bbdev_dec_op_free_bulk(ops_enq, num_ops);
3341 double tb_len_bits = calc_ldpc_dec_TB_size(ref_op);
3342 tp->ops_per_sec = ((double)num_ops * 1) /
3343 ((double)total_time / (double)rte_get_tsc_hz());
3344 tp->mbps = (((double)(num_ops * 1 * tb_len_bits)) /
3345 1000000.0) / ((double)total_time /
3346 (double)rte_get_tsc_hz());
3348 return TEST_SUCCESS;
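/* PMD (polling) throughput worker for the LDPC decoder, with optional
 * HARQ preload to and retrieval from external DDR.
 */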
3352 throughput_pmd_lcore_ldpc_dec(void *arg)
3354 struct thread_params *tp = arg;
3356 uint64_t total_time = 0, start_time;
3357 const uint16_t queue_id = tp->queue_id;
3358 const uint16_t burst_sz = tp->op_params->burst_sz;
3359 const uint16_t num_ops = tp->op_params->num_to_process;
3360 struct rte_bbdev_dec_op *ops_enq[num_ops];
3361 struct rte_bbdev_dec_op *ops_deq[num_ops];
3362 struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
3363 struct test_buffers *bufs = NULL;
3365 struct rte_bbdev_info info;
3366 uint16_t num_to_enq;
3367 bool extDdr = check_bit(ldpc_cap_flags,
3368 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE);
3369 bool loopback = check_bit(ref_op->ldpc_dec.op_flags,
3370 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK);
3371 bool hc_out = check_bit(ref_op->ldpc_dec.op_flags,
3372 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);
3374 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
3375 "BURST_SIZE should be <= %u", MAX_BURST);
3377 rte_bbdev_info_get(tp->dev_id, &info);
3379 TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
3380 "NUM_OPS cannot exceed %u for this device",
3381 info.drv.queue_size_lim);
3383 bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
3385 while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
3388 ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
3389 TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
3391 /* For throughput tests we need to disable early termination */
3392 if (check_bit(ref_op->ldpc_dec.op_flags,
3393 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE))
3394 ref_op->ldpc_dec.op_flags -=
3395 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
3396 ref_op->ldpc_dec.iter_max = get_iter_max();
3397 ref_op->ldpc_dec.iter_count = ref_op->ldpc_dec.iter_max;
3399 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
3400 copy_reference_ldpc_dec_op(ops_enq, num_ops, 0, bufs->inputs,
3401 bufs->hard_outputs, bufs->soft_outputs,
3402 bufs->harq_inputs, bufs->harq_outputs, ref_op);
3404 /* Set counter to validate the ordering */
3405 for (j = 0; j < num_ops; ++j)
3406 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
3408 for (i = 0; i < TEST_REPETITIONS; ++i) {
3409 for (j = 0; j < num_ops; ++j) {
3412 ops_enq[j]->ldpc_dec.hard_output.data);
3413 if (hc_out || loopback)
3415 ops_enq[j]->ldpc_dec.harq_combined_output.data);
3418 preload_harq_ddr(tp->dev_id, queue_id, ops_enq,
3420 start_time = rte_rdtsc_precise();
3422 for (enq = 0, deq = 0; enq < num_ops;) {
3423 num_to_enq = burst_sz;
3425 if (unlikely(num_ops - enq < num_to_enq))
3426 num_to_enq = num_ops - enq;
3428 enq += rte_bbdev_enqueue_ldpc_dec_ops(tp->dev_id,
3429 queue_id, &ops_enq[enq], num_to_enq);
3431 deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
3432 queue_id, &ops_deq[deq], enq - deq);
3435 /* dequeue the remaining */
3437 deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
3438 queue_id, &ops_deq[deq], enq - deq);
3441 total_time += rte_rdtsc_precise() - start_time;
3445 /* get the max of iter_count for all dequeued ops */
3446 for (i = 0; i < num_ops; ++i) {
3447 tp->iter_count = RTE_MAX(ops_enq[i]->ldpc_dec.iter_count,
3451 /* Read loopback is not thread safe */
3452 retrieve_harq_ddr(tp->dev_id, queue_id, ops_enq, num_ops);
3455 if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
3456 ret = validate_ldpc_dec_op(ops_deq, num_ops, ref_op,
3457 tp->op_params->vector_mask);
3458 TEST_ASSERT_SUCCESS(ret, "Validation failed!");
3461 rte_bbdev_dec_op_free_bulk(ops_enq, num_ops);
3463 double tb_len_bits = calc_ldpc_dec_TB_size(ref_op);
3465 tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
3466 ((double)total_time / (double)rte_get_tsc_hz());
3467 tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits)) /
3468 1000000.0) / ((double)total_time /
3469 (double)rte_get_tsc_hz());
3471 return TEST_SUCCESS;
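/* PMD (polling) throughput worker for the turbo encoder. */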
3475 throughput_pmd_lcore_enc(void *arg)
3477 struct thread_params *tp = arg;
3479 uint64_t total_time = 0, start_time;
3480 const uint16_t queue_id = tp->queue_id;
3481 const uint16_t burst_sz = tp->op_params->burst_sz;
3482 const uint16_t num_ops = tp->op_params->num_to_process;
3483 struct rte_bbdev_enc_op *ops_enq[num_ops];
3484 struct rte_bbdev_enc_op *ops_deq[num_ops];
3485 struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
3486 struct test_buffers *bufs = NULL;
3488 struct rte_bbdev_info info;
3489 uint16_t num_to_enq;
3491 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
3492 "BURST_SIZE should be <= %u", MAX_BURST);
3494 rte_bbdev_info_get(tp->dev_id, &info);
3496 TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
3497 "NUM_OPS cannot exceed %u for this device",
3498 info.drv.queue_size_lim);
3500 bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
3502 while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
3505 ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
3507 TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
3509 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
3510 copy_reference_enc_op(ops_enq, num_ops, 0, bufs->inputs,
3511 bufs->hard_outputs, ref_op);
3513 /* Set counter to validate the ordering */
3514 for (j = 0; j < num_ops; ++j)
3515 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
3517 for (i = 0; i < TEST_REPETITIONS; ++i) {
3519 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
3520 for (j = 0; j < num_ops; ++j)
3521 mbuf_reset(ops_enq[j]->turbo_enc.output.data);
3523 start_time = rte_rdtsc_precise();
3525 for (enq = 0, deq = 0; enq < num_ops;) {
3526 num_to_enq = burst_sz;
3528 if (unlikely(num_ops - enq < num_to_enq))
3529 num_to_enq = num_ops - enq;
3531 enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
3532 queue_id, &ops_enq[enq], num_to_enq);
3534 deq += rte_bbdev_dequeue_enc_ops(tp->dev_id,
3535 queue_id, &ops_deq[deq], enq - deq);
3538 /* dequeue the remaining */
3540 deq += rte_bbdev_dequeue_enc_ops(tp->dev_id,
3541 queue_id, &ops_deq[deq], enq - deq);
3544 total_time += rte_rdtsc_precise() - start_time;
3547 if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
3548 ret = validate_enc_op(ops_deq, num_ops, ref_op);
3549 TEST_ASSERT_SUCCESS(ret, "Validation failed!");
3552 rte_bbdev_enc_op_free_bulk(ops_enq, num_ops);
3554 double tb_len_bits = calc_enc_TB_size(ref_op);
3556 tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
3557 ((double)total_time / (double)rte_get_tsc_hz());
3558 tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits))
3559 / 1000000.0) / ((double)total_time /
3560 (double)rte_get_tsc_hz());
3562 return TEST_SUCCESS;
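/* PMD (polling) throughput worker for the LDPC encoder. */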
3566 throughput_pmd_lcore_ldpc_enc(void *arg)
3568 struct thread_params *tp = arg;
3570 uint64_t total_time = 0, start_time;
3571 const uint16_t queue_id = tp->queue_id;
3572 const uint16_t burst_sz = tp->op_params->burst_sz;
3573 const uint16_t num_ops = tp->op_params->num_to_process;
3574 struct rte_bbdev_enc_op *ops_enq[num_ops];
3575 struct rte_bbdev_enc_op *ops_deq[num_ops];
3576 struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
3577 struct test_buffers *bufs = NULL;
3579 struct rte_bbdev_info info;
3580 uint16_t num_to_enq;
3582 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
3583 "BURST_SIZE should be <= %u", MAX_BURST);
3585 rte_bbdev_info_get(tp->dev_id, &info);
3587 TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
3588 "NUM_OPS cannot exceed %u for this device",
3589 info.drv.queue_size_lim);
3591 bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
3593 while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
3596 ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
3598 TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
3600 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
3601 copy_reference_ldpc_enc_op(ops_enq, num_ops, 0, bufs->inputs,
3602 bufs->hard_outputs, ref_op);
3604 /* Set counter to validate the ordering */
3605 for (j = 0; j < num_ops; ++j)
3606 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
3608 for (i = 0; i < TEST_REPETITIONS; ++i) {
3610 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
3611 for (j = 0; j < num_ops; ++j)
3612 mbuf_reset(ops_enq[j]->turbo_enc.output.data);
3614 start_time = rte_rdtsc_precise();
3616 for (enq = 0, deq = 0; enq < num_ops;) {
3617 num_to_enq = burst_sz;
3619 if (unlikely(num_ops - enq < num_to_enq))
3620 num_to_enq = num_ops - enq;
3622 enq += rte_bbdev_enqueue_ldpc_enc_ops(tp->dev_id,
3623 queue_id, &ops_enq[enq], num_to_enq);
3625 deq += rte_bbdev_dequeue_ldpc_enc_ops(tp->dev_id,
3626 queue_id, &ops_deq[deq], enq - deq);
3629 /* dequeue the remaining */
3631 deq += rte_bbdev_dequeue_ldpc_enc_ops(tp->dev_id,
3632 queue_id, &ops_deq[deq], enq - deq);
3635 total_time += rte_rdtsc_precise() - start_time;
3638 if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
3639 ret = validate_ldpc_enc_op(ops_deq, num_ops, ref_op);
3640 TEST_ASSERT_SUCCESS(ret, "Validation failed!");
3643 rte_bbdev_enc_op_free_bulk(ops_enq, num_ops);
3645 double tb_len_bits = calc_ldpc_enc_TB_size(ref_op);
3647 tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
3648 ((double)total_time / (double)rte_get_tsc_hz());
3649 tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits))
3650 / 1000000.0) / ((double)total_time /
3651 (double)rte_get_tsc_hz());
3653 return TEST_SUCCESS;
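/* Aggregate the encoder performance results over the number of cores used */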
3657 print_enc_throughput(struct thread_params *t_params, unsigned int used_cores)
3659 unsigned int iter = 0;
3660 double total_mops = 0, total_mbps = 0;
3662 for (iter = 0; iter < used_cores; iter++) {
3664 "Throughput for core (%u): %.8lg Ops/s, %.8lg Mbps\n",
3665 t_params[iter].lcore_id, t_params[iter].ops_per_sec,
3666 t_params[iter].mbps);
3667 total_mops += t_params[iter].ops_per_sec;
3668 total_mbps += t_params[iter].mbps;
3671 "\nTotal throughput for %u cores: %.8lg Ops/s, %.8lg Mbps\n",
3672 used_cores, total_mops, total_mbps);
3675 /* Aggregate the performance results over the number of cores used */
3677 print_dec_throughput(struct thread_params *t_params, unsigned int used_cores)
3679 unsigned int core_idx = 0;
3680 double total_mops = 0, total_mbps = 0;
3681 uint8_t iter_count = 0;
3683 for (core_idx = 0; core_idx < used_cores; core_idx++) {
3685 "Throughput for core (%u): %.8lg Ops/s, %.8lg Mbps @ max %u iterations\n",
3686 t_params[core_idx].lcore_id,
3687 t_params[core_idx].ops_per_sec,
3688 t_params[core_idx].mbps,
3689 t_params[core_idx].iter_count);
3690 total_mops += t_params[core_idx].ops_per_sec;
3691 total_mbps += t_params[core_idx].mbps;
3692 iter_count = RTE_MAX(iter_count,
3693 t_params[core_idx].iter_count);
3696 "\nTotal throughput for %u cores: %.8lg Ops/s, %.8lg Mbps @ max %u iterations\n",
3697 used_cores, total_mops, total_mbps, iter_count);
3700 /* Aggregate the performance results over the number of cores used */
3702 print_dec_bler(struct thread_params *t_params, unsigned int used_cores)
3704 unsigned int core_idx = 0;
3705 double total_mbps = 0, total_bler = 0, total_iter = 0;
3706 double snr = get_snr();
3708 for (core_idx = 0; core_idx < used_cores; core_idx++) {
3709 printf("Core%u BLER %.1f %% - Iters %.1f - Tp %.1f Mbps %s\n",
3710 t_params[core_idx].lcore_id,
3711 t_params[core_idx].bler * 100,
3712 t_params[core_idx].iter_average,
3713 t_params[core_idx].mbps,
3714 get_vector_filename());
3715 total_mbps += t_params[core_idx].mbps;
3716 total_bler += t_params[core_idx].bler;
3717 total_iter += t_params[core_idx].iter_average;
3719 total_bler /= used_cores;
3720 total_iter /= used_cores;
3722 printf("SNR %.2f BLER %.1f %% - Iterations %.1f %d - Tp %.1f Mbps %s\n",
3723 snr, total_bler * 100, total_iter, get_iter_max(),
3724 total_mbps, get_vector_filename());
3728 * Test function that determines BLER wireless performance
3731 bler_test(struct active_device *ad,
3732 struct test_op_params *op_params)
3735 unsigned int lcore_id, used_cores = 0;
3736 struct thread_params *t_params;
3737 struct rte_bbdev_info info;
3738 lcore_function_t *bler_function;
3739 uint16_t num_lcores;
3740 const char *op_type_str;
3742 rte_bbdev_info_get(ad->dev_id, &info);
3744 op_type_str = rte_bbdev_op_type_str(test_vector.op_type);
3745 TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u",
3746 test_vector.op_type);
3748 printf("+ ------------------------------------------------------- +\n");
3749 printf("== test: bler\ndev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, itr mode: %s, GHz: %lg\n",
3750 info.dev_name, ad->nb_queues, op_params->burst_sz,
3751 op_params->num_to_process, op_params->num_lcores,
3753 intr_enabled ? "Interrupt mode" : "PMD mode",
3754 (double)rte_get_tsc_hz() / 1000000000.0);
3756 /* Set number of lcores */
3757 num_lcores = (ad->nb_queues < (op_params->num_lcores))
3759 : op_params->num_lcores;
3761 /* Allocate memory for thread parameters structure */
3762 t_params = rte_zmalloc(NULL, num_lcores * sizeof(struct thread_params),
3763 RTE_CACHE_LINE_SIZE);
3764 TEST_ASSERT_NOT_NULL(t_params, "Failed to alloc %zuB for t_params",
3765 RTE_ALIGN(sizeof(struct thread_params) * num_lcores,
3766 RTE_CACHE_LINE_SIZE));
3768 if ((test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) &&
3769 !check_bit(test_vector.ldpc_dec.op_flags,
3770 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)
3771 && !check_bit(test_vector.ldpc_dec.op_flags,
3772 RTE_BBDEV_LDPC_LLR_COMPRESSION))
3773 bler_function = bler_pmd_lcore_ldpc_dec;
3775 return TEST_SKIPPED;
3777 rte_atomic16_set(&op_params->sync, SYNC_WAIT);
3779 /* Main core is set at first entry */
3780 t_params[0].dev_id = ad->dev_id;
3781 t_params[0].lcore_id = rte_lcore_id();
3782 t_params[0].op_params = op_params;
3783 t_params[0].queue_id = ad->queue_ids[used_cores++];
3784 t_params[0].iter_count = 0;
3786 RTE_LCORE_FOREACH_WORKER(lcore_id) {
3787 if (used_cores >= num_lcores)
3790 t_params[used_cores].dev_id = ad->dev_id;
3791 t_params[used_cores].lcore_id = lcore_id;
3792 t_params[used_cores].op_params = op_params;
3793 t_params[used_cores].queue_id = ad->queue_ids[used_cores];
3794 t_params[used_cores].iter_count = 0;
3796 rte_eal_remote_launch(bler_function,
3797 &t_params[used_cores++], lcore_id);
3800 rte_atomic16_set(&op_params->sync, SYNC_START);
3801 ret = bler_function(&t_params[0]);
3803 /* Main core is always used */
3804 for (used_cores = 1; used_cores < num_lcores; used_cores++)
3805 ret |= rte_eal_wait_lcore(t_params[used_cores].lcore_id);
3807 print_dec_bler(t_params, num_lcores);
3809 /* Return if test failed */
3815 /* Clean up and return the aggregated result. */
3821 * Test function that determines how long an enqueue + dequeue of a burst
3822 * takes on available lcores.
3825 throughput_test(struct active_device *ad,
3826 struct test_op_params *op_params)
3829 unsigned int lcore_id, used_cores = 0;
3830 struct thread_params *t_params, *tp;
3831 struct rte_bbdev_info info;
3832 lcore_function_t *throughput_function;
3833 uint16_t num_lcores;
3834 const char *op_type_str;
3836 rte_bbdev_info_get(ad->dev_id, &info);
3838 op_type_str = rte_bbdev_op_type_str(test_vector.op_type);
3839 TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u",
3840 test_vector.op_type);
3842 printf("+ ------------------------------------------------------- +\n");
3843 printf("== test: throughput\ndev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, itr mode: %s, GHz: %lg\n",
3844 info.dev_name, ad->nb_queues, op_params->burst_sz,
3845 op_params->num_to_process, op_params->num_lcores,
3847 intr_enabled ? "Interrupt mode" : "PMD mode",
3848 (double)rte_get_tsc_hz() / 1000000000.0);
3850 /* Set number of lcores */
3851 num_lcores = (ad->nb_queues < (op_params->num_lcores))
3853 : op_params->num_lcores;
3855 /* Allocate memory for thread parameters structure */
3856 t_params = rte_zmalloc(NULL, num_lcores * sizeof(struct thread_params),
3857 RTE_CACHE_LINE_SIZE);
3858 TEST_ASSERT_NOT_NULL(t_params, "Failed to alloc %zuB for t_params",
3859 RTE_ALIGN(sizeof(struct thread_params) * num_lcores,
3860 RTE_CACHE_LINE_SIZE));
3863 if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
3864 throughput_function = throughput_intr_lcore_dec;
3865 else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
3866 throughput_function = throughput_intr_lcore_ldpc_dec;
3867 else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
3868 throughput_function = throughput_intr_lcore_enc;
3869 else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
3870 throughput_function = throughput_intr_lcore_ldpc_enc;
3872 throughput_function = throughput_intr_lcore_enc;
3874 /* Dequeue interrupt callback registration */
3875 ret = rte_bbdev_callback_register(ad->dev_id,
3876 RTE_BBDEV_EVENT_DEQUEUE, dequeue_event_callback,
3883 if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
3884 throughput_function = throughput_pmd_lcore_dec;
3885 else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
3886 throughput_function = throughput_pmd_lcore_ldpc_dec;
3887 else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
3888 throughput_function = throughput_pmd_lcore_enc;
3889 else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
3890 throughput_function = throughput_pmd_lcore_ldpc_enc;
3892 throughput_function = throughput_pmd_lcore_enc;
3895 rte_atomic16_set(&op_params->sync, SYNC_WAIT);
3897 /* Main core is set at first entry */
3898 t_params[0].dev_id = ad->dev_id;
3899 t_params[0].lcore_id = rte_lcore_id();
3900 t_params[0].op_params = op_params;
3901 t_params[0].queue_id = ad->queue_ids[used_cores++];
3902 t_params[0].iter_count = 0;
3904 RTE_LCORE_FOREACH_WORKER(lcore_id) {
3905 if (used_cores >= num_lcores)
3908 t_params[used_cores].dev_id = ad->dev_id;
3909 t_params[used_cores].lcore_id = lcore_id;
3910 t_params[used_cores].op_params = op_params;
3911 t_params[used_cores].queue_id = ad->queue_ids[used_cores];
3912 t_params[used_cores].iter_count = 0;
3914 rte_eal_remote_launch(throughput_function,
3915 &t_params[used_cores++], lcore_id);
3918 rte_atomic16_set(&op_params->sync, SYNC_START);
3919 ret = throughput_function(&t_params[0]);
3921 /* Main core is always used */
3922 for (used_cores = 1; used_cores < num_lcores; used_cores++)
3923 ret |= rte_eal_wait_lcore(t_params[used_cores].lcore_id);
3925 /* Return if test failed */
3931 /* Print throughput if interrupts are disabled and test passed */
3932 if (!intr_enabled) {
3933 if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
3934 test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
3935 print_dec_throughput(t_params, num_lcores);
3937 print_enc_throughput(t_params, num_lcores);
3942 /* In the interrupt TC we need to wait for the interrupt callback to
3943 * dequeue all pending operations. Skip waiting for queues which
3944 * reported an error via the processing_status variable.
3945 * Wait for the main lcore operations first. */
3948 while ((rte_atomic16_read(&tp->nb_dequeued) <
3949 op_params->num_to_process) &&
3950 (rte_atomic16_read(&tp->processing_status) !=
3954 tp->ops_per_sec /= TEST_REPETITIONS;
3955 tp->mbps /= TEST_REPETITIONS;
3956 ret |= (int)rte_atomic16_read(&tp->processing_status);
3958 /* Wait for worker lcores operations */
3959 for (used_cores = 1; used_cores < num_lcores; used_cores++) {
3960 tp = &t_params[used_cores];
3962 while ((rte_atomic16_read(&tp->nb_dequeued) <
3963 op_params->num_to_process) &&
3964 (rte_atomic16_read(&tp->processing_status) !=
3968 tp->ops_per_sec /= TEST_REPETITIONS;
3969 tp->mbps /= TEST_REPETITIONS;
3970 ret |= (int)rte_atomic16_read(&tp->processing_status);
3973 /* Print throughput if test passed */
3975 if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
3976 test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
3977 print_dec_throughput(t_params, num_lcores);
3978 else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC ||
3979 test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
3980 print_enc_throughput(t_params, num_lcores);
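/* Test case for latency/validation for the Turbo Decoder */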
3988 latency_test_dec(struct rte_mempool *mempool,
3989 struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op,
3990 int vector_mask, uint16_t dev_id, uint16_t queue_id,
3991 const uint16_t num_to_process, uint16_t burst_sz,
3992 uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
3994 int ret = TEST_SUCCESS;
3995 uint16_t i, j, dequeued;
3996 struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
3997 uint64_t start_time = 0, last_time = 0;
3999 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
4000 uint16_t enq = 0, deq = 0;
4001 bool first_time = true;
4004 if (unlikely(num_to_process - dequeued < burst_sz))
4005 burst_sz = num_to_process - dequeued;
4007 ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
4008 TEST_ASSERT_SUCCESS(ret,
4009 "rte_bbdev_dec_op_alloc_bulk() failed");
4010 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
4011 copy_reference_dec_op(ops_enq, burst_sz, dequeued,
4017 /* Set counter to validate the ordering */
4018 for (j = 0; j < burst_sz; ++j)
4019 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
4021 start_time = rte_rdtsc_precise();
4023 enq = rte_bbdev_enqueue_dec_ops(dev_id, queue_id, &ops_enq[enq],
4025 TEST_ASSERT(enq == burst_sz,
4026 "Error enqueueing burst, expected %u, got %u",
4031 deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
4032 &ops_deq[deq], burst_sz - deq);
4033 if (likely(first_time && (deq > 0))) {
4034 last_time = rte_rdtsc_precise() - start_time;
4035 first_time = false;
4036 }
4037 } while (unlikely(burst_sz != deq));
4039 *max_time = RTE_MAX(*max_time, last_time);
4040 *min_time = RTE_MIN(*min_time, last_time);
4041 *total_time += last_time;
4043 if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
4044 ret = validate_dec_op(ops_deq, burst_sz, ref_op,
4046 TEST_ASSERT_SUCCESS(ret, "Validation failed!");
4049 rte_bbdev_dec_op_free_bulk(ops_enq, deq);
4056 /* Test case for latency/validation for LDPC Decoder */
4058 latency_test_ldpc_dec(struct rte_mempool *mempool,
4059 struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op,
4060 int vector_mask, uint16_t dev_id, uint16_t queue_id,
4061 const uint16_t num_to_process, uint16_t burst_sz,
4062 uint64_t *total_time, uint64_t *min_time, uint64_t *max_time,
4065 int ret = TEST_SUCCESS;
4066 uint16_t i, j, dequeued;
4067 struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
4068 uint64_t start_time = 0, last_time = 0;
4069 bool extDdr = ldpc_cap_flags &
4070 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
4072 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
4073 uint16_t enq = 0, deq = 0;
4074 bool first_time = true;
4077 if (unlikely(num_to_process - dequeued < burst_sz))
4078 burst_sz = num_to_process - dequeued;
4080 ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
4081 TEST_ASSERT_SUCCESS(ret,
4082 "rte_bbdev_dec_op_alloc_bulk() failed");
4084 /* For latency tests we need to disable early termination */
4085 if (disable_et && check_bit(ref_op->ldpc_dec.op_flags,
4086 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE))
4087 ref_op->ldpc_dec.op_flags -=
4088 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
4089 ref_op->ldpc_dec.iter_max = get_iter_max();
4090 ref_op->ldpc_dec.iter_count = ref_op->ldpc_dec.iter_max;
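/* With early termination disabled the decoder runs the full iter_max
 * iterations for every code block, so the measured latency is not
 * data-dependent.
 */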
4092 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
4093 copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued,
4102 preload_harq_ddr(dev_id, queue_id, ops_enq,
4105 /* Set counter to validate the ordering */
4106 for (j = 0; j < burst_sz; ++j)
4107 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
4109 start_time = rte_rdtsc_precise();
4111 enq = rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
4112 &ops_enq[enq], burst_sz);
4113 TEST_ASSERT(enq == burst_sz,
4114 "Error enqueueing burst, expected %u, got %u",
4119 deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
4120 &ops_deq[deq], burst_sz - deq);
4121 if (likely(first_time && (deq > 0))) {
4122 last_time = rte_rdtsc_precise() - start_time;
4123 first_time = false;
4124 }
4125 } while (unlikely(burst_sz != deq));
4127 *max_time = RTE_MAX(*max_time, last_time);
4128 *min_time = RTE_MIN(*min_time, last_time);
4129 *total_time += last_time;
4132 retrieve_harq_ddr(dev_id, queue_id, ops_enq, burst_sz);
4134 if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
4135 ret = validate_ldpc_dec_op(ops_deq, burst_sz, ref_op,
4137 TEST_ASSERT_SUCCESS(ret, "Validation failed!");
4140 rte_bbdev_dec_op_free_bulk(ops_enq, deq);
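/* Test case for latency/validation for Turbo Encoder */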
4147 latency_test_enc(struct rte_mempool *mempool,
4148 struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
4149 uint16_t dev_id, uint16_t queue_id,
4150 const uint16_t num_to_process, uint16_t burst_sz,
4151 uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
4153 int ret = TEST_SUCCESS;
4154 uint16_t i, j, dequeued;
4155 struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
4156 uint64_t start_time = 0, last_time = 0;
4158 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
4159 uint16_t enq = 0, deq = 0;
4160 bool first_time = true;
4163 if (unlikely(num_to_process - dequeued < burst_sz))
4164 burst_sz = num_to_process - dequeued;
4166 ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
4167 TEST_ASSERT_SUCCESS(ret,
4168 "rte_bbdev_enc_op_alloc_bulk() failed");
4169 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
4170 copy_reference_enc_op(ops_enq, burst_sz, dequeued,
4175 /* Set counter to validate the ordering */
4176 for (j = 0; j < burst_sz; ++j)
4177 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
4179 start_time = rte_rdtsc_precise();
4181 enq = rte_bbdev_enqueue_enc_ops(dev_id, queue_id, &ops_enq[enq],
4183 TEST_ASSERT(enq == burst_sz,
4184 "Error enqueueing burst, expected %u, got %u",
4189 deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
4190 &ops_deq[deq], burst_sz - deq);
4191 if (likely(first_time && (deq > 0))) {
4192 last_time = rte_rdtsc_precise() - start_time;
4193 first_time = false;
4194 }
4195 } while (unlikely(burst_sz != deq));
4197 *max_time = RTE_MAX(*max_time, last_time);
4198 *min_time = RTE_MIN(*min_time, last_time);
4199 *total_time += last_time;
4201 if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
4202 ret = validate_enc_op(ops_deq, burst_sz, ref_op);
4203 TEST_ASSERT_SUCCESS(ret, "Validation failed!");
4206 rte_bbdev_enc_op_free_bulk(ops_enq, deq);
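/* Test case for latency/validation for LDPC Encoder */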
4214 latency_test_ldpc_enc(struct rte_mempool *mempool,
4215 struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
4216 uint16_t dev_id, uint16_t queue_id,
4217 const uint16_t num_to_process, uint16_t burst_sz,
4218 uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
4220 int ret = TEST_SUCCESS;
4221 uint16_t i, j, dequeued;
4222 struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
4223 uint64_t start_time = 0, last_time = 0;
4225 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
4226 uint16_t enq = 0, deq = 0;
4227 bool first_time = true;
4230 if (unlikely(num_to_process - dequeued < burst_sz))
4231 burst_sz = num_to_process - dequeued;
4233 ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
4234 TEST_ASSERT_SUCCESS(ret,
4235 "rte_bbdev_enc_op_alloc_bulk() failed");
4236 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
4237 copy_reference_ldpc_enc_op(ops_enq, burst_sz, dequeued,
4242 /* Set counter to validate the ordering */
4243 for (j = 0; j < burst_sz; ++j)
4244 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
4246 start_time = rte_rdtsc_precise();
4248 enq = rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
4249 &ops_enq[enq], burst_sz);
4250 TEST_ASSERT(enq == burst_sz,
4251 "Error enqueueing burst, expected %u, got %u",
4256 deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
4257 &ops_deq[deq], burst_sz - deq);
4258 if (likely(first_time && (deq > 0))) {
4259 last_time = rte_rdtsc_precise() - start_time;
4260 first_time = false;
4261 }
4262 } while (unlikely(burst_sz != deq));
4264 *max_time = RTE_MAX(*max_time, last_time);
4265 *min_time = RTE_MIN(*min_time, last_time);
4266 *total_time += last_time;
4268 if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
4269 ret = validate_enc_op(ops_deq, burst_sz, ref_op);
4270 TEST_ASSERT_SUCCESS(ret, "Validation failed!");
4273 rte_bbdev_enc_op_free_bulk(ops_enq, deq);
4280 /* Common function for running validation and latency test cases */
4282 validation_latency_test(struct active_device *ad,
4283 struct test_op_params *op_params, bool latency_flag)
4286 uint16_t burst_sz = op_params->burst_sz;
4287 const uint16_t num_to_process = op_params->num_to_process;
4288 const enum rte_bbdev_op_type op_type = test_vector.op_type;
4289 const uint16_t queue_id = ad->queue_ids[0];
4290 struct test_buffers *bufs = NULL;
4291 struct rte_bbdev_info info;
4292 uint64_t total_time, min_time, max_time;
4293 const char *op_type_str;
4295 total_time = max_time = 0;
4296 min_time = UINT64_MAX;
4298 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
4299 "BURST_SIZE should be <= %u", MAX_BURST);
4301 rte_bbdev_info_get(ad->dev_id, &info);
4302 bufs = &op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
4304 op_type_str = rte_bbdev_op_type_str(op_type);
4305 TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
4307 printf("+ ------------------------------------------------------- +\n");
4309 printf("== test: latency\ndev:");
4311 printf("== test: validation\ndev:");
4312 printf("%s, burst size: %u, num ops: %u, op type: %s\n",
4313 info.dev_name, burst_sz, num_to_process, op_type_str);
4315 if (op_type == RTE_BBDEV_OP_TURBO_DEC)
4316 iter = latency_test_dec(op_params->mp, bufs,
4317 op_params->ref_dec_op, op_params->vector_mask,
4318 ad->dev_id, queue_id, num_to_process,
4319 burst_sz, &total_time, &min_time, &max_time);
4320 else if (op_type == RTE_BBDEV_OP_LDPC_ENC)
4321 iter = latency_test_ldpc_enc(op_params->mp, bufs,
4322 op_params->ref_enc_op, ad->dev_id, queue_id,
4323 num_to_process, burst_sz, &total_time,
4324 &min_time, &max_time);
4325 else if (op_type == RTE_BBDEV_OP_LDPC_DEC)
4326 iter = latency_test_ldpc_dec(op_params->mp, bufs,
4327 op_params->ref_dec_op, op_params->vector_mask,
4328 ad->dev_id, queue_id, num_to_process,
4329 burst_sz, &total_time, &min_time, &max_time,
4331 else /* RTE_BBDEV_OP_TURBO_ENC */
4332 iter = latency_test_enc(op_params->mp, bufs,
4333 op_params->ref_enc_op,
4334 ad->dev_id, queue_id,
4335 num_to_process, burst_sz, &total_time,
4336 &min_time, &max_time);
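/* Averages below are over 'iter' timed bursts; cycles are converted to
 * microseconds as cycles * 1000000 / rte_get_tsc_hz().
 */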
4341 printf("Operation latency:\n"
4342 "\tavg: %lg cycles, %lg us\n"
4343 "\tmin: %lg cycles, %lg us\n"
4344 "\tmax: %lg cycles, %lg us\n",
4345 (double)total_time / (double)iter,
4346 (double)(total_time * 1000000) / (double)iter /
4347 (double)rte_get_tsc_hz(), (double)min_time,
4348 (double)(min_time * 1000000) / (double)rte_get_tsc_hz(),
4349 (double)max_time, (double)(max_time * 1000000) /
4350 (double)rte_get_tsc_hz());
4352 return TEST_SUCCESS;
4356 latency_test(struct active_device *ad, struct test_op_params *op_params)
4358 return validation_latency_test(ad, op_params, true);
4362 validation_test(struct active_device *ad, struct test_op_params *op_params)
4364 return validation_latency_test(ad, op_params, false);
4367 #ifdef RTE_BBDEV_OFFLOAD_COST
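/* Copy the per-queue counters straight out of the device's internal
 * queue_stats so the offload-cost calculations below can work on a single
 * queue.
 */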
4369 get_bbdev_queue_stats(uint16_t dev_id, uint16_t queue_id,
4370 struct rte_bbdev_stats *stats)
4372 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
4373 struct rte_bbdev_stats *q_stats;
4375 if (queue_id >= dev->data->num_queues)
4378 q_stats = &dev->data->queues[queue_id].queue_stats;
4380 stats->enqueued_count = q_stats->enqueued_count;
4381 stats->dequeued_count = q_stats->dequeued_count;
4382 stats->enqueue_err_count = q_stats->enqueue_err_count;
4383 stats->dequeue_err_count = q_stats->dequeue_err_count;
4384 stats->acc_offload_cycles = q_stats->acc_offload_cycles;
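/* Offload cost for the Turbo decoder: the enqueue software cost is the
 * rdtsc-measured enqueue time minus the cycles the driver spent on the
 * accelerator (stats.acc_offload_cycles); the dequeue cost is timed for the
 * first dequeued operation only, after waiting WAIT_OFFLOAD_US for the
 * device to finish.
 */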
4390 offload_latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs,
4391 struct rte_bbdev_dec_op *ref_op, uint16_t dev_id,
4392 uint16_t queue_id, const uint16_t num_to_process,
4393 uint16_t burst_sz, struct test_time_stats *time_st)
4395 int i, dequeued, ret;
4396 struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
4397 uint64_t enq_start_time, deq_start_time;
4398 uint64_t enq_sw_last_time, deq_last_time;
4399 struct rte_bbdev_stats stats;
4401 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
4402 uint16_t enq = 0, deq = 0;
4404 if (unlikely(num_to_process - dequeued < burst_sz))
4405 burst_sz = num_to_process - dequeued;
4407 rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
4408 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
4409 copy_reference_dec_op(ops_enq, burst_sz, dequeued,
4415 /* Start time measurement for enqueue function offload latency */
4416 enq_start_time = rte_rdtsc_precise();
4417 do {
4418 enq += rte_bbdev_enqueue_dec_ops(dev_id, queue_id,
4419 &ops_enq[enq], burst_sz - enq);
4420 } while (unlikely(burst_sz != enq));
4422 ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
4423 TEST_ASSERT_SUCCESS(ret,
4424 "Failed to get stats for queue (%u) of device (%u)",
4427 enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
4428 stats.acc_offload_cycles;
4429 time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
4431 time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
4433 time_st->enq_sw_total_time += enq_sw_last_time;
4435 time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
4436 stats.acc_offload_cycles);
4437 time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
4438 stats.acc_offload_cycles);
4439 time_st->enq_acc_total_time += stats.acc_offload_cycles;
4441 /* Give the device time to process the ops */
4442 rte_delay_us(WAIT_OFFLOAD_US);
4444 /* Start time measurement for dequeue function offload latency */
4445 deq_start_time = rte_rdtsc_precise();
4446 /* Dequeue one operation */
4447 do {
4448 deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
4449 &ops_deq[deq], enq);
4450 } while (unlikely(deq == 0));
4452 deq_last_time = rte_rdtsc_precise() - deq_start_time;
4453 time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
4455 time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
4457 time_st->deq_total_time += deq_last_time;
4459 /* Dequeue remaining operations if needed */
4460 while (burst_sz != deq)
4461 deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
4462 &ops_deq[deq], burst_sz - deq);
4464 rte_bbdev_dec_op_free_bulk(ops_enq, deq);
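/* Same offload-cost measurement for the LDPC decoder, with HARQ data
 * preloaded to / retrieved from external DDR outside the timed region when
 * the extDdr capability is set.
 */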
4472 offload_latency_test_ldpc_dec(struct rte_mempool *mempool,
4473 struct test_buffers *bufs,
4474 struct rte_bbdev_dec_op *ref_op, uint16_t dev_id,
4475 uint16_t queue_id, const uint16_t num_to_process,
4476 uint16_t burst_sz, struct test_time_stats *time_st)
4478 int i, dequeued, ret;
4479 struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
4480 uint64_t enq_start_time, deq_start_time;
4481 uint64_t enq_sw_last_time, deq_last_time;
4482 struct rte_bbdev_stats stats;
4483 bool extDdr = ldpc_cap_flags &
4484 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
4486 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
4487 uint16_t enq = 0, deq = 0;
4489 if (unlikely(num_to_process - dequeued < burst_sz))
4490 burst_sz = num_to_process - dequeued;
4492 rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
4493 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
4494 copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued,
4503 preload_harq_ddr(dev_id, queue_id, ops_enq,
4506 /* Start time measurement for enqueue function offload latency */
4507 enq_start_time = rte_rdtsc_precise();
4508 do {
4509 enq += rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
4510 &ops_enq[enq], burst_sz - enq);
4511 } while (unlikely(burst_sz != enq));
4513 enq_sw_last_time = rte_rdtsc_precise() - enq_start_time;
4514 ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
4515 TEST_ASSERT_SUCCESS(ret,
4516 "Failed to get stats for queue (%u) of device (%u)",
4519 enq_sw_last_time -= stats.acc_offload_cycles;
4520 time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
4522 time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
4524 time_st->enq_sw_total_time += enq_sw_last_time;
4526 time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
4527 stats.acc_offload_cycles);
4528 time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
4529 stats.acc_offload_cycles);
4530 time_st->enq_acc_total_time += stats.acc_offload_cycles;
4532 /* Give the device time to process the ops */
4533 rte_delay_us(WAIT_OFFLOAD_US);
4535 /* Start time measurement for dequeue function offload latency */
4536 deq_start_time = rte_rdtsc_precise();
4537 /* Dequeue one operation */
4538 do {
4539 deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
4540 &ops_deq[deq], enq);
4541 } while (unlikely(deq == 0));
4543 deq_last_time = rte_rdtsc_precise() - deq_start_time;
4544 time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
4546 time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
4548 time_st->deq_total_time += deq_last_time;
4550 /* Dequeue remaining operations if needed */
4551 while (burst_sz != deq)
4552 deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
4553 &ops_deq[deq], burst_sz - deq);
4556 /* Read loopback is not thread safe */
4557 retrieve_harq_ddr(dev_id, queue_id, ops_enq, burst_sz);
4560 rte_bbdev_dec_op_free_bulk(ops_enq, deq);
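/* Offload cost measurement for the Turbo encoder; same methodology as the
 * decoder variant above.
 */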
4568 offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs,
4569 struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
4570 uint16_t queue_id, const uint16_t num_to_process,
4571 uint16_t burst_sz, struct test_time_stats *time_st)
4573 int i, dequeued, ret;
4574 struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
4575 uint64_t enq_start_time, deq_start_time;
4576 uint64_t enq_sw_last_time, deq_last_time;
4577 struct rte_bbdev_stats stats;
4579 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
4580 uint16_t enq = 0, deq = 0;
4582 if (unlikely(num_to_process - dequeued < burst_sz))
4583 burst_sz = num_to_process - dequeued;
4585 ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
4586 TEST_ASSERT_SUCCESS(ret,
4587 "rte_bbdev_enc_op_alloc_bulk() failed");
4588 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
4589 copy_reference_enc_op(ops_enq, burst_sz, dequeued,
4594 /* Start time measurement for enqueue function offload latency */
4595 enq_start_time = rte_rdtsc_precise();
4596 do {
4597 enq += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
4598 &ops_enq[enq], burst_sz - enq);
4599 } while (unlikely(burst_sz != enq));
4601 enq_sw_last_time = rte_rdtsc_precise() - enq_start_time;
4603 ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
4604 TEST_ASSERT_SUCCESS(ret,
4605 "Failed to get stats for queue (%u) of device (%u)",
4607 enq_sw_last_time -= stats.acc_offload_cycles;
4608 time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
4610 time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
4612 time_st->enq_sw_total_time += enq_sw_last_time;
4614 time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
4615 stats.acc_offload_cycles);
4616 time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
4617 stats.acc_offload_cycles);
4618 time_st->enq_acc_total_time += stats.acc_offload_cycles;
4620 /* Give the device time to process the ops */
4621 rte_delay_us(WAIT_OFFLOAD_US);
4623 /* Start time measurement for dequeue function offload latency */
4624 deq_start_time = rte_rdtsc_precise();
4625 /* Dequeue one operation */
4626 do {
4627 deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
4628 &ops_deq[deq], enq);
4629 } while (unlikely(deq == 0));
4631 deq_last_time = rte_rdtsc_precise() - deq_start_time;
4632 time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
4634 time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
4636 time_st->deq_total_time += deq_last_time;
4638 while (burst_sz != deq)
4639 deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
4640 &ops_deq[deq], burst_sz - deq);
4642 rte_bbdev_enc_op_free_bulk(ops_enq, deq);
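/* Offload cost measurement for the LDPC encoder. */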
4650 offload_latency_test_ldpc_enc(struct rte_mempool *mempool,
4651 struct test_buffers *bufs,
4652 struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
4653 uint16_t queue_id, const uint16_t num_to_process,
4654 uint16_t burst_sz, struct test_time_stats *time_st)
4656 int i, dequeued, ret;
4657 struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
4658 uint64_t enq_start_time, deq_start_time;
4659 uint64_t enq_sw_last_time, deq_last_time;
4660 struct rte_bbdev_stats stats;
4662 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
4663 uint16_t enq = 0, deq = 0;
4665 if (unlikely(num_to_process - dequeued < burst_sz))
4666 burst_sz = num_to_process - dequeued;
4668 ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
4669 TEST_ASSERT_SUCCESS(ret,
4670 "rte_bbdev_enc_op_alloc_bulk() failed");
4671 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
4672 copy_reference_ldpc_enc_op(ops_enq, burst_sz, dequeued,
4677 /* Start time measurement for enqueue function offload latency */
4678 enq_start_time = rte_rdtsc_precise();
4679 do {
4680 enq += rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
4681 &ops_enq[enq], burst_sz - enq);
4682 } while (unlikely(burst_sz != enq));
4684 enq_sw_last_time = rte_rdtsc_precise() - enq_start_time;
4685 ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
4686 TEST_ASSERT_SUCCESS(ret,
4687 "Failed to get stats for queue (%u) of device (%u)",
4690 enq_sw_last_time -= stats.acc_offload_cycles;
4691 time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
4693 time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
4695 time_st->enq_sw_total_time += enq_sw_last_time;
4697 time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
4698 stats.acc_offload_cycles);
4699 time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
4700 stats.acc_offload_cycles);
4701 time_st->enq_acc_total_time += stats.acc_offload_cycles;
4703 /* Give the device time to process the ops */
4704 rte_delay_us(WAIT_OFFLOAD_US);
4706 /* Start time measurement for dequeue function offload latency */
4707 deq_start_time = rte_rdtsc_precise();
4708 /* Dequeue one operation */
4709 do {
4710 deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
4711 &ops_deq[deq], enq);
4712 } while (unlikely(deq == 0));
4714 deq_last_time = rte_rdtsc_precise() - deq_start_time;
4715 time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
4717 time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
4719 time_st->deq_total_time += deq_last_time;
4721 while (burst_sz != deq)
4722 deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
4723 &ops_deq[deq], burst_sz - deq);
4725 rte_bbdev_enc_op_free_bulk(ops_enq, deq);
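/* Offload cost test entry point: dispatches to the per-op-type helper,
 * prints the enqueue (software and accelerator) and dequeue latencies and
 * finally cross-checks the queue statistics counters.
 */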
4734 offload_cost_test(struct active_device *ad,
4735 struct test_op_params *op_params)
4737 #ifndef RTE_BBDEV_OFFLOAD_COST
4739 RTE_SET_USED(op_params);
4740 printf("Offload latency test is disabled.\n");
4741 printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
4742 return TEST_SKIPPED;
4745 uint16_t burst_sz = op_params->burst_sz;
4746 const uint16_t num_to_process = op_params->num_to_process;
4747 const enum rte_bbdev_op_type op_type = test_vector.op_type;
4748 const uint16_t queue_id = ad->queue_ids[0];
4749 struct test_buffers *bufs = NULL;
4750 struct rte_bbdev_info info;
4751 const char *op_type_str;
4752 struct test_time_stats time_st;
4754 memset(&time_st, 0, sizeof(struct test_time_stats));
4755 time_st.enq_sw_min_time = UINT64_MAX;
4756 time_st.enq_acc_min_time = UINT64_MAX;
4757 time_st.deq_min_time = UINT64_MAX;
4759 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
4760 "BURST_SIZE should be <= %u", MAX_BURST);
4762 rte_bbdev_info_get(ad->dev_id, &info);
4763 bufs = &op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
4765 op_type_str = rte_bbdev_op_type_str(op_type);
4766 TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
4768 printf("+ ------------------------------------------------------- +\n");
4769 printf("== test: offload latency test\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
4770 info.dev_name, burst_sz, num_to_process, op_type_str);
4772 if (op_type == RTE_BBDEV_OP_TURBO_DEC)
4773 iter = offload_latency_test_dec(op_params->mp, bufs,
4774 op_params->ref_dec_op, ad->dev_id, queue_id,
4775 num_to_process, burst_sz, &time_st);
4776 else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
4777 iter = offload_latency_test_enc(op_params->mp, bufs,
4778 op_params->ref_enc_op, ad->dev_id, queue_id,
4779 num_to_process, burst_sz, &time_st);
4780 else if (op_type == RTE_BBDEV_OP_LDPC_ENC)
4781 iter = offload_latency_test_ldpc_enc(op_params->mp, bufs,
4782 op_params->ref_enc_op, ad->dev_id, queue_id,
4783 num_to_process, burst_sz, &time_st);
4784 else if (op_type == RTE_BBDEV_OP_LDPC_DEC)
4785 iter = offload_latency_test_ldpc_dec(op_params->mp, bufs,
4786 op_params->ref_dec_op, ad->dev_id, queue_id,
4787 num_to_process, burst_sz, &time_st);
4789 iter = offload_latency_test_enc(op_params->mp, bufs,
4790 op_params->ref_enc_op, ad->dev_id, queue_id,
4791 num_to_process, burst_sz, &time_st);
4796 printf("Enqueue driver offload cost latency:\n"
4797 "\tavg: %lg cycles, %lg us\n"
4798 "\tmin: %lg cycles, %lg us\n"
4799 "\tmax: %lg cycles, %lg us\n"
4800 "Enqueue accelerator offload cost latency:\n"
4801 "\tavg: %lg cycles, %lg us\n"
4802 "\tmin: %lg cycles, %lg us\n"
4803 "\tmax: %lg cycles, %lg us\n",
4804 (double)time_st.enq_sw_total_time / (double)iter,
4805 (double)(time_st.enq_sw_total_time * 1000000) /
4806 (double)iter / (double)rte_get_tsc_hz(),
4807 (double)time_st.enq_sw_min_time,
4808 (double)(time_st.enq_sw_min_time * 1000000) /
4809 rte_get_tsc_hz(), (double)time_st.enq_sw_max_time,
4810 (double)(time_st.enq_sw_max_time * 1000000) /
4811 rte_get_tsc_hz(), (double)time_st.enq_acc_total_time /
4813 (double)(time_st.enq_acc_total_time * 1000000) /
4814 (double)iter / (double)rte_get_tsc_hz(),
4815 (double)time_st.enq_acc_min_time,
4816 (double)(time_st.enq_acc_min_time * 1000000) /
4817 rte_get_tsc_hz(), (double)time_st.enq_acc_max_time,
4818 (double)(time_st.enq_acc_max_time * 1000000) /
4821 printf("Dequeue offload cost latency - one op:\n"
4822 "\tavg: %lg cycles, %lg us\n"
4823 "\tmin: %lg cycles, %lg us\n"
4824 "\tmax: %lg cycles, %lg us\n",
4825 (double)time_st.deq_total_time / (double)iter,
4826 (double)(time_st.deq_total_time * 1000000) /
4827 (double)iter / (double)rte_get_tsc_hz(),
4828 (double)time_st.deq_min_time,
4829 (double)(time_st.deq_min_time * 1000000) /
4830 rte_get_tsc_hz(), (double)time_st.deq_max_time,
4831 (double)(time_st.deq_max_time * 1000000) /
4834 struct rte_bbdev_stats stats = {0};
4835 get_bbdev_queue_stats(ad->dev_id, queue_id, &stats);
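/* Every op enqueued must have been dequeued and no errors reported; the
 * LDPC decode case skips the exact-count check, presumably because HARQ
 * loopback traffic also passes through the same queue.
 */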
4836 if (op_type != RTE_BBDEV_OP_LDPC_DEC) {
4837 TEST_ASSERT_SUCCESS(stats.enqueued_count != num_to_process,
4838 "Mismatch in enqueue count %10"PRIu64" %d",
4839 stats.enqueued_count, num_to_process);
4840 TEST_ASSERT_SUCCESS(stats.dequeued_count != num_to_process,
4841 "Mismatch in dequeue count %10"PRIu64" %d",
4842 stats.dequeued_count, num_to_process);
4844 TEST_ASSERT_SUCCESS(stats.enqueue_err_count != 0,
4845 "Enqueue count Error %10"PRIu64"",
4846 stats.enqueue_err_count);
4847 TEST_ASSERT_SUCCESS(stats.dequeue_err_count != 0,
4848 "Dequeue count Error (%10"PRIu64"",
4849 stats.dequeue_err_count);
4851 return TEST_SUCCESS;
4855 #ifdef RTE_BBDEV_OFFLOAD_COST
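/* Measure the cost of dequeue calls that return nothing: no ops are
 * enqueued beforehand, so this isolates the polling overhead of an empty
 * queue.
 */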
4857 offload_latency_empty_q_test_dec(uint16_t dev_id, uint16_t queue_id,
4858 const uint16_t num_to_process, uint16_t burst_sz,
4859 uint64_t *deq_total_time, uint64_t *deq_min_time,
4860 uint64_t *deq_max_time, const enum rte_bbdev_op_type op_type)
4863 struct rte_bbdev_dec_op *ops[MAX_BURST];
4864 uint64_t deq_start_time, deq_last_time;
4866 /* Test deq offload latency from an empty queue */
4868 for (i = 0, deq_total = 0; deq_total < num_to_process;
4869 ++i, deq_total += burst_sz) {
4870 deq_start_time = rte_rdtsc_precise();
4872 if (unlikely(num_to_process - deq_total < burst_sz))
4873 burst_sz = num_to_process - deq_total;
4874 if (op_type == RTE_BBDEV_OP_LDPC_DEC)
4875 rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id, ops,
4878 rte_bbdev_dequeue_dec_ops(dev_id, queue_id, ops,
4881 deq_last_time = rte_rdtsc_precise() - deq_start_time;
4882 *deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
4883 *deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
4884 *deq_total_time += deq_last_time;
4891 offload_latency_empty_q_test_enc(uint16_t dev_id, uint16_t queue_id,
4892 const uint16_t num_to_process, uint16_t burst_sz,
4893 uint64_t *deq_total_time, uint64_t *deq_min_time,
4894 uint64_t *deq_max_time, const enum rte_bbdev_op_type op_type)
4897 struct rte_bbdev_enc_op *ops[MAX_BURST];
4898 uint64_t deq_start_time, deq_last_time;
4900 /* Test deq offload latency from an empty queue */
4901 for (i = 0, deq_total = 0; deq_total < num_to_process;
4902 ++i, deq_total += burst_sz) {
4903 deq_start_time = rte_rdtsc_precise();
4905 if (unlikely(num_to_process - deq_total < burst_sz))
4906 burst_sz = num_to_process - deq_total;
4907 if (op_type == RTE_BBDEV_OP_LDPC_ENC)
4908 rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id, ops,
4911 rte_bbdev_dequeue_enc_ops(dev_id, queue_id, ops,
4914 deq_last_time = rte_rdtsc_precise() - deq_start_time;
4915 *deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
4916 *deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
4917 *deq_total_time += deq_last_time;
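/* Empty-dequeue test entry point: prints avg/min/max polling cost. */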
4926 offload_latency_empty_q_test(struct active_device *ad,
4927 struct test_op_params *op_params)
4929 #ifndef RTE_BBDEV_OFFLOAD_COST
4931 RTE_SET_USED(op_params);
4932 printf("Offload latency empty dequeue test is disabled.\n");
4933 printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
4934 return TEST_SKIPPED;
4937 uint64_t deq_total_time, deq_min_time, deq_max_time;
4938 uint16_t burst_sz = op_params->burst_sz;
4939 const uint16_t num_to_process = op_params->num_to_process;
4940 const enum rte_bbdev_op_type op_type = test_vector.op_type;
4941 const uint16_t queue_id = ad->queue_ids[0];
4942 struct rte_bbdev_info info;
4943 const char *op_type_str;
4945 deq_total_time = deq_max_time = 0;
4946 deq_min_time = UINT64_MAX;
4948 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
4949 "BURST_SIZE should be <= %u", MAX_BURST);
4951 rte_bbdev_info_get(ad->dev_id, &info);
4953 op_type_str = rte_bbdev_op_type_str(op_type);
4954 TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
4956 printf("+ ------------------------------------------------------- +\n");
4957 printf("== test: offload latency empty dequeue\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
4958 info.dev_name, burst_sz, num_to_process, op_type_str);
4960 if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
4961 op_type == RTE_BBDEV_OP_LDPC_DEC)
4962 iter = offload_latency_empty_q_test_dec(ad->dev_id, queue_id,
4963 num_to_process, burst_sz, &deq_total_time,
4964 &deq_min_time, &deq_max_time, op_type);
4966 iter = offload_latency_empty_q_test_enc(ad->dev_id, queue_id,
4967 num_to_process, burst_sz, &deq_total_time,
4968 &deq_min_time, &deq_max_time, op_type);
4973 printf("Empty dequeue offload:\n"
4974 "\tavg: %lg cycles, %lg us\n"
4975 "\tmin: %lg cycles, %lg us\n"
4976 "\tmax: %lg cycles, %lg us\n",
4977 (double)deq_total_time / (double)iter,
4978 (double)(deq_total_time * 1000000) / (double)iter /
4979 (double)rte_get_tsc_hz(), (double)deq_min_time,
4980 (double)(deq_min_time * 1000000) / rte_get_tsc_hz(),
4981 (double)deq_max_time, (double)(deq_max_time * 1000000) /
4984 return TEST_SUCCESS;
4991 return run_test_case(bler_test);
4997 return run_test_case(throughput_test);
5001 offload_cost_tc(void)
5003 return run_test_case(offload_cost_test);
5007 offload_latency_empty_q_tc(void)
5009 return run_test_case(offload_latency_empty_q_test);
5015 return run_test_case(latency_test);
5021 return run_test_case(validation_test);
5027 return run_test_case(throughput_test);
5030 static struct unit_test_suite bbdev_bler_testsuite = {
5031 .suite_name = "BBdev BLER Tests",
5032 .setup = testsuite_setup,
5033 .teardown = testsuite_teardown,
5034 .unit_test_cases = {
5035 TEST_CASE_ST(ut_setup, ut_teardown, bler_tc),
5036 TEST_CASES_END() /**< NULL terminate unit test array */
5040 static struct unit_test_suite bbdev_throughput_testsuite = {
5041 .suite_name = "BBdev Throughput Tests",
5042 .setup = testsuite_setup,
5043 .teardown = testsuite_teardown,
5044 .unit_test_cases = {
5045 TEST_CASE_ST(ut_setup, ut_teardown, throughput_tc),
5046 TEST_CASES_END() /**< NULL terminate unit test array */
5050 static struct unit_test_suite bbdev_validation_testsuite = {
5051 .suite_name = "BBdev Validation Tests",
5052 .setup = testsuite_setup,
5053 .teardown = testsuite_teardown,
5054 .unit_test_cases = {
5055 TEST_CASE_ST(ut_setup, ut_teardown, validation_tc),
5056 TEST_CASES_END() /**< NULL terminate unit test array */
5060 static struct unit_test_suite bbdev_latency_testsuite = {
5061 .suite_name = "BBdev Latency Tests",
5062 .setup = testsuite_setup,
5063 .teardown = testsuite_teardown,
5064 .unit_test_cases = {
5065 TEST_CASE_ST(ut_setup, ut_teardown, latency_tc),
5066 TEST_CASES_END() /**< NULL terminate unit test array */
5070 static struct unit_test_suite bbdev_offload_cost_testsuite = {
5071 .suite_name = "BBdev Offload Cost Tests",
5072 .setup = testsuite_setup,
5073 .teardown = testsuite_teardown,
5074 .unit_test_cases = {
5075 TEST_CASE_ST(ut_setup, ut_teardown, offload_cost_tc),
5076 TEST_CASE_ST(ut_setup, ut_teardown, offload_latency_empty_q_tc),
5077 TEST_CASES_END() /**< NULL terminate unit test array */
5081 static struct unit_test_suite bbdev_interrupt_testsuite = {
5082 .suite_name = "BBdev Interrupt Tests",
5083 .setup = interrupt_testsuite_setup,
5084 .teardown = testsuite_teardown,
5085 .unit_test_cases = {
5086 TEST_CASE_ST(ut_setup, ut_teardown, interrupt_tc),
5087 TEST_CASES_END() /**< NULL terminate unit test array */
5091 REGISTER_TEST_COMMAND(bler, bbdev_bler_testsuite);
5092 REGISTER_TEST_COMMAND(throughput, bbdev_throughput_testsuite);
5093 REGISTER_TEST_COMMAND(validation, bbdev_validation_testsuite);
5094 REGISTER_TEST_COMMAND(latency, bbdev_latency_testsuite);
5095 REGISTER_TEST_COMMAND(offload, bbdev_offload_cost_testsuite);
5096 REGISTER_TEST_COMMAND(interrupt, bbdev_interrupt_testsuite);