/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
/* Standard headers needed by the code below (bool, printf, memcpy, round) */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <rte_common.h>
#include <rte_launch.h>
#include <rte_bbdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hexdump.h>
#include <rte_interrupts.h>

#ifdef RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC
#include <fpga_lte_fec.h>
#endif

#include "test_bbdev_vector.h"
#define GET_SOCKET(socket_id) (((socket_id) == SOCKET_ID_ANY) ? 0 : (socket_id))

#define MAX_QUEUES RTE_MAX_LCORE
#define TEST_REPETITIONS 1000

#ifdef RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC
#define FPGA_PF_DRIVER_NAME ("intel_fpga_lte_fec_pf")
#define FPGA_VF_DRIVER_NAME ("intel_fpga_lte_fec_vf")
#define VF_UL_QUEUE_VALUE 4
#define VF_DL_QUEUE_VALUE 4
#define UL_BANDWIDTH 3
#define DL_BANDWIDTH 3
#define UL_LOAD_BALANCE 128
#define DL_LOAD_BALANCE 128
#define FLR_TIMEOUT 610
#endif

#define OPS_CACHE_SIZE 256U
#define OPS_POOL_SIZE_MIN 511U /* 0.5K per queue */

#define INVALID_QUEUE_ID -1
static struct test_bbdev_vector test_vector;

/* Switch between PMD (polling) and interrupt mode for the throughput test cases */
static bool intr_enabled;
/* Represents tested active devices */
static struct active_device {
	const char *driver_name;
	uint8_t dev_id;
	uint16_t supported_ops;
	uint16_t queue_ids[MAX_QUEUES];
	uint16_t nb_queues;
	struct rte_mempool *ops_mempool;
	struct rte_mempool *in_mbuf_pool;
	struct rte_mempool *hard_out_mbuf_pool;
	struct rte_mempool *soft_out_mbuf_pool;
	struct rte_mempool *harq_in_mbuf_pool;
	struct rte_mempool *harq_out_mbuf_pool;
} active_devs[RTE_BBDEV_MAX_DEVS];

static uint8_t nb_active_devs;
/* Data buffers used by BBDEV ops */
struct test_buffers {
	struct rte_bbdev_op_data *inputs;
	struct rte_bbdev_op_data *hard_outputs;
	struct rte_bbdev_op_data *soft_outputs;
	struct rte_bbdev_op_data *harq_inputs;
	struct rte_bbdev_op_data *harq_outputs;
};
/* Operation parameters specific for given test case */
struct test_op_params {
	struct rte_mempool *mp;
	struct rte_bbdev_dec_op *ref_dec_op;
	struct rte_bbdev_enc_op *ref_enc_op;
	uint16_t burst_sz;
	uint16_t num_to_process;
	uint16_t num_lcores;
	int vector_mask;
	rte_atomic16_t sync;
	struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
};
/* Contains per-lcore parameters */
struct thread_params {
	uint8_t dev_id;
	uint16_t queue_id;
	uint64_t start_time;
	double ops_per_sec;
	double mbps;
	uint8_t iter_count;
	rte_atomic16_t nb_dequeued;
	rte_atomic16_t processing_status;
	rte_atomic16_t burst_sz;
	struct test_op_params *op_params;
	struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
	struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
};
#ifdef RTE_BBDEV_OFFLOAD_COST
/* Stores time statistics */
struct test_time_stats {
	/* Stores software enqueue total working time */
	uint64_t enq_sw_total_time;
	/* Stores minimum value of software enqueue working time */
	uint64_t enq_sw_min_time;
	/* Stores maximum value of software enqueue working time */
	uint64_t enq_sw_max_time;
	/* Stores accelerator enqueue total working time */
	uint64_t enq_acc_total_time;
	/* Stores minimum value of accelerator enqueue working time */
	uint64_t enq_acc_min_time;
	/* Stores maximum value of accelerator enqueue working time */
	uint64_t enq_acc_max_time;
	/* Stores dequeue total working time */
	uint64_t deq_total_time;
	/* Stores minimum value of dequeue working time */
	uint64_t deq_min_time;
	/* Stores maximum value of dequeue working time */
	uint64_t deq_max_time;
};
#endif

typedef int (test_case_function)(struct active_device *ad,
		struct test_op_params *op_params);
static void
mbuf_reset(struct rte_mbuf *m)
/* Check whether the given flag is set in a bitmap (non-zero when set) */
static inline bool
check_bit(uint32_t bitmap, uint32_t bitmask)
{
	return bitmap & bitmask;
}

static void
set_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
{
	ad->supported_ops |= (1 << op_type);
}

static int
is_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
{
	return ad->supported_ops & (1 << op_type);
}

static bool
flags_match(uint32_t flags_req, uint32_t flags_present)
{
	return (flags_req & flags_present) == flags_req;
}

static void
clear_soft_out_cap(uint32_t *op_flags)
{
	*op_flags &= ~RTE_BBDEV_TURBO_SOFT_OUTPUT;
	*op_flags &= ~RTE_BBDEV_TURBO_POS_LLR_1_BIT_SOFT_OUT;
	*op_flags &= ~RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT;
}

static int
check_dev_cap(const struct rte_bbdev_info *dev_info)
{
	unsigned int i, nb_inputs, nb_soft_outputs, nb_hard_outputs,
			nb_harq_inputs, nb_harq_outputs;
	const struct rte_bbdev_op_cap *op_cap = dev_info->drv.capabilities;

	nb_inputs = test_vector.entries[DATA_INPUT].nb_segments;
	nb_soft_outputs = test_vector.entries[DATA_SOFT_OUTPUT].nb_segments;
	nb_hard_outputs = test_vector.entries[DATA_HARD_OUTPUT].nb_segments;
	nb_harq_inputs = test_vector.entries[DATA_HARQ_INPUT].nb_segments;
	nb_harq_outputs = test_vector.entries[DATA_HARQ_OUTPUT].nb_segments;
	for (i = 0; op_cap->type != RTE_BBDEV_OP_NONE; ++i, ++op_cap) {
		if (op_cap->type != test_vector.op_type)
			continue;

		if (op_cap->type == RTE_BBDEV_OP_TURBO_DEC) {
			const struct rte_bbdev_op_cap_turbo_dec *cap =
					&op_cap->cap.turbo_dec;
			/* Ignore lack of soft output capability, just skip
			 * checking if soft output is valid.
			 */
			if ((test_vector.turbo_dec.op_flags &
					RTE_BBDEV_TURBO_SOFT_OUTPUT) &&
					!(cap->capability_flags &
					RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
				printf(
					"INFO: Device \"%s\" does not support soft output - soft output flags will be ignored.\n",
					dev_info->dev_name);
				clear_soft_out_cap(
					&test_vector.turbo_dec.op_flags);
			}

			if (!flags_match(test_vector.turbo_dec.op_flags,
					cap->capability_flags))
				return TEST_FAILED;
			if (nb_inputs > cap->num_buffers_src) {
				printf("Too many inputs defined: %u, max: %u\n",
					nb_inputs, cap->num_buffers_src);
				return TEST_FAILED;
			}
			if (nb_soft_outputs > cap->num_buffers_soft_out &&
					(test_vector.turbo_dec.op_flags &
					RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
				printf(
					"Too many soft outputs defined: %u, max: %u\n",
					nb_soft_outputs,
					cap->num_buffers_soft_out);
				return TEST_FAILED;
			}
			if (nb_hard_outputs > cap->num_buffers_hard_out) {
				printf(
					"Too many hard outputs defined: %u, max: %u\n",
					nb_hard_outputs,
					cap->num_buffers_hard_out);
				return TEST_FAILED;
			}
			if (intr_enabled && !(cap->capability_flags &
					RTE_BBDEV_TURBO_DEC_INTERRUPTS)) {
				printf(
					"Dequeue interrupts are not supported!\n");
				return TEST_FAILED;
			}

			return TEST_SUCCESS;
		} else if (op_cap->type == RTE_BBDEV_OP_TURBO_ENC) {
			const struct rte_bbdev_op_cap_turbo_enc *cap =
					&op_cap->cap.turbo_enc;

			if (!flags_match(test_vector.turbo_enc.op_flags,
					cap->capability_flags))
				return TEST_FAILED;
			if (nb_inputs > cap->num_buffers_src) {
				printf("Too many inputs defined: %u, max: %u\n",
					nb_inputs, cap->num_buffers_src);
				return TEST_FAILED;
			}
			if (nb_hard_outputs > cap->num_buffers_dst) {
				printf(
					"Too many hard outputs defined: %u, max: %u\n",
					nb_hard_outputs, cap->num_buffers_dst);
				return TEST_FAILED;
			}
			if (intr_enabled && !(cap->capability_flags &
					RTE_BBDEV_TURBO_ENC_INTERRUPTS)) {
				printf(
					"Dequeue interrupts are not supported!\n");
				return TEST_FAILED;
			}

			return TEST_SUCCESS;
		} else if (op_cap->type == RTE_BBDEV_OP_LDPC_ENC) {
			const struct rte_bbdev_op_cap_ldpc_enc *cap =
					&op_cap->cap.ldpc_enc;

			if (!flags_match(test_vector.ldpc_enc.op_flags,
					cap->capability_flags)) {
				printf("Flag Mismatch\n");
				return TEST_FAILED;
			}
			if (nb_inputs > cap->num_buffers_src) {
				printf("Too many inputs defined: %u, max: %u\n",
					nb_inputs, cap->num_buffers_src);
				return TEST_FAILED;
			}
			if (nb_hard_outputs > cap->num_buffers_dst) {
				printf(
					"Too many hard outputs defined: %u, max: %u\n",
					nb_hard_outputs, cap->num_buffers_dst);
				return TEST_FAILED;
			}
			if (intr_enabled && !(cap->capability_flags &
					RTE_BBDEV_TURBO_ENC_INTERRUPTS)) {
				printf(
					"Dequeue interrupts are not supported!\n");
				return TEST_FAILED;
			}

			return TEST_SUCCESS;
		} else if (op_cap->type == RTE_BBDEV_OP_LDPC_DEC) {
			const struct rte_bbdev_op_cap_ldpc_dec *cap =
					&op_cap->cap.ldpc_dec;

			if (!flags_match(test_vector.ldpc_dec.op_flags,
					cap->capability_flags)) {
				printf("Flag Mismatch\n");
				return TEST_FAILED;
			}
			if (nb_inputs > cap->num_buffers_src) {
				printf("Too many inputs defined: %u, max: %u\n",
					nb_inputs, cap->num_buffers_src);
				return TEST_FAILED;
			}
			if (nb_hard_outputs > cap->num_buffers_hard_out) {
				printf(
					"Too many hard outputs defined: %u, max: %u\n",
					nb_hard_outputs,
					cap->num_buffers_hard_out);
				return TEST_FAILED;
			}
			if (nb_harq_inputs > cap->num_buffers_hard_out) {
				printf(
					"Too many HARQ inputs defined: %u, max: %u\n",
					nb_harq_inputs,
					cap->num_buffers_hard_out);
				return TEST_FAILED;
			}
			if (nb_harq_outputs > cap->num_buffers_hard_out) {
				printf(
					"Too many HARQ outputs defined: %u, max: %u\n",
					nb_harq_outputs,
					cap->num_buffers_hard_out);
				return TEST_FAILED;
			}
			if (intr_enabled && !(cap->capability_flags &
					RTE_BBDEV_TURBO_DEC_INTERRUPTS)) {
				printf(
					"Dequeue interrupts are not supported!\n");
				return TEST_FAILED;
			}

			return TEST_SUCCESS;
		}
	}

	if ((i == 0) && (test_vector.op_type == RTE_BBDEV_OP_NONE))
		return TEST_SUCCESS; /* Special case for NULL device */

	return TEST_FAILED;
}
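/*
 * Note on the capability matching above: flags_match() requires every flag
 * requested by the test vector to be present in the device capability mask,
 * while tolerating extra capability bits, e.g.
 *	flags_match(0x3, 0x7) == true  (both requested bits present)
 *	flags_match(0x3, 0x1) == false (one requested bit missing)
 */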
/* Calculates an optimal mempool size, not smaller than val */
static unsigned int
optimal_mempool_size(unsigned int val)
{
	return rte_align32pow2(val + 1) - 1;
}
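/*
 * Note: per the rte_mempool guidelines, a size of the form 2^n - 1 is the
 * memory-optimal choice because the ring backing the pool is internally
 * rounded up to a power of two; 2^n - 1 elements waste the least ring space.
 * Worked example: val = 600 -> rte_align32pow2(601) = 1024 -> result 1023.
 */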
/* Allocates an mbuf mempool for inputs and outputs */
static struct rte_mempool *
create_mbuf_pool(struct op_data_entries *entries, uint8_t dev_id,
		int socket_id, unsigned int mbuf_pool_size,
		const char *op_type_str)
{
	unsigned int i;
	uint32_t max_seg_sz = 0;
	char pool_name[RTE_MEMPOOL_NAMESIZE];

	/* find max input segment size */
	for (i = 0; i < entries->nb_segments; ++i)
		if (entries->segments[i].length > max_seg_sz)
			max_seg_sz = entries->segments[i].length;

	snprintf(pool_name, sizeof(pool_name), "%s_pool_%u", op_type_str,
			dev_id);
	return rte_pktmbuf_pool_create(pool_name, mbuf_pool_size, 0, 0,
			RTE_MAX(max_seg_sz + RTE_PKTMBUF_HEADROOM,
			(unsigned int)RTE_MBUF_DEFAULT_BUF_SIZE), socket_id);
}
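/*
 * Note: the element size above is the larger of the biggest reference
 * segment (plus mbuf headroom) and RTE_MBUF_DEFAULT_BUF_SIZE, so a single
 * mbuf segment can always hold one test-vector segment without chaining.
 */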
static int
create_mempools(struct active_device *ad, int socket_id,
		enum rte_bbdev_op_type org_op_type, uint16_t num_ops)
{
	struct rte_mempool *mp;
	unsigned int ops_pool_size, mbuf_pool_size = 0;
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	const char *op_type_str;
	enum rte_bbdev_op_type op_type = org_op_type;

	struct op_data_entries *in = &test_vector.entries[DATA_INPUT];
	struct op_data_entries *hard_out =
			&test_vector.entries[DATA_HARD_OUTPUT];
	struct op_data_entries *soft_out =
			&test_vector.entries[DATA_SOFT_OUTPUT];
	struct op_data_entries *harq_in =
			&test_vector.entries[DATA_HARQ_INPUT];
	struct op_data_entries *harq_out =
			&test_vector.entries[DATA_HARQ_OUTPUT];

	/* allocate ops mempool */
	ops_pool_size = optimal_mempool_size(RTE_MAX(
			/* Ops used plus 1 reference op */
			RTE_MAX((unsigned int)(ad->nb_queues * num_ops + 1),
			/* Minimal cache size plus 1 reference op */
			(unsigned int)(1.5 * rte_lcore_count() *
					OPS_CACHE_SIZE + 1)),
			OPS_POOL_SIZE_MIN));

	if (org_op_type == RTE_BBDEV_OP_NONE)
		op_type = RTE_BBDEV_OP_TURBO_ENC;

	op_type_str = rte_bbdev_op_type_str(op_type);
	TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);

	snprintf(pool_name, sizeof(pool_name), "%s_pool_%u", op_type_str,
			ad->dev_id);
	mp = rte_bbdev_op_pool_create(pool_name, op_type,
			ops_pool_size, OPS_CACHE_SIZE, socket_id);
	TEST_ASSERT_NOT_NULL(mp,
			"ERROR Failed to create %u items ops pool for dev %u on socket %u.",
			ops_pool_size, ad->dev_id, socket_id);
	ad->ops_mempool = mp;

	/* Do not create inputs and outputs mbufs for BaseBand Null Device */
	if (org_op_type == RTE_BBDEV_OP_NONE)
		return TEST_SUCCESS;

	/* Inputs */
	mbuf_pool_size = optimal_mempool_size(ops_pool_size * in->nb_segments);
	mp = create_mbuf_pool(in, ad->dev_id, socket_id, mbuf_pool_size, "in");
	TEST_ASSERT_NOT_NULL(mp,
			"ERROR Failed to create %u items input pktmbuf pool for dev %u on socket %u.",
			mbuf_pool_size, ad->dev_id, socket_id);
	ad->in_mbuf_pool = mp;

	/* Hard outputs */
	mbuf_pool_size = optimal_mempool_size(ops_pool_size *
			hard_out->nb_segments);
	mp = create_mbuf_pool(hard_out, ad->dev_id, socket_id, mbuf_pool_size,
			"hard_out");
	TEST_ASSERT_NOT_NULL(mp,
			"ERROR Failed to create %u items hard output pktmbuf pool for dev %u on socket %u.",
			mbuf_pool_size, ad->dev_id, socket_id);
	ad->hard_out_mbuf_pool = mp;

	/* Soft outputs */
	if (soft_out->nb_segments > 0) {
		mbuf_pool_size = optimal_mempool_size(ops_pool_size *
				soft_out->nb_segments);
		mp = create_mbuf_pool(soft_out, ad->dev_id, socket_id,
				mbuf_pool_size, "soft_out");
		TEST_ASSERT_NOT_NULL(mp,
				"ERROR Failed to create %uB soft output pktmbuf pool for dev %u on socket %u.",
				mbuf_pool_size, ad->dev_id, socket_id);
		ad->soft_out_mbuf_pool = mp;
	}

	/* HARQ inputs */
	if (harq_in->nb_segments > 0) {
		mbuf_pool_size = optimal_mempool_size(ops_pool_size *
				harq_in->nb_segments);
		mp = create_mbuf_pool(harq_in, ad->dev_id, socket_id,
				mbuf_pool_size, "harq_in");
		TEST_ASSERT_NOT_NULL(mp,
				"ERROR Failed to create %uB harq input pktmbuf pool for dev %u on socket %u.",
				mbuf_pool_size, ad->dev_id, socket_id);
		ad->harq_in_mbuf_pool = mp;
	}

	/* HARQ outputs */
	if (harq_out->nb_segments > 0) {
		mbuf_pool_size = optimal_mempool_size(ops_pool_size *
				harq_out->nb_segments);
		mp = create_mbuf_pool(harq_out, ad->dev_id, socket_id,
				mbuf_pool_size, "harq_out");
		TEST_ASSERT_NOT_NULL(mp,
				"ERROR Failed to create %uB harq output pktmbuf pool for dev %u on socket %u.",
				mbuf_pool_size, ad->dev_id, socket_id);
		ad->harq_out_mbuf_pool = mp;
	}

	return TEST_SUCCESS;
}
static int
add_bbdev_dev(uint8_t dev_id, struct rte_bbdev_info *info,
		struct test_bbdev_vector *vector)
{
	int ret;
	unsigned int queue_id;
	struct rte_bbdev_queue_conf qconf;
	struct active_device *ad = &active_devs[nb_active_devs];
	unsigned int nb_queues;
	enum rte_bbdev_op_type op_type = vector->op_type;

	/* Configure the FPGA LTE FEC device with PF & VF values
	 * if the '-i' flag is set and an FPGA device is in use.
	 */
#ifdef RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC
	if ((get_init_device() == true) &&
		(!strcmp(info->drv.driver_name, FPGA_PF_DRIVER_NAME))) {
		struct fpga_lte_fec_conf conf;
		unsigned int i;

		printf("Configure FPGA FEC Driver %s with default values\n",
				info->drv.driver_name);

		/* clear default configuration before initialization */
		memset(&conf, 0, sizeof(struct fpga_lte_fec_conf));

		/* Set PF mode: true if the PF is used for the data plane */
		conf.pf_mode_en = true;

		for (i = 0; i < FPGA_LTE_FEC_NUM_VFS; ++i) {
			/* Number of UL queues per VF (fpga supports 8 VFs) */
			conf.vf_ul_queues_number[i] = VF_UL_QUEUE_VALUE;
			/* Number of DL queues per VF (fpga supports 8 VFs) */
			conf.vf_dl_queues_number[i] = VF_DL_QUEUE_VALUE;
		}

		/* UL bandwidth. Needed only for the scheduling algorithm */
		conf.ul_bandwidth = UL_BANDWIDTH;
		/* DL bandwidth */
		conf.dl_bandwidth = DL_BANDWIDTH;

		/* UL & DL load balance factor (set to 128) */
		conf.ul_load_balance = UL_LOAD_BALANCE;
		conf.dl_load_balance = DL_LOAD_BALANCE;

		/* FLR timeout value */
		conf.flr_time_out = FLR_TIMEOUT;

		/* setup FPGA PF with configuration information */
		ret = fpga_lte_fec_configure(info->dev_name, &conf);
		TEST_ASSERT_SUCCESS(ret,
				"Failed to configure 4G FPGA PF for bbdev %s",
				info->dev_name);
	}
#endif

	nb_queues = RTE_MIN(rte_lcore_count(), info->drv.max_num_queues);
	nb_queues = RTE_MIN(nb_queues, (unsigned int) MAX_QUEUES);

	/* setup device */
	ret = rte_bbdev_setup_queues(dev_id, nb_queues, info->socket_id);
	if (ret < 0) {
		printf("rte_bbdev_setup_queues(%u, %u, %d) ret %i\n",
				dev_id, nb_queues, info->socket_id, ret);
		return TEST_FAILED;
	}

	/* configure interrupts if needed */
	if (intr_enabled) {
		ret = rte_bbdev_intr_enable(dev_id);
		if (ret < 0) {
			printf("rte_bbdev_intr_enable(%u) ret %i\n", dev_id,
					ret);
			return TEST_FAILED;
		}
	}

	/* setup device queues */
	qconf.socket = info->socket_id;
	qconf.queue_size = info->drv.default_queue_conf.queue_size;
	qconf.priority = 0;
	qconf.deferred_start = 0;
	qconf.op_type = op_type;

	for (queue_id = 0; queue_id < nb_queues; ++queue_id) {
		ret = rte_bbdev_queue_configure(dev_id, queue_id, &qconf);
		if (ret != 0) {
			printf(
				"Allocated all queues (id=%u) at prio%u on dev%u\n",
				queue_id, qconf.priority, dev_id);
			qconf.priority++;
			ret = rte_bbdev_queue_configure(ad->dev_id, queue_id,
					&qconf);
		}
		if (ret != 0) {
			printf("All queues on dev %u allocated: %u\n",
					dev_id, queue_id);
			break;
		}
		ad->queue_ids[queue_id] = queue_id;
	}
	TEST_ASSERT(queue_id != 0,
			"ERROR Failed to configure any queues on dev %u",
			dev_id);
	ad->nb_queues = queue_id;

	set_avail_op(ad, op_type);

	return TEST_SUCCESS;
}
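/*
 * Note on the queue-configuration loop in add_bbdev_dev() above: each queue
 * is first tried at priority 0; on failure the priority is bumped once and
 * retried, and the loop stops at the first queue id that cannot be
 * configured at all, so ad->nb_queues holds the number of usable queues.
 */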
static int
add_active_device(uint8_t dev_id, struct rte_bbdev_info *info,
		struct test_bbdev_vector *vector)
{
	int ret;

	active_devs[nb_active_devs].driver_name = info->drv.driver_name;
	active_devs[nb_active_devs].dev_id = dev_id;

	ret = add_bbdev_dev(dev_id, info, vector);
	if (ret == TEST_SUCCESS)
		++nb_active_devs;
	return ret;
}

static uint8_t
populate_active_devices(void)
{
	int ret;
	uint8_t dev_id;
	uint8_t nb_devs_added = 0;
	struct rte_bbdev_info info;

	RTE_BBDEV_FOREACH(dev_id) {
		rte_bbdev_info_get(dev_id, &info);
		if (check_dev_cap(&info)) {
			printf("Device %d (%s) does not support specified capabilities\n",
					dev_id, info.dev_name);
			continue;
		}
		ret = add_active_device(dev_id, &info, &test_vector);
		if (ret != 0)
			printf("Adding active bbdev %s skipped\n",
					info.dev_name);
		else
			nb_devs_added++;
	}
	return nb_devs_added;
}
static int
read_test_vector(void)
{
	int ret;

	memset(&test_vector, 0, sizeof(test_vector));
	printf("Test vector file = %s\n", get_vector_filename());
	ret = test_bbdev_vector_read(get_vector_filename(), &test_vector);
	TEST_ASSERT_SUCCESS(ret, "Failed to parse file %s\n",
			get_vector_filename());
	return TEST_SUCCESS;
}

static int
testsuite_setup(void)
{
	TEST_ASSERT_SUCCESS(read_test_vector(), "Test suite setup failed\n");

	if (populate_active_devices() == 0) {
		printf("No suitable devices found!\n");
		return TEST_SKIPPED;
	}
	return TEST_SUCCESS;
}

static int
interrupt_testsuite_setup(void)
{
	TEST_ASSERT_SUCCESS(read_test_vector(), "Test suite setup failed\n");

	/* Enable interrupts */
	intr_enabled = true;

	/* Special case for NULL device (RTE_BBDEV_OP_NONE) */
	if (populate_active_devices() == 0 ||
			test_vector.op_type == RTE_BBDEV_OP_NONE) {
		intr_enabled = false;
		printf("No suitable devices found!\n");
		return TEST_SKIPPED;
	}
	return TEST_SUCCESS;
}
static void
testsuite_teardown(void)
{
	uint8_t dev_id;

	/* Unconfigure devices */
	RTE_BBDEV_FOREACH(dev_id)
		rte_bbdev_close(dev_id);

	/* Clear active devices structs. */
	memset(active_devs, 0, sizeof(active_devs));
	nb_active_devs = 0;
}

static int
ut_setup(void)
{
	uint8_t i, dev_id;

	for (i = 0; i < nb_active_devs; i++) {
		dev_id = active_devs[i].dev_id;
		/* reset bbdev stats */
		TEST_ASSERT_SUCCESS(rte_bbdev_stats_reset(dev_id),
				"Failed to reset stats of bbdev %u", dev_id);
		/* start the device */
		TEST_ASSERT_SUCCESS(rte_bbdev_start(dev_id),
				"Failed to start bbdev %u", dev_id);
	}
	return TEST_SUCCESS;
}

static void
ut_teardown(void)
{
	uint8_t i, dev_id;
	struct rte_bbdev_stats stats;

	for (i = 0; i < nb_active_devs; i++) {
		dev_id = active_devs[i].dev_id;
		/* read stats and print */
		rte_bbdev_stats_get(dev_id, &stats);
		/* Stop the device */
		rte_bbdev_stop(dev_id);
	}
}
static int
init_op_data_objs(struct rte_bbdev_op_data *bufs,
		struct op_data_entries *ref_entries,
		struct rte_mempool *mbuf_pool, const uint16_t n,
		enum op_data_type op_type, uint16_t min_alignment)
{
	int ret;
	unsigned int i, j;
	bool large_input = false;

	for (i = 0; i < n; ++i) {
		char *data;
		struct op_data_buf *seg = &ref_entries->segments[0];
		struct rte_mbuf *m_head = rte_pktmbuf_alloc(mbuf_pool);
		TEST_ASSERT_NOT_NULL(m_head,
				"Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
				op_type, n * ref_entries->nb_segments,
				mbuf_pool->size);

		if (seg->length > RTE_BBDEV_LDPC_E_MAX_MBUF) {
			/* Special case when DPDK mbuf cannot handle
			 * the required input size
			 */
			printf("Warning: Larger input size than DPDK mbuf %d\n",
					seg->length);
			large_input = true;
		}
		bufs[i].data = m_head;
		bufs[i].offset = 0;
		bufs[i].length = 0;

		if ((op_type == DATA_INPUT) || (op_type == DATA_HARQ_INPUT)) {
			if ((op_type == DATA_INPUT) && large_input) {
				/* Allocate a fake overused mbuf */
				data = rte_malloc(NULL, seg->length, 0);
				memcpy(data, seg->addr, seg->length);
				m_head->buf_addr = data;
				m_head->buf_iova = rte_malloc_virt2iova(data);
				m_head->data_off = 0;
				m_head->data_len = seg->length;
			} else {
				data = rte_pktmbuf_append(m_head, seg->length);
				TEST_ASSERT_NOT_NULL(data,
					"Couldn't append %u bytes to mbuf from %d data type mbuf pool",
					seg->length, op_type);

				TEST_ASSERT(data == RTE_PTR_ALIGN(
						data, min_alignment),
					"Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
					data, min_alignment);
				rte_memcpy(data, seg->addr, seg->length);
			}

			bufs[i].length += seg->length;

			for (j = 1; j < ref_entries->nb_segments; ++j) {
				struct rte_mbuf *m_tail =
						rte_pktmbuf_alloc(mbuf_pool);
				TEST_ASSERT_NOT_NULL(m_tail,
						"Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
						op_type,
						n * ref_entries->nb_segments,
						mbuf_pool->size);
				seg += 1;

				data = rte_pktmbuf_append(m_tail, seg->length);
				TEST_ASSERT_NOT_NULL(data,
						"Couldn't append %u bytes to mbuf from %d data type mbuf pool",
						seg->length, op_type);

				TEST_ASSERT(data == RTE_PTR_ALIGN(data,
						min_alignment),
						"Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
						data, min_alignment);
				rte_memcpy(data, seg->addr, seg->length);
				bufs[i].length += seg->length;

				ret = rte_pktmbuf_chain(m_head, m_tail);
				TEST_ASSERT_SUCCESS(ret,
						"Couldn't chain mbufs from %d data type mbuf pool",
						op_type);
			}
		} else {
			/* allocate chained-mbuf for output buffer */
			for (j = 1; j < ref_entries->nb_segments; ++j) {
				struct rte_mbuf *m_tail =
						rte_pktmbuf_alloc(mbuf_pool);
				TEST_ASSERT_NOT_NULL(m_tail,
						"Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
						op_type,
						n * ref_entries->nb_segments,
						mbuf_pool->size);

				ret = rte_pktmbuf_chain(m_head, m_tail);
				TEST_ASSERT_SUCCESS(ret,
						"Couldn't chain mbufs from %d data type mbuf pool",
						op_type);
			}
		}
	}

	return 0;
}
static int
allocate_buffers_on_socket(struct rte_bbdev_op_data **buffers, const int len,
		const int socket)
{
	int i;
	*buffers = rte_zmalloc_socket(NULL, len, 0, socket);
	if (*buffers == NULL) {
		printf("WARNING: Failed to allocate op_data on socket %d\n",
				socket);
		/* try to allocate memory on other detected sockets */
		for (i = 0; i < socket; i++) {
			*buffers = rte_zmalloc_socket(NULL, len, 0, i);
			if (*buffers != NULL)
				break;
		}
	}
	return (*buffers == NULL) ? TEST_FAILED : TEST_SUCCESS;
}
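/*
 * Note: the fallback above only probes sockets with an id lower than the
 * requested one; it is a best-effort NUMA fallback rather than a full scan
 * of every detected socket.
 */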
static void
limit_input_llr_val_range(struct rte_bbdev_op_data *input_ops,
		const uint16_t n, const int8_t max_llr_modulus)
{
	uint16_t i, byte_idx;

	for (i = 0; i < n; ++i) {
		struct rte_mbuf *m = input_ops[i].data;
		while (m != NULL) {
			int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
					input_ops[i].offset);
			for (byte_idx = 0; byte_idx < rte_pktmbuf_data_len(m);
					++byte_idx)
				llr[byte_idx] = round((double)max_llr_modulus *
						llr[byte_idx] / INT8_MAX);
			m = m->next;
		}
	}
}
static void
ldpc_input_llr_scaling(struct rte_bbdev_op_data *input_ops,
		const uint16_t n, const int8_t llr_size,
		const int8_t llr_decimals)
{
	if (input_ops == NULL)
		return;

	uint16_t i, byte_idx;

	int16_t llr_max, llr_min, llr_tmp;
	llr_max = (1 << (llr_size - 1)) - 1;
	llr_min = -llr_max;
	for (i = 0; i < n; ++i) {
		struct rte_mbuf *m = input_ops[i].data;
		while (m != NULL) {
			int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
					input_ops[i].offset);
			for (byte_idx = 0; byte_idx < rte_pktmbuf_data_len(m);
					++byte_idx) {
				llr_tmp = llr[byte_idx];
				if (llr_decimals == 2)
					llr_tmp *= 2;
				else if (llr_decimals == 0)
					llr_tmp /= 2;
				llr_tmp = RTE_MIN(llr_max,
						RTE_MAX(llr_min, llr_tmp));
				llr[byte_idx] = (int8_t) llr_tmp;
			}
			m = m->next;
		}
	}
}
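/*
 * Worked example for the saturation above: with llr_size = 6 the valid
 * range is [-31, 31] (llr_max = (1 << 5) - 1); each incoming LLR is first
 * rescaled to the device's llr_decimals fixed-point format and then
 * clamped into that range before being written back.
 */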
static int
fill_queue_buffers(struct test_op_params *op_params,
		struct rte_mempool *in_mp, struct rte_mempool *hard_out_mp,
		struct rte_mempool *soft_out_mp,
		struct rte_mempool *harq_in_mp, struct rte_mempool *harq_out_mp,
		uint16_t queue_id,
		const struct rte_bbdev_op_cap *capabilities,
		uint16_t min_alignment, const int socket_id)
{
	int ret;
	enum op_data_type type;
	const uint16_t n = op_params->num_to_process;

	struct rte_mempool *mbuf_pools[DATA_NUM_TYPES] = {
		in_mp,
		soft_out_mp,
		hard_out_mp,
		harq_in_mp,
		harq_out_mp,
	};

	struct rte_bbdev_op_data **queue_ops[DATA_NUM_TYPES] = {
		&op_params->q_bufs[socket_id][queue_id].inputs,
		&op_params->q_bufs[socket_id][queue_id].soft_outputs,
		&op_params->q_bufs[socket_id][queue_id].hard_outputs,
		&op_params->q_bufs[socket_id][queue_id].harq_inputs,
		&op_params->q_bufs[socket_id][queue_id].harq_outputs,
	};

	for (type = DATA_INPUT; type < DATA_NUM_TYPES; ++type) {
		struct op_data_entries *ref_entries =
				&test_vector.entries[type];
		if (ref_entries->nb_segments == 0)
			continue;

		ret = allocate_buffers_on_socket(queue_ops[type],
				n * sizeof(struct rte_bbdev_op_data),
				socket_id);
		TEST_ASSERT_SUCCESS(ret,
				"Couldn't allocate memory for rte_bbdev_op_data structs");

		ret = init_op_data_objs(*queue_ops[type], ref_entries,
				mbuf_pools[type], n, type, min_alignment);
		TEST_ASSERT_SUCCESS(ret,
				"Couldn't init rte_bbdev_op_data structs");
	}

	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
		limit_input_llr_val_range(*queue_ops[DATA_INPUT], n,
			capabilities->cap.turbo_dec.max_llr_modulus);

	if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
		ldpc_input_llr_scaling(*queue_ops[DATA_INPUT], n,
			capabilities->cap.ldpc_dec.llr_size,
			capabilities->cap.ldpc_dec.llr_decimals);
		ldpc_input_llr_scaling(*queue_ops[DATA_HARQ_INPUT], n,
			capabilities->cap.ldpc_dec.llr_size,
			capabilities->cap.ldpc_dec.llr_decimals);
	}

	return TEST_SUCCESS;
}
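/*
 * Note: mbuf_pools[] and queue_ops[] in fill_queue_buffers() above are both
 * indexed by enum op_data_type, so their initializer order must stay in
 * sync with DATA_INPUT, DATA_SOFT_OUTPUT, DATA_HARD_OUTPUT, DATA_HARQ_INPUT,
 * DATA_HARQ_OUTPUT.
 */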
static void
free_buffers(struct active_device *ad, struct test_op_params *op_params)
{
	unsigned int i, j;

	rte_mempool_free(ad->ops_mempool);
	rte_mempool_free(ad->in_mbuf_pool);
	rte_mempool_free(ad->hard_out_mbuf_pool);
	rte_mempool_free(ad->soft_out_mbuf_pool);
	rte_mempool_free(ad->harq_in_mbuf_pool);
	rte_mempool_free(ad->harq_out_mbuf_pool);

	for (i = 0; i < rte_lcore_count(); ++i) {
		for (j = 0; j < RTE_MAX_NUMA_NODES; ++j) {
			rte_free(op_params->q_bufs[j][i].inputs);
			rte_free(op_params->q_bufs[j][i].hard_outputs);
			rte_free(op_params->q_bufs[j][i].soft_outputs);
			rte_free(op_params->q_bufs[j][i].harq_inputs);
			rte_free(op_params->q_bufs[j][i].harq_outputs);
		}
	}
}
static void
copy_reference_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
		unsigned int start_idx,
		struct rte_bbdev_op_data *inputs,
		struct rte_bbdev_op_data *hard_outputs,
		struct rte_bbdev_op_data *soft_outputs,
		struct rte_bbdev_dec_op *ref_op)
{
	unsigned int i;
	struct rte_bbdev_op_turbo_dec *turbo_dec = &ref_op->turbo_dec;

	for (i = 0; i < n; ++i) {
		if (turbo_dec->code_block_mode == 0) {
			ops[i]->turbo_dec.tb_params.ea =
					turbo_dec->tb_params.ea;
			ops[i]->turbo_dec.tb_params.eb =
					turbo_dec->tb_params.eb;
			ops[i]->turbo_dec.tb_params.k_pos =
					turbo_dec->tb_params.k_pos;
			ops[i]->turbo_dec.tb_params.k_neg =
					turbo_dec->tb_params.k_neg;
			ops[i]->turbo_dec.tb_params.c =
					turbo_dec->tb_params.c;
			ops[i]->turbo_dec.tb_params.c_neg =
					turbo_dec->tb_params.c_neg;
			ops[i]->turbo_dec.tb_params.cab =
					turbo_dec->tb_params.cab;
			ops[i]->turbo_dec.tb_params.r =
					turbo_dec->tb_params.r;
		} else {
			ops[i]->turbo_dec.cb_params.e = turbo_dec->cb_params.e;
			ops[i]->turbo_dec.cb_params.k = turbo_dec->cb_params.k;
		}

		ops[i]->turbo_dec.ext_scale = turbo_dec->ext_scale;
		ops[i]->turbo_dec.iter_max = turbo_dec->iter_max;
		ops[i]->turbo_dec.iter_min = turbo_dec->iter_min;
		ops[i]->turbo_dec.op_flags = turbo_dec->op_flags;
		ops[i]->turbo_dec.rv_index = turbo_dec->rv_index;
		ops[i]->turbo_dec.num_maps = turbo_dec->num_maps;
		ops[i]->turbo_dec.code_block_mode = turbo_dec->code_block_mode;

		ops[i]->turbo_dec.hard_output = hard_outputs[start_idx + i];
		ops[i]->turbo_dec.input = inputs[start_idx + i];
		if (soft_outputs != NULL)
			ops[i]->turbo_dec.soft_output =
					soft_outputs[start_idx + i];
	}
}
static void
copy_reference_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
		unsigned int start_idx,
		struct rte_bbdev_op_data *inputs,
		struct rte_bbdev_op_data *outputs,
		struct rte_bbdev_enc_op *ref_op)
{
	unsigned int i;
	struct rte_bbdev_op_turbo_enc *turbo_enc = &ref_op->turbo_enc;
	for (i = 0; i < n; ++i) {
		if (turbo_enc->code_block_mode == 0) {
			ops[i]->turbo_enc.tb_params.ea =
					turbo_enc->tb_params.ea;
			ops[i]->turbo_enc.tb_params.eb =
					turbo_enc->tb_params.eb;
			ops[i]->turbo_enc.tb_params.k_pos =
					turbo_enc->tb_params.k_pos;
			ops[i]->turbo_enc.tb_params.k_neg =
					turbo_enc->tb_params.k_neg;
			ops[i]->turbo_enc.tb_params.c =
					turbo_enc->tb_params.c;
			ops[i]->turbo_enc.tb_params.c_neg =
					turbo_enc->tb_params.c_neg;
			ops[i]->turbo_enc.tb_params.cab =
					turbo_enc->tb_params.cab;
			ops[i]->turbo_enc.tb_params.ncb_pos =
					turbo_enc->tb_params.ncb_pos;
			ops[i]->turbo_enc.tb_params.ncb_neg =
					turbo_enc->tb_params.ncb_neg;
			ops[i]->turbo_enc.tb_params.r = turbo_enc->tb_params.r;
		} else {
			ops[i]->turbo_enc.cb_params.e = turbo_enc->cb_params.e;
			ops[i]->turbo_enc.cb_params.k = turbo_enc->cb_params.k;
			ops[i]->turbo_enc.cb_params.ncb =
					turbo_enc->cb_params.ncb;
		}
		ops[i]->turbo_enc.rv_index = turbo_enc->rv_index;
		ops[i]->turbo_enc.op_flags = turbo_enc->op_flags;
		ops[i]->turbo_enc.code_block_mode = turbo_enc->code_block_mode;

		ops[i]->turbo_enc.output = outputs[start_idx + i];
		ops[i]->turbo_enc.input = inputs[start_idx + i];
	}
}
static void
copy_reference_ldpc_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
		unsigned int start_idx,
		struct rte_bbdev_op_data *inputs,
		struct rte_bbdev_op_data *hard_outputs,
		struct rte_bbdev_op_data *soft_outputs,
		struct rte_bbdev_op_data *harq_inputs,
		struct rte_bbdev_op_data *harq_outputs,
		struct rte_bbdev_dec_op *ref_op)
{
	unsigned int i;
	struct rte_bbdev_op_ldpc_dec *ldpc_dec = &ref_op->ldpc_dec;

	for (i = 0; i < n; ++i) {
		if (ldpc_dec->code_block_mode == 0) {
			ops[i]->ldpc_dec.tb_params.ea =
					ldpc_dec->tb_params.ea;
			ops[i]->ldpc_dec.tb_params.eb =
					ldpc_dec->tb_params.eb;
			ops[i]->ldpc_dec.tb_params.c =
					ldpc_dec->tb_params.c;
			ops[i]->ldpc_dec.tb_params.cab =
					ldpc_dec->tb_params.cab;
			ops[i]->ldpc_dec.tb_params.r =
					ldpc_dec->tb_params.r;
		} else {
			ops[i]->ldpc_dec.cb_params.e = ldpc_dec->cb_params.e;
		}

		ops[i]->ldpc_dec.basegraph = ldpc_dec->basegraph;
		ops[i]->ldpc_dec.z_c = ldpc_dec->z_c;
		ops[i]->ldpc_dec.q_m = ldpc_dec->q_m;
		ops[i]->ldpc_dec.n_filler = ldpc_dec->n_filler;
		ops[i]->ldpc_dec.n_cb = ldpc_dec->n_cb;
		ops[i]->ldpc_dec.iter_max = ldpc_dec->iter_max;
		ops[i]->ldpc_dec.rv_index = ldpc_dec->rv_index;
		ops[i]->ldpc_dec.op_flags = ldpc_dec->op_flags;
		ops[i]->ldpc_dec.code_block_mode = ldpc_dec->code_block_mode;

		ops[i]->ldpc_dec.hard_output = hard_outputs[start_idx + i];
		ops[i]->ldpc_dec.input = inputs[start_idx + i];
		if (soft_outputs != NULL)
			ops[i]->ldpc_dec.soft_output =
					soft_outputs[start_idx + i];
		if (harq_inputs != NULL)
			ops[i]->ldpc_dec.harq_combined_input =
					harq_inputs[start_idx + i];
		if (harq_outputs != NULL)
			ops[i]->ldpc_dec.harq_combined_output =
					harq_outputs[start_idx + i];
	}
}
static void
copy_reference_ldpc_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
		unsigned int start_idx,
		struct rte_bbdev_op_data *inputs,
		struct rte_bbdev_op_data *outputs,
		struct rte_bbdev_enc_op *ref_op)
{
	unsigned int i;
	struct rte_bbdev_op_ldpc_enc *ldpc_enc = &ref_op->ldpc_enc;
	for (i = 0; i < n; ++i) {
		if (ldpc_enc->code_block_mode == 0) {
			ops[i]->ldpc_enc.tb_params.ea = ldpc_enc->tb_params.ea;
			ops[i]->ldpc_enc.tb_params.eb = ldpc_enc->tb_params.eb;
			ops[i]->ldpc_enc.tb_params.cab =
					ldpc_enc->tb_params.cab;
			ops[i]->ldpc_enc.tb_params.c = ldpc_enc->tb_params.c;
			ops[i]->ldpc_enc.tb_params.r = ldpc_enc->tb_params.r;
		} else {
			ops[i]->ldpc_enc.cb_params.e = ldpc_enc->cb_params.e;
		}
		ops[i]->ldpc_enc.basegraph = ldpc_enc->basegraph;
		ops[i]->ldpc_enc.z_c = ldpc_enc->z_c;
		ops[i]->ldpc_enc.q_m = ldpc_enc->q_m;
		ops[i]->ldpc_enc.n_filler = ldpc_enc->n_filler;
		ops[i]->ldpc_enc.n_cb = ldpc_enc->n_cb;
		ops[i]->ldpc_enc.rv_index = ldpc_enc->rv_index;
		ops[i]->ldpc_enc.op_flags = ldpc_enc->op_flags;
		ops[i]->ldpc_enc.code_block_mode = ldpc_enc->code_block_mode;
		ops[i]->ldpc_enc.output = outputs[start_idx + i];
		ops[i]->ldpc_enc.input = inputs[start_idx + i];
	}
}
static int
check_dec_status_and_ordering(struct rte_bbdev_dec_op *op,
		unsigned int order_idx, const int expected_status)
{
	TEST_ASSERT(op->status == expected_status,
			"op_status (%d) != expected_status (%d)",
			op->status, expected_status);

	TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
			"Ordering error, expected %p, got %p",
			(void *)(uintptr_t)order_idx, op->opaque_data);

	return TEST_SUCCESS;
}

static int
check_enc_status_and_ordering(struct rte_bbdev_enc_op *op,
		unsigned int order_idx, const int expected_status)
{
	TEST_ASSERT(op->status == expected_status,
			"op_status (%d) != expected_status (%d)",
			op->status, expected_status);

	TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
			"Ordering error, expected %p, got %p",
			(void *)(uintptr_t)order_idx, op->opaque_data);

	return TEST_SUCCESS;
}
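/*
 * Note: ordering is validated via an opaque-data round trip. The enqueue
 * paths stamp each op with its index (op->opaque_data = (void *)(uintptr_t)i)
 * before submission, and the checks above verify that ops are dequeued in
 * the same order they were enqueued.
 */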
static int
validate_op_chain(struct rte_bbdev_op_data *op,
		struct op_data_entries *orig_op)
{
	uint8_t i;
	struct rte_mbuf *m = op->data;
	uint8_t nb_dst_segments = orig_op->nb_segments;
	uint32_t total_data_size = 0;

	TEST_ASSERT(nb_dst_segments == m->nb_segs,
			"Number of segments differ in original (%u) and filled (%u) op",
			nb_dst_segments, m->nb_segs);

	/* Validate each mbuf segment length */
	for (i = 0; i < nb_dst_segments; ++i) {
		/* Apply offset to the first mbuf segment */
		uint16_t offset = (i == 0) ? op->offset : 0;
		uint16_t data_len = rte_pktmbuf_data_len(m) - offset;
		total_data_size += orig_op->segments[i].length;

		TEST_ASSERT(orig_op->segments[i].length == data_len,
				"Length of segment differ in original (%u) and filled (%u) op",
				orig_op->segments[i].length, data_len);
		TEST_ASSERT_BUFFERS_ARE_EQUAL(orig_op->segments[i].addr,
				rte_pktmbuf_mtod_offset(m, uint32_t *, offset),
				data_len,
				"Output buffers (CB=%u) are not equal", i);
		m = m->next;
	}

	/* Validate total mbuf pkt length */
	uint32_t pkt_len = rte_pktmbuf_pkt_len(op->data) - op->offset;
	TEST_ASSERT(total_data_size == pkt_len,
			"Length of data differ in original (%u) and filled (%u) op",
			total_data_size, pkt_len);

	return TEST_SUCCESS;
}
static int
validate_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
		struct rte_bbdev_dec_op *ref_op, const int vector_mask)
{
	unsigned int i;
	int ret;
	struct op_data_entries *hard_data_orig =
			&test_vector.entries[DATA_HARD_OUTPUT];
	struct op_data_entries *soft_data_orig =
			&test_vector.entries[DATA_SOFT_OUTPUT];
	struct rte_bbdev_op_turbo_dec *ops_td;
	struct rte_bbdev_op_data *hard_output;
	struct rte_bbdev_op_data *soft_output;
	struct rte_bbdev_op_turbo_dec *ref_td = &ref_op->turbo_dec;

	for (i = 0; i < n; ++i) {
		ops_td = &ops[i]->turbo_dec;
		hard_output = &ops_td->hard_output;
		soft_output = &ops_td->soft_output;

		if (vector_mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT)
			TEST_ASSERT(ops_td->iter_count <= ref_td->iter_count,
					"Returned iter_count (%d) > expected iter_count (%d)",
					ops_td->iter_count, ref_td->iter_count);
		ret = check_dec_status_and_ordering(ops[i], i, ref_op->status);
		TEST_ASSERT_SUCCESS(ret,
				"Checking status and ordering for decoder failed");

		TEST_ASSERT_SUCCESS(validate_op_chain(hard_output,
				hard_data_orig),
				"Hard output buffers (CB=%u) are not equal", i);

		if (ref_op->turbo_dec.op_flags & RTE_BBDEV_TURBO_SOFT_OUTPUT)
			TEST_ASSERT_SUCCESS(validate_op_chain(soft_output,
					soft_data_orig),
					"Soft output buffers (CB=%u) are not equal",
					i);
	}

	return TEST_SUCCESS;
}
static int
validate_ldpc_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
		struct rte_bbdev_dec_op *ref_op, const int vector_mask)
{
	unsigned int i;
	int ret;
	struct op_data_entries *hard_data_orig =
			&test_vector.entries[DATA_HARD_OUTPUT];
	struct op_data_entries *soft_data_orig =
			&test_vector.entries[DATA_SOFT_OUTPUT];
	struct op_data_entries *harq_data_orig =
			&test_vector.entries[DATA_HARQ_OUTPUT];
	struct rte_bbdev_op_ldpc_dec *ops_td;
	struct rte_bbdev_op_data *hard_output;
	struct rte_bbdev_op_data *harq_output;
	struct rte_bbdev_op_data *soft_output;
	struct rte_bbdev_op_ldpc_dec *ref_td = &ref_op->ldpc_dec;

	for (i = 0; i < n; ++i) {
		ops_td = &ops[i]->ldpc_dec;
		hard_output = &ops_td->hard_output;
		harq_output = &ops_td->harq_combined_output;
		soft_output = &ops_td->soft_output;

		ret = check_dec_status_and_ordering(ops[i], i, ref_op->status);
		TEST_ASSERT_SUCCESS(ret,
				"Checking status and ordering for decoder failed");
		if (vector_mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT)
			TEST_ASSERT(ops_td->iter_count <= ref_td->iter_count,
					"Returned iter_count (%d) > expected iter_count (%d)",
					ops_td->iter_count, ref_td->iter_count);
		/* We can ignore data when the decoding failed to converge */
		if ((ops[i]->status & (1 << RTE_BBDEV_SYNDROME_ERROR)) == 0)
			TEST_ASSERT_SUCCESS(validate_op_chain(hard_output,
					hard_data_orig),
					"Hard output buffers (CB=%u) are not equal",
					i);

		if (ref_op->ldpc_dec.op_flags & RTE_BBDEV_LDPC_SOFT_OUT_ENABLE)
			TEST_ASSERT_SUCCESS(validate_op_chain(soft_output,
					soft_data_orig),
					"Soft output buffers (CB=%u) are not equal",
					i);
		if (ref_op->ldpc_dec.op_flags &
				RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE) {
			ldpc_input_llr_scaling(harq_output, 1, 8, 0);
			TEST_ASSERT_SUCCESS(validate_op_chain(harq_output,
					harq_data_orig),
					"HARQ output buffers (CB=%u) are not equal",
					i);
		}
	}

	return TEST_SUCCESS;
}
static int
validate_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
		struct rte_bbdev_enc_op *ref_op)
{
	unsigned int i;
	int ret;
	struct op_data_entries *hard_data_orig =
			&test_vector.entries[DATA_HARD_OUTPUT];

	for (i = 0; i < n; ++i) {
		ret = check_enc_status_and_ordering(ops[i], i, ref_op->status);
		TEST_ASSERT_SUCCESS(ret,
				"Checking status and ordering for encoder failed");
		TEST_ASSERT_SUCCESS(validate_op_chain(
				&ops[i]->turbo_enc.output,
				hard_data_orig),
				"Output buffers (CB=%u) are not equal", i);
	}

	return TEST_SUCCESS;
}

static int
validate_ldpc_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
		struct rte_bbdev_enc_op *ref_op)
{
	unsigned int i;
	int ret;
	struct op_data_entries *hard_data_orig =
			&test_vector.entries[DATA_HARD_OUTPUT];

	for (i = 0; i < n; ++i) {
		ret = check_enc_status_and_ordering(ops[i], i, ref_op->status);
		TEST_ASSERT_SUCCESS(ret,
				"Checking status and ordering for encoder failed");
		TEST_ASSERT_SUCCESS(validate_op_chain(
				&ops[i]->ldpc_enc.output,
				hard_data_orig),
				"Output buffers (CB=%u) are not equal", i);
	}

	return TEST_SUCCESS;
}
static void
create_reference_dec_op(struct rte_bbdev_dec_op *op)
{
	unsigned int i;
	struct op_data_entries *entry;

	op->turbo_dec = test_vector.turbo_dec;
	entry = &test_vector.entries[DATA_INPUT];
	for (i = 0; i < entry->nb_segments; ++i)
		op->turbo_dec.input.length +=
				entry->segments[i].length;
}

static void
create_reference_ldpc_dec_op(struct rte_bbdev_dec_op *op)
{
	unsigned int i;
	struct op_data_entries *entry;

	op->ldpc_dec = test_vector.ldpc_dec;
	entry = &test_vector.entries[DATA_INPUT];
	for (i = 0; i < entry->nb_segments; ++i)
		op->ldpc_dec.input.length +=
				entry->segments[i].length;
	if (test_vector.ldpc_dec.op_flags &
			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE) {
		entry = &test_vector.entries[DATA_HARQ_INPUT];
		for (i = 0; i < entry->nb_segments; ++i)
			op->ldpc_dec.harq_combined_input.length +=
					entry->segments[i].length;
	}
}

static void
create_reference_enc_op(struct rte_bbdev_enc_op *op)
{
	unsigned int i;
	struct op_data_entries *entry;

	op->turbo_enc = test_vector.turbo_enc;
	entry = &test_vector.entries[DATA_INPUT];
	for (i = 0; i < entry->nb_segments; ++i)
		op->turbo_enc.input.length +=
				entry->segments[i].length;
}

static void
create_reference_ldpc_enc_op(struct rte_bbdev_enc_op *op)
{
	unsigned int i;
	struct op_data_entries *entry;

	op->ldpc_enc = test_vector.ldpc_enc;
	entry = &test_vector.entries[DATA_INPUT];
	for (i = 0; i < entry->nb_segments; ++i)
		op->ldpc_enc.input.length +=
				entry->segments[i].length;
}
static uint32_t
calc_dec_TB_size(struct rte_bbdev_dec_op *op)
{
	uint8_t i;
	uint32_t c, r, tb_size = 0;

	if (op->turbo_dec.code_block_mode) {
		tb_size = op->turbo_dec.tb_params.k_neg;
	} else {
		c = op->turbo_dec.tb_params.c;
		r = op->turbo_dec.tb_params.r;
		for (i = 0; i < c-r; i++)
			tb_size += (r < op->turbo_dec.tb_params.c_neg) ?
				op->turbo_dec.tb_params.k_neg :
				op->turbo_dec.tb_params.k_pos;
	}
	return tb_size;
}

static uint32_t
calc_ldpc_dec_TB_size(struct rte_bbdev_dec_op *op)
{
	uint8_t i;
	uint32_t c, r, tb_size = 0;
	uint16_t sys_cols = (op->ldpc_dec.basegraph == 1) ? 22 : 10;

	if (op->ldpc_dec.code_block_mode) {
		tb_size = sys_cols * op->ldpc_dec.z_c - op->ldpc_dec.n_filler;
	} else {
		c = op->ldpc_dec.tb_params.c;
		r = op->ldpc_dec.tb_params.r;
		for (i = 0; i < c-r; i++)
			tb_size += sys_cols * op->ldpc_dec.z_c
					- op->ldpc_dec.n_filler;
	}
	return tb_size;
}
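/*
 * Worked example for the LDPC TB size above: base graph 1 has 22 systematic
 * columns, so with z_c = 384 and n_filler = 0 a single code block carries
 * 22 * 384 = 8448 information bits, the LDPC maximum code block size.
 */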
static uint32_t
calc_enc_TB_size(struct rte_bbdev_enc_op *op)
{
	uint8_t i;
	uint32_t c, r, tb_size = 0;

	if (op->turbo_enc.code_block_mode) {
		tb_size = op->turbo_enc.tb_params.k_neg;
	} else {
		c = op->turbo_enc.tb_params.c;
		r = op->turbo_enc.tb_params.r;
		for (i = 0; i < c-r; i++)
			tb_size += (r < op->turbo_enc.tb_params.c_neg) ?
				op->turbo_enc.tb_params.k_neg :
				op->turbo_enc.tb_params.k_pos;
	}
	return tb_size;
}

static uint32_t
calc_ldpc_enc_TB_size(struct rte_bbdev_enc_op *op)
{
	uint8_t i;
	uint32_t c, r, tb_size = 0;
	uint16_t sys_cols = (op->ldpc_enc.basegraph == 1) ? 22 : 10;

	if (op->ldpc_enc.code_block_mode) {
		tb_size = sys_cols * op->ldpc_enc.z_c - op->ldpc_enc.n_filler;
	} else {
		c = op->ldpc_enc.tb_params.c;
		r = op->ldpc_enc.tb_params.r;
		for (i = 0; i < c-r; i++)
			tb_size += sys_cols * op->ldpc_enc.z_c
					- op->ldpc_enc.n_filler;
	}
	return tb_size;
}
static int
init_test_op_params(struct test_op_params *op_params,
		enum rte_bbdev_op_type op_type, const int expected_status,
		const int vector_mask, struct rte_mempool *ops_mp,
		uint16_t burst_sz, uint16_t num_to_process, uint16_t num_lcores)
{
	int ret = 0;
	if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
			op_type == RTE_BBDEV_OP_LDPC_DEC)
		ret = rte_bbdev_dec_op_alloc_bulk(ops_mp,
				&op_params->ref_dec_op, 1);
	else
		ret = rte_bbdev_enc_op_alloc_bulk(ops_mp,
				&op_params->ref_enc_op, 1);

	TEST_ASSERT_SUCCESS(ret, "rte_bbdev_op_alloc_bulk() failed");

	op_params->mp = ops_mp;
	op_params->burst_sz = burst_sz;
	op_params->num_to_process = num_to_process;
	op_params->num_lcores = num_lcores;
	op_params->vector_mask = vector_mask;
	if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
			op_type == RTE_BBDEV_OP_LDPC_DEC)
		op_params->ref_dec_op->status = expected_status;
	else if (op_type == RTE_BBDEV_OP_TURBO_ENC
			|| op_type == RTE_BBDEV_OP_LDPC_ENC)
		op_params->ref_enc_op->status = expected_status;

	return 0;
}
static int
run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id,
		struct test_op_params *op_params)
{
	int t_ret, f_ret, socket_id = SOCKET_ID_ANY;
	unsigned int i;
	struct active_device *ad;
	unsigned int burst_sz = get_burst_sz();
	enum rte_bbdev_op_type op_type = test_vector.op_type;
	const struct rte_bbdev_op_cap *capabilities = NULL;

	ad = &active_devs[dev_id];

	/* Check if device supports op_type */
	if (!is_avail_op(ad, test_vector.op_type))
		return TEST_SUCCESS;

	struct rte_bbdev_info info;
	rte_bbdev_info_get(ad->dev_id, &info);
	socket_id = GET_SOCKET(info.socket_id);

	f_ret = create_mempools(ad, socket_id, op_type,
			get_num_ops());
	if (f_ret != TEST_SUCCESS) {
		printf("Couldn't create mempools");
		goto fail;
	}
	if (op_type == RTE_BBDEV_OP_NONE)
		op_type = RTE_BBDEV_OP_TURBO_ENC;

	f_ret = init_test_op_params(op_params, test_vector.op_type,
			test_vector.expected_status,
			test_vector.mask,
			ad->ops_mempool,
			burst_sz,
			get_num_ops(),
			get_num_lcores());
	if (f_ret != TEST_SUCCESS) {
		printf("Couldn't init test op params");
		goto fail;
	}

	/* Find capabilities */
	const struct rte_bbdev_op_cap *cap = info.drv.capabilities;
	for (i = 0; i < RTE_BBDEV_OP_TYPE_COUNT; i++) {
		if (cap->type == test_vector.op_type) {
			capabilities = cap;
			break;
		}
		cap++;
	}
	TEST_ASSERT_NOT_NULL(capabilities,
			"Couldn't find capabilities");

	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
		create_reference_dec_op(op_params->ref_dec_op);
	} else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
		create_reference_enc_op(op_params->ref_enc_op);
	else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
		create_reference_ldpc_enc_op(op_params->ref_enc_op);
	else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
		create_reference_ldpc_dec_op(op_params->ref_dec_op);

	for (i = 0; i < ad->nb_queues; ++i) {
		f_ret = fill_queue_buffers(op_params,
				ad->in_mbuf_pool,
				ad->hard_out_mbuf_pool,
				ad->soft_out_mbuf_pool,
				ad->harq_in_mbuf_pool,
				ad->harq_out_mbuf_pool,
				ad->queue_ids[i],
				capabilities,
				info.drv.min_alignment,
				socket_id);
		if (f_ret != TEST_SUCCESS) {
			printf("Couldn't init queue buffers");
			goto fail;
		}
	}

	/* Run test case function */
	t_ret = test_case_func(ad, op_params);

	/* Free active device resources and return */
	free_buffers(ad, op_params);
	return t_ret;

fail:
	free_buffers(ad, op_params);
	return TEST_FAILED;
}
/* Run given test function per active device per supported op type */
static int
run_test_case(test_case_function *test_case_func)
{
	int ret = 0;
	uint8_t dev;

	/* Alloc op_params */
	struct test_op_params *op_params = rte_zmalloc(NULL,
			sizeof(struct test_op_params), RTE_CACHE_LINE_SIZE);
	TEST_ASSERT_NOT_NULL(op_params, "Failed to alloc %zuB for op_params",
			RTE_ALIGN(sizeof(struct test_op_params),
				RTE_CACHE_LINE_SIZE));

	/* For each device run test case function */
	for (dev = 0; dev < nb_active_devs; ++dev)
		ret |= run_test_case_on_device(test_case_func, dev, op_params);

	rte_free(op_params);

	return ret;
}
static void
dequeue_event_callback(uint16_t dev_id,
		enum rte_bbdev_event_type event, void *cb_arg,
		void *ret_param)
{
	int ret = TEST_SUCCESS;
	uint16_t i;
	uint64_t total_time;
	uint16_t deq, burst_sz, num_ops;
	uint16_t queue_id = *(uint16_t *) ret_param;
	struct rte_bbdev_info info;
	double tb_len_bits;
	struct thread_params *tp = cb_arg;

	/* Find matching thread params using queue_id */
	for (i = 0; i < MAX_QUEUES; ++i, ++tp)
		if (tp->queue_id == queue_id)
			break;

	if (i == MAX_QUEUES) {
		printf("%s: Queue_id from interrupt details was not found!\n",
				__func__);
		return;
	}

	if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {
		rte_atomic16_set(&tp->processing_status, TEST_FAILED);
		printf(
			"Dequeue interrupt handler called for incorrect event!\n");
		return;
	}

	burst_sz = rte_atomic16_read(&tp->burst_sz);
	num_ops = tp->op_params->num_to_process;

	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
			test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
		deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
				&tp->dec_ops[
					rte_atomic16_read(&tp->nb_dequeued)],
				burst_sz);
	else
		deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
				&tp->enc_ops[
					rte_atomic16_read(&tp->nb_dequeued)],
				burst_sz);

	if (deq < burst_sz) {
		printf(
			"After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n",
			burst_sz, deq);
		rte_atomic16_set(&tp->processing_status, TEST_FAILED);
		return;
	}

	if (rte_atomic16_read(&tp->nb_dequeued) + deq < num_ops) {
		rte_atomic16_add(&tp->nb_dequeued, deq);
		return;
	}

	total_time = rte_rdtsc_precise() - tp->start_time;

	rte_bbdev_info_get(dev_id, &info);

	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
		struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
		ret = validate_dec_op(tp->dec_ops, num_ops, ref_op,
				tp->op_params->vector_mask);
		/* get the max of iter_count for all dequeued ops */
		for (i = 0; i < num_ops; ++i)
			tp->iter_count = RTE_MAX(
					tp->dec_ops[i]->turbo_dec.iter_count,
					tp->iter_count);
		rte_bbdev_dec_op_free_bulk(tp->dec_ops, deq);
	} else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC) {
		struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
		ret = validate_enc_op(tp->enc_ops, num_ops, ref_op);
		rte_bbdev_enc_op_free_bulk(tp->enc_ops, deq);
	} else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC) {
		struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
		ret = validate_ldpc_enc_op(tp->enc_ops, num_ops, ref_op);
		rte_bbdev_enc_op_free_bulk(tp->enc_ops, deq);
	} else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
		struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
		ret = validate_ldpc_dec_op(tp->dec_ops, num_ops, ref_op,
				tp->op_params->vector_mask);
		rte_bbdev_dec_op_free_bulk(tp->dec_ops, deq);
	}

	if (ret) {
		printf("Buffers validation failed\n");
		rte_atomic16_set(&tp->processing_status, TEST_FAILED);
	}

	switch (test_vector.op_type) {
	case RTE_BBDEV_OP_TURBO_DEC:
		tb_len_bits = calc_dec_TB_size(tp->op_params->ref_dec_op);
		break;
	case RTE_BBDEV_OP_TURBO_ENC:
		tb_len_bits = calc_enc_TB_size(tp->op_params->ref_enc_op);
		break;
	case RTE_BBDEV_OP_LDPC_DEC:
		tb_len_bits = calc_ldpc_dec_TB_size(tp->op_params->ref_dec_op);
		break;
	case RTE_BBDEV_OP_LDPC_ENC:
		tb_len_bits = calc_ldpc_enc_TB_size(tp->op_params->ref_enc_op);
		break;
	case RTE_BBDEV_OP_NONE:
		tb_len_bits = 0.0;
		break;
	default:
		printf("Unknown op type: %d\n", test_vector.op_type);
		rte_atomic16_set(&tp->processing_status, TEST_FAILED);
		return;
	}

	tp->ops_per_sec += ((double)num_ops) /
			((double)total_time / (double)rte_get_tsc_hz());
	tp->mbps += (((double)(num_ops * tb_len_bits)) / 1000000.0) /
			((double)total_time / (double)rte_get_tsc_hz());
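	/*
	 * Worked example for the accounting above: 1000 ops of an 8448-bit
	 * TB completing in 10 ms yield 1000 / 0.01 = 100k ops/s and
	 * (1000 * 8448) / 1e6 / 0.01 = 844.8 Mbps.
	 */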
	rte_atomic16_add(&tp->nb_dequeued, deq);
}
static int
throughput_intr_lcore_dec(void *arg)
{
	struct thread_params *tp = arg;
	unsigned int enqueued;
	const uint16_t queue_id = tp->queue_id;
	const uint16_t burst_sz = tp->op_params->burst_sz;
	const uint16_t num_to_process = tp->op_params->num_to_process;
	struct rte_bbdev_dec_op *ops[num_to_process];
	struct test_buffers *bufs = NULL;
	struct rte_bbdev_info info;
	int ret, i, j;
	uint16_t num_to_enq, enq;

	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
			"BURST_SIZE should be <= %u", MAX_BURST);

	TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
			"Failed to enable interrupts for dev: %u, queue_id: %u",
			tp->dev_id, queue_id);

	rte_bbdev_info_get(tp->dev_id, &info);

	TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
			"NUM_OPS cannot exceed %u for this device",
			info.drv.queue_size_lim);

	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

	rte_atomic16_clear(&tp->processing_status);
	rte_atomic16_clear(&tp->nb_dequeued);

	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
		rte_pause();

	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
			num_to_process);
	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
			num_to_process);
	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
		copy_reference_dec_op(ops, num_to_process, 0, bufs->inputs,
				bufs->hard_outputs, bufs->soft_outputs,
				tp->op_params->ref_dec_op);

	/* Set counter to validate the ordering */
	for (j = 0; j < num_to_process; ++j)
		ops[j]->opaque_data = (void *)(uintptr_t)j;

	for (j = 0; j < TEST_REPETITIONS; ++j) {
		for (i = 0; i < num_to_process; ++i)
			rte_pktmbuf_reset(ops[i]->turbo_dec.hard_output.data);

		tp->start_time = rte_rdtsc_precise();
		for (enqueued = 0; enqueued < num_to_process;) {
			num_to_enq = burst_sz;

			if (unlikely(num_to_process - enqueued < num_to_enq))
				num_to_enq = num_to_process - enqueued;

			enq = 0;
			do {
				enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
						queue_id, &ops[enqueued],
						num_to_enq);
			} while (unlikely(num_to_enq != enq));
			enqueued += enq;

			/* Write to thread burst_sz current number of enqueued
			 * descriptors. It ensures that proper number of
			 * descriptors will be dequeued in callback
			 * function - needed for last batch in case where
			 * the number of operations is not a multiple of
			 * burst size.
			 */
			rte_atomic16_set(&tp->burst_sz, num_to_enq);

			/* Wait until processing of previous batch is
			 * completed
			 */
			while (rte_atomic16_read(&tp->nb_dequeued) !=
					(int16_t) enqueued)
				rte_pause();
		}
		if (j != TEST_REPETITIONS - 1)
			rte_atomic16_clear(&tp->nb_dequeued);
	}

	return TEST_SUCCESS;
}
static int
throughput_intr_lcore_enc(void *arg)
{
	struct thread_params *tp = arg;
	unsigned int enqueued;
	const uint16_t queue_id = tp->queue_id;
	const uint16_t burst_sz = tp->op_params->burst_sz;
	const uint16_t num_to_process = tp->op_params->num_to_process;
	struct rte_bbdev_enc_op *ops[num_to_process];
	struct test_buffers *bufs = NULL;
	struct rte_bbdev_info info;
	int ret, i, j;
	uint16_t num_to_enq, enq;

	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
			"BURST_SIZE should be <= %u", MAX_BURST);

	TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
			"Failed to enable interrupts for dev: %u, queue_id: %u",
			tp->dev_id, queue_id);

	rte_bbdev_info_get(tp->dev_id, &info);

	TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
			"NUM_OPS cannot exceed %u for this device",
			info.drv.queue_size_lim);

	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

	rte_atomic16_clear(&tp->processing_status);
	rte_atomic16_clear(&tp->nb_dequeued);

	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
		rte_pause();

	ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
			num_to_process);
	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
			num_to_process);
	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
		copy_reference_enc_op(ops, num_to_process, 0, bufs->inputs,
				bufs->hard_outputs, tp->op_params->ref_enc_op);

	/* Set counter to validate the ordering */
	for (j = 0; j < num_to_process; ++j)
		ops[j]->opaque_data = (void *)(uintptr_t)j;

	for (j = 0; j < TEST_REPETITIONS; ++j) {
		for (i = 0; i < num_to_process; ++i)
			rte_pktmbuf_reset(ops[i]->turbo_enc.output.data);

		tp->start_time = rte_rdtsc_precise();
		for (enqueued = 0; enqueued < num_to_process;) {
			num_to_enq = burst_sz;

			if (unlikely(num_to_process - enqueued < num_to_enq))
				num_to_enq = num_to_process - enqueued;

			enq = 0;
			do {
				enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
						queue_id, &ops[enqueued],
						num_to_enq);
			} while (unlikely(enq != num_to_enq));
			enqueued += enq;

			/* Write to thread burst_sz current number of enqueued
			 * descriptors. It ensures that proper number of
			 * descriptors will be dequeued in callback
			 * function - needed for last batch in case where
			 * the number of operations is not a multiple of
			 * burst size.
			 */
			rte_atomic16_set(&tp->burst_sz, num_to_enq);

			/* Wait until processing of previous batch is
			 * completed
			 */
			while (rte_atomic16_read(&tp->nb_dequeued) !=
					(int16_t) enqueued)
				rte_pause();
		}
		if (j != TEST_REPETITIONS - 1)
			rte_atomic16_clear(&tp->nb_dequeued);
	}

	return TEST_SUCCESS;
}
static int
throughput_pmd_lcore_dec(void *arg)
{
	struct thread_params *tp = arg;
	uint16_t enq, deq;
	uint64_t total_time = 0, start_time;
	const uint16_t queue_id = tp->queue_id;
	const uint16_t burst_sz = tp->op_params->burst_sz;
	const uint16_t num_ops = tp->op_params->num_to_process;
	struct rte_bbdev_dec_op *ops_enq[num_ops];
	struct rte_bbdev_dec_op *ops_deq[num_ops];
	struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
	struct test_buffers *bufs = NULL;
	int i, j, ret;
	struct rte_bbdev_info info;
	uint16_t num_to_enq;

	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
			"BURST_SIZE should be <= %u", MAX_BURST);

	rte_bbdev_info_get(tp->dev_id, &info);

	TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
			"NUM_OPS cannot exceed %u for this device",
			info.drv.queue_size_lim);

	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
		rte_pause();

	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);

	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
		copy_reference_dec_op(ops_enq, num_ops, 0, bufs->inputs,
				bufs->hard_outputs, bufs->soft_outputs, ref_op);

	/* Set counter to validate the ordering */
	for (j = 0; j < num_ops; ++j)
		ops_enq[j]->opaque_data = (void *)(uintptr_t)j;

	for (i = 0; i < TEST_REPETITIONS; ++i) {

		for (j = 0; j < num_ops; ++j)
			mbuf_reset(ops_enq[j]->turbo_dec.hard_output.data);

		start_time = rte_rdtsc_precise();

		for (enq = 0, deq = 0; enq < num_ops;) {
			num_to_enq = burst_sz;

			if (unlikely(num_ops - enq < num_to_enq))
				num_to_enq = num_ops - enq;

			enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
					queue_id, &ops_enq[enq], num_to_enq);

			deq += rte_bbdev_dequeue_dec_ops(tp->dev_id,
					queue_id, &ops_deq[deq], enq - deq);
		}

		/* dequeue the remaining */
		while (deq < enq) {
			deq += rte_bbdev_dequeue_dec_ops(tp->dev_id,
					queue_id, &ops_deq[deq], enq - deq);
		}

		total_time += rte_rdtsc_precise() - start_time;
	}

	tp->iter_count = 0;
	/* get the max of iter_count for all dequeued ops */
	for (i = 0; i < num_ops; ++i) {
		tp->iter_count = RTE_MAX(ops_enq[i]->turbo_dec.iter_count,
				tp->iter_count);
	}

	if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
		ret = validate_dec_op(ops_deq, num_ops, ref_op,
				tp->op_params->vector_mask);
		TEST_ASSERT_SUCCESS(ret, "Validation failed!");
	}

	rte_bbdev_dec_op_free_bulk(ops_enq, num_ops);

	double tb_len_bits = calc_dec_TB_size(ref_op);

	tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
			((double)total_time / (double)rte_get_tsc_hz());
	tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits)) /
			1000000.0) / ((double)total_time /
			(double)rte_get_tsc_hz());

	return TEST_SUCCESS;
}
2118 throughput_pmd_lcore_ldpc_dec(void *arg)
2120 struct thread_params *tp = arg;
2122 uint64_t total_time = 0, start_time;
2123 const uint16_t queue_id = tp->queue_id;
2124 const uint16_t burst_sz = tp->op_params->burst_sz;
2125 const uint16_t num_ops = tp->op_params->num_to_process;
2126 struct rte_bbdev_dec_op *ops_enq[num_ops];
2127 struct rte_bbdev_dec_op *ops_deq[num_ops];
2128 struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
2129 struct test_buffers *bufs = NULL;
2131 struct rte_bbdev_info info;
2132 uint16_t num_to_enq;
2134 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
2135 "BURST_SIZE should be <= %u", MAX_BURST);
2137 rte_bbdev_info_get(tp->dev_id, &info);
2139 TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
2140 "NUM_OPS cannot exceed %u for this device",
2141 info.drv.queue_size_lim);
2143 bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
    while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
        rte_pause();
2148 ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
2149 TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
2151 /* For throughput tests we need to disable early termination */
2152 if (check_bit(ref_op->ldpc_dec.op_flags,
2153 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE))
2154 ref_op->ldpc_dec.op_flags -=
2155 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
2156 ref_op->ldpc_dec.iter_max = 6;
2157 ref_op->ldpc_dec.iter_count = ref_op->ldpc_dec.iter_max;
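    /* Rationale, roughly: with early termination disabled and iter_count
     * primed to iter_max, every op decodes for the same fixed number of
     * iterations, so each repetition presents an identical, worst-case load
     * and throughput figures stay comparable across runs.
     */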
2159 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
2160 copy_reference_ldpc_dec_op(ops_enq, num_ops, 0, bufs->inputs,
2161 bufs->hard_outputs, bufs->soft_outputs,
2162 bufs->harq_inputs, bufs->harq_outputs, ref_op);
2164 /* Set counter to validate the ordering */
2165 for (j = 0; j < num_ops; ++j)
2166 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
2168 for (i = 0; i < TEST_REPETITIONS; ++i) {
2169 for (j = 0; j < num_ops; ++j) {
2170 mbuf_reset(ops_enq[j]->ldpc_dec.hard_output.data);
            if (check_bit(ref_op->ldpc_dec.op_flags,
                    RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE))
                mbuf_reset(
                ops_enq[j]->ldpc_dec.harq_combined_output.data);
        }
2177 start_time = rte_rdtsc_precise();
2179 for (enq = 0, deq = 0; enq < num_ops;) {
2180 num_to_enq = burst_sz;
2182 if (unlikely(num_ops - enq < num_to_enq))
2183 num_to_enq = num_ops - enq;
2185 enq += rte_bbdev_enqueue_ldpc_dec_ops(tp->dev_id,
2186 queue_id, &ops_enq[enq], num_to_enq);
2188 deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
                    queue_id, &ops_deq[deq], enq - deq);
        }

        /* dequeue the remaining */
        while (deq < enq)
            deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
                    queue_id, &ops_deq[deq], enq - deq);
        total_time += rte_rdtsc_precise() - start_time;
    }

    /* get the max of iter_count for all dequeued ops */
    for (i = 0; i < num_ops; ++i) {
        tp->iter_count = RTE_MAX(ops_enq[i]->ldpc_dec.iter_count,
                tp->iter_count);
    }
    if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
        ret = validate_ldpc_dec_op(ops_deq, num_ops, ref_op,
                tp->op_params->vector_mask);
        TEST_ASSERT_SUCCESS(ret, "Validation failed!");
    }

    rte_bbdev_dec_op_free_bulk(ops_enq, num_ops);
2216 double tb_len_bits = calc_ldpc_dec_TB_size(ref_op);
2218 tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
2219 ((double)total_time / (double)rte_get_tsc_hz());
2220 tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits)) /
2221 1000000.0) / ((double)total_time /
2222 (double)rte_get_tsc_hz());
    return TEST_SUCCESS;
}

static int
throughput_pmd_lcore_enc(void *arg)
{
    struct thread_params *tp = arg;
    uint16_t enq, deq;
2232 uint64_t total_time = 0, start_time;
2233 const uint16_t queue_id = tp->queue_id;
2234 const uint16_t burst_sz = tp->op_params->burst_sz;
2235 const uint16_t num_ops = tp->op_params->num_to_process;
2236 struct rte_bbdev_enc_op *ops_enq[num_ops];
2237 struct rte_bbdev_enc_op *ops_deq[num_ops];
2238 struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
    struct test_buffers *bufs = NULL;
    int i, j, ret;
    struct rte_bbdev_info info;
2242 uint16_t num_to_enq;
2244 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
2245 "BURST_SIZE should be <= %u", MAX_BURST);
2247 rte_bbdev_info_get(tp->dev_id, &info);
2249 TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
2250 "NUM_OPS cannot exceed %u for this device",
2251 info.drv.queue_size_lim);
2253 bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
    while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
        rte_pause();
    ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
            num_ops);
    TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
            num_ops);
2262 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
2263 copy_reference_enc_op(ops_enq, num_ops, 0, bufs->inputs,
2264 bufs->hard_outputs, ref_op);
2266 /* Set counter to validate the ordering */
2267 for (j = 0; j < num_ops; ++j)
2268 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
2270 for (i = 0; i < TEST_REPETITIONS; ++i) {
2272 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
2273 for (j = 0; j < num_ops; ++j)
2274 mbuf_reset(ops_enq[j]->turbo_enc.output.data);
2276 start_time = rte_rdtsc_precise();
2278 for (enq = 0, deq = 0; enq < num_ops;) {
2279 num_to_enq = burst_sz;
2281 if (unlikely(num_ops - enq < num_to_enq))
2282 num_to_enq = num_ops - enq;
2284 enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
2285 queue_id, &ops_enq[enq], num_to_enq);
2287 deq += rte_bbdev_dequeue_enc_ops(tp->dev_id,
                    queue_id, &ops_deq[deq], enq - deq);
        }

        /* dequeue the remaining */
        while (deq < enq)
            deq += rte_bbdev_dequeue_enc_ops(tp->dev_id,
                    queue_id, &ops_deq[deq], enq - deq);
        total_time += rte_rdtsc_precise() - start_time;
    }

    if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
        ret = validate_enc_op(ops_deq, num_ops, ref_op);
        TEST_ASSERT_SUCCESS(ret, "Validation failed!");
    }

    rte_bbdev_enc_op_free_bulk(ops_enq, num_ops);
2307 double tb_len_bits = calc_enc_TB_size(ref_op);
2309 tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
2310 ((double)total_time / (double)rte_get_tsc_hz());
2311 tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits))
2312 / 1000000.0) / ((double)total_time /
2313 (double)rte_get_tsc_hz());
    return TEST_SUCCESS;
}

static int
throughput_pmd_lcore_ldpc_enc(void *arg)
{
    struct thread_params *tp = arg;
    uint16_t enq, deq;
2323 uint64_t total_time = 0, start_time;
2324 const uint16_t queue_id = tp->queue_id;
2325 const uint16_t burst_sz = tp->op_params->burst_sz;
2326 const uint16_t num_ops = tp->op_params->num_to_process;
2327 struct rte_bbdev_enc_op *ops_enq[num_ops];
2328 struct rte_bbdev_enc_op *ops_deq[num_ops];
2329 struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
    struct test_buffers *bufs = NULL;
    int i, j, ret;
    struct rte_bbdev_info info;
2333 uint16_t num_to_enq;
2335 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
2336 "BURST_SIZE should be <= %u", MAX_BURST);
2338 rte_bbdev_info_get(tp->dev_id, &info);
2340 TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
2341 "NUM_OPS cannot exceed %u for this device",
2342 info.drv.queue_size_lim);
2344 bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
    while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
        rte_pause();
    ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
            num_ops);
    TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
            num_ops);
2353 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
2354 copy_reference_ldpc_enc_op(ops_enq, num_ops, 0, bufs->inputs,
2355 bufs->hard_outputs, ref_op);
2357 /* Set counter to validate the ordering */
2358 for (j = 0; j < num_ops; ++j)
2359 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
2361 for (i = 0; i < TEST_REPETITIONS; ++i) {
2363 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
2364 for (j = 0; j < num_ops; ++j)
                mbuf_reset(ops_enq[j]->ldpc_enc.output.data);
2367 start_time = rte_rdtsc_precise();
2369 for (enq = 0, deq = 0; enq < num_ops;) {
2370 num_to_enq = burst_sz;
2372 if (unlikely(num_ops - enq < num_to_enq))
2373 num_to_enq = num_ops - enq;
2375 enq += rte_bbdev_enqueue_ldpc_enc_ops(tp->dev_id,
2376 queue_id, &ops_enq[enq], num_to_enq);
2378 deq += rte_bbdev_dequeue_ldpc_enc_ops(tp->dev_id,
                    queue_id, &ops_deq[deq], enq - deq);
        }

        /* dequeue the remaining */
        while (deq < enq)
            deq += rte_bbdev_dequeue_ldpc_enc_ops(tp->dev_id,
                    queue_id, &ops_deq[deq], enq - deq);
        total_time += rte_rdtsc_precise() - start_time;
    }

    if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
        ret = validate_ldpc_enc_op(ops_deq, num_ops, ref_op);
        TEST_ASSERT_SUCCESS(ret, "Validation failed!");
    }

    rte_bbdev_enc_op_free_bulk(ops_enq, num_ops);
2398 double tb_len_bits = calc_ldpc_enc_TB_size(ref_op);
2400 tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
2401 ((double)total_time / (double)rte_get_tsc_hz());
2402 tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits))
2403 / 1000000.0) / ((double)total_time /
2404 (double)rte_get_tsc_hz());
    return TEST_SUCCESS;
}

static void
print_enc_throughput(struct thread_params *t_params, unsigned int used_cores)
{
    unsigned int iter = 0;
2413 double total_mops = 0, total_mbps = 0;
    for (iter = 0; iter < used_cores; iter++) {
        printf(
            "Throughput for core (%u): %.8lg Ops/s, %.8lg Mbps\n",
            t_params[iter].lcore_id, t_params[iter].ops_per_sec,
            t_params[iter].mbps);
        total_mops += t_params[iter].ops_per_sec;
        total_mbps += t_params[iter].mbps;
    }
    printf(
        "\nTotal throughput for %u cores: %.8lg MOPS, %.8lg Mbps\n",
        used_cores, total_mops, total_mbps);
}
static void
print_dec_throughput(struct thread_params *t_params, unsigned int used_cores)
{
    unsigned int iter = 0;
    double total_mops = 0, total_mbps = 0;
    uint8_t iter_count = 0;
    for (iter = 0; iter < used_cores; iter++) {
        printf(
            "Throughput for core (%u): %.8lg Ops/s, %.8lg Mbps @ max %u iterations\n",
            t_params[iter].lcore_id, t_params[iter].ops_per_sec,
            t_params[iter].mbps, t_params[iter].iter_count);
        total_mops += t_params[iter].ops_per_sec;
        total_mbps += t_params[iter].mbps;
        iter_count = RTE_MAX(iter_count, t_params[iter].iter_count);
    }
    printf(
        "\nTotal throughput for %u cores: %.8lg MOPS, %.8lg Mbps @ max %u iterations\n",
        used_cores, total_mops, total_mbps, iter_count);
}
/*
 * Test function that determines how long an enqueue + dequeue of a burst
 * takes on available lcores.
 */
static int
throughput_test(struct active_device *ad,
        struct test_op_params *op_params)
{
    int ret;
    unsigned int lcore_id, used_cores = 0;
2459 struct thread_params *t_params, *tp;
2460 struct rte_bbdev_info info;
2461 lcore_function_t *throughput_function;
2462 uint16_t num_lcores;
2463 const char *op_type_str;
2465 rte_bbdev_info_get(ad->dev_id, &info);
2467 op_type_str = rte_bbdev_op_type_str(test_vector.op_type);
2468 TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u",
2469 test_vector.op_type);
2471 printf("+ ------------------------------------------------------- +\n");
2472 printf("== test: throughput\ndev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, itr mode: %s, GHz: %lg\n",
2473 info.dev_name, ad->nb_queues, op_params->burst_sz,
2474 op_params->num_to_process, op_params->num_lcores,
            op_type_str,
            intr_enabled ? "Interrupt mode" : "PMD mode",
2477 (double)rte_get_tsc_hz() / 1000000000.0);
2479 /* Set number of lcores */
    num_lcores = (ad->nb_queues < (op_params->num_lcores))
            ? ad->nb_queues
            : op_params->num_lcores;
2484 /* Allocate memory for thread parameters structure */
2485 t_params = rte_zmalloc(NULL, num_lcores * sizeof(struct thread_params),
2486 RTE_CACHE_LINE_SIZE);
2487 TEST_ASSERT_NOT_NULL(t_params, "Failed to alloc %zuB for t_params",
2488 RTE_ALIGN(sizeof(struct thread_params) * num_lcores,
2489 RTE_CACHE_LINE_SIZE));
    if (intr_enabled) {
        if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
            throughput_function = throughput_intr_lcore_dec;
2493 throughput_function = throughput_intr_lcore_dec;
2494 else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
2495 throughput_function = throughput_intr_lcore_dec;
2496 else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
2497 throughput_function = throughput_intr_lcore_enc;
2498 else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
2499 throughput_function = throughput_intr_lcore_enc;
        else
            throughput_function = throughput_intr_lcore_enc;
2503 /* Dequeue interrupt callback registration */
2504 ret = rte_bbdev_callback_register(ad->dev_id,
                RTE_BBDEV_EVENT_DEQUEUE, dequeue_event_callback,
                t_params);
        if (ret < 0) {
            rte_free(t_params);
            return ret;
        }
    } else {
2512 if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
2513 throughput_function = throughput_pmd_lcore_dec;
2514 else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
2515 throughput_function = throughput_pmd_lcore_ldpc_dec;
2516 else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
2517 throughput_function = throughput_pmd_lcore_enc;
2518 else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
2519 throughput_function = throughput_pmd_lcore_ldpc_enc;
        else
            throughput_function = throughput_pmd_lcore_enc;
    }
2524 rte_atomic16_set(&op_params->sync, SYNC_WAIT);
2526 /* Master core is set at first entry */
2527 t_params[0].dev_id = ad->dev_id;
2528 t_params[0].lcore_id = rte_lcore_id();
2529 t_params[0].op_params = op_params;
2530 t_params[0].queue_id = ad->queue_ids[used_cores++];
2531 t_params[0].iter_count = 0;
2533 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        if (used_cores >= num_lcores)
            break;
2537 t_params[used_cores].dev_id = ad->dev_id;
2538 t_params[used_cores].lcore_id = lcore_id;
2539 t_params[used_cores].op_params = op_params;
2540 t_params[used_cores].queue_id = ad->queue_ids[used_cores];
2541 t_params[used_cores].iter_count = 0;
2543 rte_eal_remote_launch(throughput_function,
                &t_params[used_cores++], lcore_id);
    }
2547 rte_atomic16_set(&op_params->sync, SYNC_START);
2548 ret = throughput_function(&t_params[0]);
2550 /* Master core is always used */
2551 for (used_cores = 1; used_cores < num_lcores; used_cores++)
2552 ret |= rte_eal_wait_lcore(t_params[used_cores].lcore_id);
    /* Return if test failed */
    if (ret) {
        rte_free(t_params);
        return ret;
    }
2560 /* Print throughput if interrupts are disabled and test passed */
2561 if (!intr_enabled) {
2562 if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
2563 test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
2564 print_dec_throughput(t_params, num_lcores);
        else
            print_enc_throughput(t_params, num_lcores);
        rte_free(t_params);
        return ret;
    }
    /* In interrupt TC we need to wait for the interrupt callback to
     * dequeue all pending operations. Skip waiting for queues which
     * reported an error using the processing_status variable.
     * Wait for master lcore operations first.
     */
    tp = &t_params[0];
2577 while ((rte_atomic16_read(&tp->nb_dequeued) <
2578 op_params->num_to_process) &&
            (rte_atomic16_read(&tp->processing_status) !=
                    TEST_FAILED))
        rte_pause();
2583 tp->ops_per_sec /= TEST_REPETITIONS;
2584 tp->mbps /= TEST_REPETITIONS;
2585 ret |= (int)rte_atomic16_read(&tp->processing_status);
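    /* In interrupt mode each lcore accumulated its counters over all
     * TEST_REPETITIONS inside the dequeue callback, so the per-core rates
     * are normalised by TEST_REPETITIONS here (and for each slave lcore
     * below) before being summed into the printed totals.
     */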
2587 /* Wait for slave lcores operations */
2588 for (used_cores = 1; used_cores < num_lcores; used_cores++) {
2589 tp = &t_params[used_cores];
2591 while ((rte_atomic16_read(&tp->nb_dequeued) <
2592 op_params->num_to_process) &&
                (rte_atomic16_read(&tp->processing_status) !=
                        TEST_FAILED))
            rte_pause();
2597 tp->ops_per_sec /= TEST_REPETITIONS;
2598 tp->mbps /= TEST_REPETITIONS;
        ret |= (int)rte_atomic16_read(&tp->processing_status);
    }

    /* Print throughput if test passed */
    if (!ret) {
2604 if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
2605 test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
2606 print_dec_throughput(t_params, num_lcores);
2607 else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC ||
2608 test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
            print_enc_throughput(t_params, num_lcores);
    }

    rte_free(t_params);
    return ret;
}

static int
latency_test_dec(struct rte_mempool *mempool,
2618 struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op,
2619 int vector_mask, uint16_t dev_id, uint16_t queue_id,
2620 const uint16_t num_to_process, uint16_t burst_sz,
2621 uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
2623 int ret = TEST_SUCCESS;
2624 uint16_t i, j, dequeued;
2625 struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
2626 uint64_t start_time = 0, last_time = 0;
2628 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
2629 uint16_t enq = 0, deq = 0;
2630 bool first_time = true;
2633 if (unlikely(num_to_process - dequeued < burst_sz))
2634 burst_sz = num_to_process - dequeued;
2636 ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
2637 TEST_ASSERT_SUCCESS(ret,
2638 "rte_bbdev_dec_op_alloc_bulk() failed");
2639 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
            copy_reference_dec_op(ops_enq, burst_sz, dequeued,
                    bufs->inputs,
                    bufs->hard_outputs,
                    bufs->soft_outputs,
                    ref_op);
2646 /* Set counter to validate the ordering */
2647 for (j = 0; j < burst_sz; ++j)
2648 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
2650 start_time = rte_rdtsc_precise();
        enq = rte_bbdev_enqueue_dec_ops(dev_id, queue_id, &ops_enq[enq],
                burst_sz);
        TEST_ASSERT(enq == burst_sz,
                "Error enqueueing burst, expected %u, got %u",
                burst_sz, enq);

        /* Dequeue */
        do {
2660 deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
2661 &ops_deq[deq], burst_sz - deq);
2662 if (likely(first_time && (deq > 0))) {
                last_time = rte_rdtsc_precise() - start_time;
                first_time = false;
            }
2666 } while (unlikely(burst_sz != deq));
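        /* last_time captures the interval from just before the burst was
         * enqueued until the first successful dequeue, i.e. the end-to-end
         * latency of the burst through the device; later dequeue polls only
         * drain the remaining ops.
         */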
2668 *max_time = RTE_MAX(*max_time, last_time);
2669 *min_time = RTE_MIN(*min_time, last_time);
2670 *total_time += last_time;
2672 if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
            ret = validate_dec_op(ops_deq, burst_sz, ref_op,
                    vector_mask);
            TEST_ASSERT_SUCCESS(ret, "Validation failed!");
        }

        rte_bbdev_dec_op_free_bulk(ops_enq, deq);
        dequeued += deq;
    }

    return i;
}

static int
latency_test_ldpc_dec(struct rte_mempool *mempool,
2687 struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op,
2688 int vector_mask, uint16_t dev_id, uint16_t queue_id,
2689 const uint16_t num_to_process, uint16_t burst_sz,
2690 uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
2692 int ret = TEST_SUCCESS;
2693 uint16_t i, j, dequeued;
2694 struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
2695 uint64_t start_time = 0, last_time = 0;
2697 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
2698 uint16_t enq = 0, deq = 0;
2699 bool first_time = true;
2702 if (unlikely(num_to_process - dequeued < burst_sz))
2703 burst_sz = num_to_process - dequeued;
2705 ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
2706 TEST_ASSERT_SUCCESS(ret,
2707 "rte_bbdev_dec_op_alloc_bulk() failed");
2708 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
            copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued,
                    bufs->inputs,
                    bufs->hard_outputs,
                    bufs->soft_outputs,
                    bufs->harq_inputs,
                    bufs->harq_outputs,
                    ref_op);
2717 /* Set counter to validate the ordering */
2718 for (j = 0; j < burst_sz; ++j)
2719 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
2721 start_time = rte_rdtsc_precise();
2723 enq = rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
2724 &ops_enq[enq], burst_sz);
2725 TEST_ASSERT(enq == burst_sz,
                "Error enqueueing burst, expected %u, got %u",
                burst_sz, enq);

        /* Dequeue */
        do {
2731 deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
2732 &ops_deq[deq], burst_sz - deq);
2733 if (likely(first_time && (deq > 0))) {
                last_time = rte_rdtsc_precise() - start_time;
                first_time = false;
            }
2737 } while (unlikely(burst_sz != deq));
2739 *max_time = RTE_MAX(*max_time, last_time);
2740 *min_time = RTE_MIN(*min_time, last_time);
2741 *total_time += last_time;
2743 if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
            ret = validate_ldpc_dec_op(ops_deq, burst_sz, ref_op,
                    vector_mask);
            TEST_ASSERT_SUCCESS(ret, "Validation failed!");
        }

        rte_bbdev_dec_op_free_bulk(ops_enq, deq);
        dequeued += deq;
    }

    return i;
}

static int
latency_test_enc(struct rte_mempool *mempool,
2758 struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
2759 uint16_t dev_id, uint16_t queue_id,
2760 const uint16_t num_to_process, uint16_t burst_sz,
2761 uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
2763 int ret = TEST_SUCCESS;
2764 uint16_t i, j, dequeued;
2765 struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
2766 uint64_t start_time = 0, last_time = 0;
2768 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
2769 uint16_t enq = 0, deq = 0;
2770 bool first_time = true;
2773 if (unlikely(num_to_process - dequeued < burst_sz))
2774 burst_sz = num_to_process - dequeued;
2776 ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
2777 TEST_ASSERT_SUCCESS(ret,
2778 "rte_bbdev_enc_op_alloc_bulk() failed");
2779 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
            copy_reference_enc_op(ops_enq, burst_sz, dequeued,
                    bufs->inputs,
                    bufs->hard_outputs,
                    ref_op);
2785 /* Set counter to validate the ordering */
2786 for (j = 0; j < burst_sz; ++j)
2787 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
2789 start_time = rte_rdtsc_precise();
        enq = rte_bbdev_enqueue_enc_ops(dev_id, queue_id, &ops_enq[enq],
                burst_sz);
        TEST_ASSERT(enq == burst_sz,
                "Error enqueueing burst, expected %u, got %u",
                burst_sz, enq);

        /* Dequeue */
        do {
2799 deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
2800 &ops_deq[deq], burst_sz - deq);
2801 if (likely(first_time && (deq > 0))) {
                /* Record time to first dequeue with '=' (not '+='), so a
                 * value from a previous burst cannot accumulate into this
                 * sample; this matches the decoder latency variants.
                 */
                last_time = rte_rdtsc_precise() - start_time;
                first_time = false;
            }
2805 } while (unlikely(burst_sz != deq));
2807 *max_time = RTE_MAX(*max_time, last_time);
2808 *min_time = RTE_MIN(*min_time, last_time);
2809 *total_time += last_time;
2811 if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
2812 ret = validate_enc_op(ops_deq, burst_sz, ref_op);
            TEST_ASSERT_SUCCESS(ret, "Validation failed!");
        }

        rte_bbdev_enc_op_free_bulk(ops_enq, deq);
        dequeued += deq;
    }

    return i;
}

static int
latency_test_ldpc_enc(struct rte_mempool *mempool,
2825 struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
2826 uint16_t dev_id, uint16_t queue_id,
2827 const uint16_t num_to_process, uint16_t burst_sz,
2828 uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
2830 int ret = TEST_SUCCESS;
2831 uint16_t i, j, dequeued;
2832 struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
2833 uint64_t start_time = 0, last_time = 0;
2835 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
2836 uint16_t enq = 0, deq = 0;
2837 bool first_time = true;
2840 if (unlikely(num_to_process - dequeued < burst_sz))
2841 burst_sz = num_to_process - dequeued;
2843 ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
2845 TEST_ASSERT_SUCCESS(ret,
2846 "rte_bbdev_enc_op_alloc_bulk() failed");
2847 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
            copy_reference_ldpc_enc_op(ops_enq, burst_sz, dequeued,
                    bufs->inputs,
                    bufs->hard_outputs,
                    ref_op);
2853 /* Set counter to validate the ordering */
2854 for (j = 0; j < burst_sz; ++j)
2855 ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
2857 start_time = rte_rdtsc_precise();
2864 enq = rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
2865 &ops_enq[enq], burst_sz);
2866 TEST_ASSERT(enq == burst_sz,
                "Error enqueueing burst, expected %u, got %u",
                burst_sz, enq);

        /* Dequeue */
        do {
2872 deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
2873 &ops_deq[deq], burst_sz - deq);
2874 if (likely(first_time && (deq > 0))) {
                last_time = rte_rdtsc_precise() - start_time;
                first_time = false;
            }
2878 } while (unlikely(burst_sz != deq));
2880 *max_time = RTE_MAX(*max_time, last_time);
2881 *min_time = RTE_MIN(*min_time, last_time);
2882 *total_time += last_time;
2884 if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
            ret = validate_ldpc_enc_op(ops_deq, burst_sz, ref_op);
            TEST_ASSERT_SUCCESS(ret, "Validation failed!");
        }
        rte_bbdev_enc_op_free_bulk(ops_enq, deq);
        dequeued += deq;
    }

    return i;
}

static int
latency_test(struct active_device *ad,
        struct test_op_params *op_params)
{
    int iter;
2906 uint16_t burst_sz = op_params->burst_sz;
2907 const uint16_t num_to_process = op_params->num_to_process;
2908 const enum rte_bbdev_op_type op_type = test_vector.op_type;
2909 const uint16_t queue_id = ad->queue_ids[0];
2910 struct test_buffers *bufs = NULL;
2911 struct rte_bbdev_info info;
2912 uint64_t total_time, min_time, max_time;
2913 const char *op_type_str;
2915 total_time = max_time = 0;
2916 min_time = UINT64_MAX;
2918 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
2919 "BURST_SIZE should be <= %u", MAX_BURST);
2921 rte_bbdev_info_get(ad->dev_id, &info);
2922 bufs = &op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
2924 op_type_str = rte_bbdev_op_type_str(op_type);
2925 TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
2927 printf("+ ------------------------------------------------------- +\n");
2928 printf("== test: validation/latency\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
2929 info.dev_name, burst_sz, num_to_process, op_type_str);
2931 if (op_type == RTE_BBDEV_OP_TURBO_DEC)
2932 iter = latency_test_dec(op_params->mp, bufs,
2933 op_params->ref_dec_op, op_params->vector_mask,
2934 ad->dev_id, queue_id, num_to_process,
2935 burst_sz, &total_time, &min_time, &max_time);
2936 else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
2937 iter = latency_test_enc(op_params->mp, bufs,
2938 op_params->ref_enc_op, ad->dev_id, queue_id,
2939 num_to_process, burst_sz, &total_time,
2940 &min_time, &max_time);
2941 else if (op_type == RTE_BBDEV_OP_LDPC_ENC)
2942 iter = latency_test_ldpc_enc(op_params->mp, bufs,
2943 op_params->ref_enc_op, ad->dev_id, queue_id,
2944 num_to_process, burst_sz, &total_time,
2945 &min_time, &max_time);
2946 else if (op_type == RTE_BBDEV_OP_LDPC_DEC)
2947 iter = latency_test_ldpc_dec(op_params->mp, bufs,
2948 op_params->ref_dec_op, op_params->vector_mask,
2949 ad->dev_id, queue_id, num_to_process,
2950 burst_sz, &total_time, &min_time, &max_time);
    else
        iter = latency_test_enc(op_params->mp, bufs,
2953 op_params->ref_enc_op,
2954 ad->dev_id, queue_id,
2955 num_to_process, burst_sz, &total_time,
2956 &min_time, &max_time);
    if (iter <= 0)
        return TEST_FAILED;

    printf("Operation latency:\n"
2962 "\tavg: %lg cycles, %lg us\n"
2963 "\tmin: %lg cycles, %lg us\n"
2964 "\tmax: %lg cycles, %lg us\n",
2965 (double)total_time / (double)iter,
2966 (double)(total_time * 1000000) / (double)iter /
2967 (double)rte_get_tsc_hz(), (double)min_time,
2968 (double)(min_time * 1000000) / (double)rte_get_tsc_hz(),
2969 (double)max_time, (double)(max_time * 1000000) /
2970 (double)rte_get_tsc_hz());
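    /* Conversion used above: a TSC cycle count becomes microseconds via
     *   us = cycles * 1000000 / rte_get_tsc_hz()
     * and the average divides total_time by the number of measured bursts
     * (iter) before converting.
     */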
    return TEST_SUCCESS;
}

#ifdef RTE_BBDEV_OFFLOAD_COST
static int
get_bbdev_queue_stats(uint16_t dev_id, uint16_t queue_id,
        struct rte_bbdev_stats *stats)
{
2980 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
2981 struct rte_bbdev_stats *q_stats;
    if (queue_id >= dev->data->num_queues)
        return -1;
2986 q_stats = &dev->data->queues[queue_id].queue_stats;
2988 stats->enqueued_count = q_stats->enqueued_count;
2989 stats->dequeued_count = q_stats->dequeued_count;
2990 stats->enqueue_err_count = q_stats->enqueue_err_count;
2991 stats->dequeue_err_count = q_stats->dequeue_err_count;
    stats->acc_offload_cycles = q_stats->acc_offload_cycles;

    return 0;
}

static int
offload_latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs,
2999 struct rte_bbdev_dec_op *ref_op, uint16_t dev_id,
3000 uint16_t queue_id, const uint16_t num_to_process,
        uint16_t burst_sz, struct test_time_stats *time_st)
{
3003 int i, dequeued, ret;
3004 struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
3005 uint64_t enq_start_time, deq_start_time;
3006 uint64_t enq_sw_last_time, deq_last_time;
3007 struct rte_bbdev_stats stats;
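    /* Measurement model used throughout the offload tests: the driver
     * reports, via stats.acc_offload_cycles, how many cycles of the enqueue
     * call were spent waiting on the accelerator itself. Subtracting that
     * from the rdtsc window around the enqueue leaves the pure software
     * (driver) cost:
     *   enq_sw_last_time = (t_end - t_start) - stats.acc_offload_cycles
     */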
3009 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
3010 uint16_t enq = 0, deq = 0;
3012 if (unlikely(num_to_process - dequeued < burst_sz))
3013 burst_sz = num_to_process - dequeued;
3015 rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
3016 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
            copy_reference_dec_op(ops_enq, burst_sz, dequeued,
                    bufs->inputs,
                    bufs->hard_outputs,
                    bufs->soft_outputs,
                    ref_op);
3023 /* Start time meas for enqueue function offload latency */
3024 enq_start_time = rte_rdtsc_precise();
        do {
            enq += rte_bbdev_enqueue_dec_ops(dev_id, queue_id,
3027 &ops_enq[enq], burst_sz - enq);
3028 } while (unlikely(burst_sz != enq));
3030 ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
3031 TEST_ASSERT_SUCCESS(ret,
                "Failed to get stats for queue (%u) of device (%u)",
                queue_id, dev_id);
3035 enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
3036 stats.acc_offload_cycles;
        time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
                enq_sw_last_time);
        time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
                enq_sw_last_time);
        time_st->enq_sw_total_time += enq_sw_last_time;
3043 time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
3044 stats.acc_offload_cycles);
3045 time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
3046 stats.acc_offload_cycles);
3047 time_st->enq_acc_total_time += stats.acc_offload_cycles;
        /* give time for device to process ops */
        rte_delay_us(200);
3052 /* Start time meas for dequeue function offload latency */
3053 deq_start_time = rte_rdtsc_precise();
3054 /* Dequeue one operation */
        do {
            deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
                    &ops_deq[deq], 1);
        } while (unlikely(deq != 1));
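        /* Only the first dequeue is timed: requesting a single op isolates
         * the fixed per-call driver cost from the cost of copying out a
         * whole burst. The remaining ops are drained outside the measured
         * window below.
         */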
3060 deq_last_time = rte_rdtsc_precise() - deq_start_time;
        time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
                deq_last_time);
        time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
                deq_last_time);
        time_st->deq_total_time += deq_last_time;
3067 /* Dequeue remaining operations if needed*/
3068 while (burst_sz != deq)
3069 deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
3070 &ops_deq[deq], burst_sz - deq);
        rte_bbdev_dec_op_free_bulk(ops_enq, deq);
        dequeued += deq;
    }

    return i;
}

static int
offload_latency_test_ldpc_dec(struct rte_mempool *mempool,
3081 struct test_buffers *bufs,
3082 struct rte_bbdev_dec_op *ref_op, uint16_t dev_id,
3083 uint16_t queue_id, const uint16_t num_to_process,
        uint16_t burst_sz, struct test_time_stats *time_st)
{
3086 int i, dequeued, ret;
3087 struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
3088 uint64_t enq_start_time, deq_start_time;
3089 uint64_t enq_sw_last_time, deq_last_time;
3090 struct rte_bbdev_stats stats;
3092 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
3093 uint16_t enq = 0, deq = 0;
3095 if (unlikely(num_to_process - dequeued < burst_sz))
3096 burst_sz = num_to_process - dequeued;
3098 rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
3099 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
            copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued,
                    bufs->inputs,
                    bufs->hard_outputs,
                    bufs->soft_outputs,
                    bufs->harq_inputs,
                    bufs->harq_outputs,
                    ref_op);
3108 /* Start time meas for enqueue function offload latency */
3109 enq_start_time = rte_rdtsc_precise();
        do {
            enq += rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
3112 &ops_enq[enq], burst_sz - enq);
3113 } while (unlikely(burst_sz != enq));
3115 ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
3116 TEST_ASSERT_SUCCESS(ret,
                "Failed to get stats for queue (%u) of device (%u)",
                queue_id, dev_id);
3120 enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
3121 stats.acc_offload_cycles;
        time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
                enq_sw_last_time);
        time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
                enq_sw_last_time);
        time_st->enq_sw_total_time += enq_sw_last_time;
3128 time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
3129 stats.acc_offload_cycles);
3130 time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
3131 stats.acc_offload_cycles);
3132 time_st->enq_acc_total_time += stats.acc_offload_cycles;
        /* give time for device to process ops */
        rte_delay_us(200);
3137 /* Start time meas for dequeue function offload latency */
3138 deq_start_time = rte_rdtsc_precise();
3139 /* Dequeue one operation */
        do {
            deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
                    &ops_deq[deq], 1);
        } while (unlikely(deq != 1));
3145 deq_last_time = rte_rdtsc_precise() - deq_start_time;
        time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
                deq_last_time);
        time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
                deq_last_time);
        time_st->deq_total_time += deq_last_time;
3152 /* Dequeue remaining operations if needed*/
3153 while (burst_sz != deq)
            deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
                    &ops_deq[deq], burst_sz - deq);
        rte_bbdev_dec_op_free_bulk(ops_enq, deq);
        dequeued += deq;
    }

    return i;
}

static int
offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs,
3166 struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
3167 uint16_t queue_id, const uint16_t num_to_process,
        uint16_t burst_sz, struct test_time_stats *time_st)
{
3170 int i, dequeued, ret;
3171 struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
3172 uint64_t enq_start_time, deq_start_time;
3173 uint64_t enq_sw_last_time, deq_last_time;
3174 struct rte_bbdev_stats stats;
3176 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
3177 uint16_t enq = 0, deq = 0;
3179 if (unlikely(num_to_process - dequeued < burst_sz))
3180 burst_sz = num_to_process - dequeued;
3182 ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
3183 TEST_ASSERT_SUCCESS(ret, "rte_bbdev_op_alloc_bulk() failed");
3184 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
            copy_reference_enc_op(ops_enq, burst_sz, dequeued,
                    bufs->inputs,
                    bufs->hard_outputs,
                    ref_op);
3190 /* Start time meas for enqueue function offload latency */
3191 enq_start_time = rte_rdtsc_precise();
        do {
            enq += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
3194 &ops_enq[enq], burst_sz - enq);
3195 } while (unlikely(burst_sz != enq));
3197 ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
3198 TEST_ASSERT_SUCCESS(ret,
                "Failed to get stats for queue (%u) of device (%u)",
                queue_id, dev_id);
3202 enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
3203 stats.acc_offload_cycles;
        time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
                enq_sw_last_time);
        time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
                enq_sw_last_time);
        time_st->enq_sw_total_time += enq_sw_last_time;
3210 time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
3211 stats.acc_offload_cycles);
3212 time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
3213 stats.acc_offload_cycles);
3214 time_st->enq_acc_total_time += stats.acc_offload_cycles;
        /* give time for device to process ops */
        rte_delay_us(200);
3219 /* Start time meas for dequeue function offload latency */
3220 deq_start_time = rte_rdtsc_precise();
3221 /* Dequeue one operation */
        do {
            deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
                    &ops_deq[deq], 1);
        } while (unlikely(deq != 1));
3227 deq_last_time = rte_rdtsc_precise() - deq_start_time;
        time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
                deq_last_time);
        time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
                deq_last_time);
        time_st->deq_total_time += deq_last_time;
        /* Dequeue remaining operations if needed */
        while (burst_sz != deq)
3235 deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
3236 &ops_deq[deq], burst_sz - deq);
        rte_bbdev_enc_op_free_bulk(ops_enq, deq);
        dequeued += deq;
    }

    return i;
}

static int
offload_latency_test_ldpc_enc(struct rte_mempool *mempool,
3247 struct test_buffers *bufs,
3248 struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
3249 uint16_t queue_id, const uint16_t num_to_process,
        uint16_t burst_sz, struct test_time_stats *time_st)
{
3252 int i, dequeued, ret;
3253 struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
3254 uint64_t enq_start_time, deq_start_time;
3255 uint64_t enq_sw_last_time, deq_last_time;
3256 struct rte_bbdev_stats stats;
3258 for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
3259 uint16_t enq = 0, deq = 0;
3261 if (unlikely(num_to_process - dequeued < burst_sz))
3262 burst_sz = num_to_process - dequeued;
3264 ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
3265 TEST_ASSERT_SUCCESS(ret, "rte_bbdev_op_alloc_bulk() failed");
3266 if (test_vector.op_type != RTE_BBDEV_OP_NONE)
            copy_reference_ldpc_enc_op(ops_enq, burst_sz, dequeued,
                    bufs->inputs,
                    bufs->hard_outputs,
                    ref_op);
3272 /* Start time meas for enqueue function offload latency */
3273 enq_start_time = rte_rdtsc_precise();
        do {
            enq += rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
3276 &ops_enq[enq], burst_sz - enq);
3277 } while (unlikely(burst_sz != enq));
3279 ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
3280 TEST_ASSERT_SUCCESS(ret,
                "Failed to get stats for queue (%u) of device (%u)",
                queue_id, dev_id);
3284 enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
3285 stats.acc_offload_cycles;
        time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
                enq_sw_last_time);
        time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
                enq_sw_last_time);
        time_st->enq_sw_total_time += enq_sw_last_time;
3292 time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
3293 stats.acc_offload_cycles);
3294 time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
3295 stats.acc_offload_cycles);
3296 time_st->enq_acc_total_time += stats.acc_offload_cycles;
        /* give time for device to process ops */
        rte_delay_us(200);
3301 /* Start time meas for dequeue function offload latency */
3302 deq_start_time = rte_rdtsc_precise();
3303 /* Dequeue one operation */
        do {
            deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
                    &ops_deq[deq], 1);
        } while (unlikely(deq != 1));
3309 deq_last_time = rte_rdtsc_precise() - deq_start_time;
        time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
                deq_last_time);
        time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
                deq_last_time);
        time_st->deq_total_time += deq_last_time;
        /* Dequeue remaining operations if needed */
        while (burst_sz != deq)
3317 deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
3318 &ops_deq[deq], burst_sz - deq);
        rte_bbdev_enc_op_free_bulk(ops_enq, deq);
        dequeued += deq;
    }

    return i;
}
#endif

static int
offload_cost_test(struct active_device *ad,
        struct test_op_params *op_params)
{
#ifndef RTE_BBDEV_OFFLOAD_COST
    RTE_SET_USED(ad);
    RTE_SET_USED(op_params);
3335 printf("Offload latency test is disabled.\n");
3336 printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
    return TEST_SKIPPED;
#else
    int iter;
    uint16_t burst_sz = op_params->burst_sz;
3341 const uint16_t num_to_process = op_params->num_to_process;
3342 const enum rte_bbdev_op_type op_type = test_vector.op_type;
3343 const uint16_t queue_id = ad->queue_ids[0];
3344 struct test_buffers *bufs = NULL;
3345 struct rte_bbdev_info info;
3346 const char *op_type_str;
3347 struct test_time_stats time_st;
3349 memset(&time_st, 0, sizeof(struct test_time_stats));
3350 time_st.enq_sw_min_time = UINT64_MAX;
3351 time_st.enq_acc_min_time = UINT64_MAX;
3352 time_st.deq_min_time = UINT64_MAX;
3354 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
3355 "BURST_SIZE should be <= %u", MAX_BURST);
3357 rte_bbdev_info_get(ad->dev_id, &info);
3358 bufs = &op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
3360 op_type_str = rte_bbdev_op_type_str(op_type);
3361 TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
3363 printf("+ ------------------------------------------------------- +\n");
3364 printf("== test: offload latency test\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
3365 info.dev_name, burst_sz, num_to_process, op_type_str);
3367 if (op_type == RTE_BBDEV_OP_TURBO_DEC)
3368 iter = offload_latency_test_dec(op_params->mp, bufs,
3369 op_params->ref_dec_op, ad->dev_id, queue_id,
3370 num_to_process, burst_sz, &time_st);
3371 else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
3372 iter = offload_latency_test_enc(op_params->mp, bufs,
3373 op_params->ref_enc_op, ad->dev_id, queue_id,
3374 num_to_process, burst_sz, &time_st);
3375 else if (op_type == RTE_BBDEV_OP_LDPC_ENC)
3376 iter = offload_latency_test_ldpc_enc(op_params->mp, bufs,
3377 op_params->ref_enc_op, ad->dev_id, queue_id,
3378 num_to_process, burst_sz, &time_st);
3379 else if (op_type == RTE_BBDEV_OP_LDPC_DEC)
3380 iter = offload_latency_test_ldpc_dec(op_params->mp, bufs,
3381 op_params->ref_dec_op, ad->dev_id, queue_id,
3382 num_to_process, burst_sz, &time_st);
    else
        iter = offload_latency_test_enc(op_params->mp, bufs,
3385 op_params->ref_enc_op, ad->dev_id, queue_id,
3386 num_to_process, burst_sz, &time_st);
    if (iter <= 0)
        return TEST_FAILED;

    printf("Enqueue driver offload cost latency:\n"
3392 "\tavg: %lg cycles, %lg us\n"
3393 "\tmin: %lg cycles, %lg us\n"
3394 "\tmax: %lg cycles, %lg us\n"
3395 "Enqueue accelerator offload cost latency:\n"
3396 "\tavg: %lg cycles, %lg us\n"
3397 "\tmin: %lg cycles, %lg us\n"
3398 "\tmax: %lg cycles, %lg us\n",
3399 (double)time_st.enq_sw_total_time / (double)iter,
3400 (double)(time_st.enq_sw_total_time * 1000000) /
3401 (double)iter / (double)rte_get_tsc_hz(),
3402 (double)time_st.enq_sw_min_time,
3403 (double)(time_st.enq_sw_min_time * 1000000) /
3404 rte_get_tsc_hz(), (double)time_st.enq_sw_max_time,
3405 (double)(time_st.enq_sw_max_time * 1000000) /
            rte_get_tsc_hz(), (double)time_st.enq_acc_total_time /
            (double)iter,
            (double)(time_st.enq_acc_total_time * 1000000) /
3409 (double)iter / (double)rte_get_tsc_hz(),
3410 (double)time_st.enq_acc_min_time,
3411 (double)(time_st.enq_acc_min_time * 1000000) /
3412 rte_get_tsc_hz(), (double)time_st.enq_acc_max_time,
            (double)(time_st.enq_acc_max_time * 1000000) /
            rte_get_tsc_hz());
3416 printf("Dequeue offload cost latency - one op:\n"
3417 "\tavg: %lg cycles, %lg us\n"
3418 "\tmin: %lg cycles, %lg us\n"
3419 "\tmax: %lg cycles, %lg us\n",
3420 (double)time_st.deq_total_time / (double)iter,
3421 (double)(time_st.deq_total_time * 1000000) /
3422 (double)iter / (double)rte_get_tsc_hz(),
3423 (double)time_st.deq_min_time,
3424 (double)(time_st.deq_min_time * 1000000) /
3425 rte_get_tsc_hz(), (double)time_st.deq_max_time,
            (double)(time_st.deq_max_time * 1000000) /
            rte_get_tsc_hz());

    return TEST_SUCCESS;
#endif
}
3433 #ifdef RTE_BBDEV_OFFLOAD_COST
static int
offload_latency_empty_q_test_dec(uint16_t dev_id, uint16_t queue_id,
3436 const uint16_t num_to_process, uint16_t burst_sz,
3437 uint64_t *deq_total_time, uint64_t *deq_min_time,
        uint64_t *deq_max_time)
{
    int i, deq_total;
    struct rte_bbdev_dec_op *ops[MAX_BURST];
3442 uint64_t deq_start_time, deq_last_time;
3444 /* Test deq offload latency from an empty queue */
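    /* With nothing enqueued, every dequeue call returns immediately with
     * zero ops, so the cycles measured below are in effect the fixed
     * software cost of a dequeue call itself, a lower bound on the per-call
     * PMD overhead.
     */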
3446 for (i = 0, deq_total = 0; deq_total < num_to_process;
3447 ++i, deq_total += burst_sz) {
3448 deq_start_time = rte_rdtsc_precise();
3450 if (unlikely(num_to_process - deq_total < burst_sz))
3451 burst_sz = num_to_process - deq_total;
3452 rte_bbdev_dequeue_dec_ops(dev_id, queue_id, ops, burst_sz);
3454 deq_last_time = rte_rdtsc_precise() - deq_start_time;
3455 *deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
3456 *deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
        *deq_total_time += deq_last_time;
    }

    return i;
}

static int
offload_latency_empty_q_test_enc(uint16_t dev_id, uint16_t queue_id,
3465 const uint16_t num_to_process, uint16_t burst_sz,
3466 uint64_t *deq_total_time, uint64_t *deq_min_time,
        uint64_t *deq_max_time)
{
    int i, deq_total;
    struct rte_bbdev_enc_op *ops[MAX_BURST];
3471 uint64_t deq_start_time, deq_last_time;
3473 /* Test deq offload latency from an empty queue */
3474 for (i = 0, deq_total = 0; deq_total < num_to_process;
3475 ++i, deq_total += burst_sz) {
3476 deq_start_time = rte_rdtsc_precise();
3478 if (unlikely(num_to_process - deq_total < burst_sz))
3479 burst_sz = num_to_process - deq_total;
3480 rte_bbdev_dequeue_enc_ops(dev_id, queue_id, ops, burst_sz);
3482 deq_last_time = rte_rdtsc_precise() - deq_start_time;
3483 *deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
3484 *deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
        *deq_total_time += deq_last_time;
    }

    return i;
}
#endif

static int
offload_latency_empty_q_test(struct active_device *ad,
        struct test_op_params *op_params)
{
#ifndef RTE_BBDEV_OFFLOAD_COST
    RTE_SET_USED(ad);
    RTE_SET_USED(op_params);
3499 printf("Offload latency empty dequeue test is disabled.\n");
3500 printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
    return TEST_SKIPPED;
#else
    int iter;
    uint64_t deq_total_time, deq_min_time, deq_max_time;
3505 uint16_t burst_sz = op_params->burst_sz;
3506 const uint16_t num_to_process = op_params->num_to_process;
3507 const enum rte_bbdev_op_type op_type = test_vector.op_type;
3508 const uint16_t queue_id = ad->queue_ids[0];
3509 struct rte_bbdev_info info;
3510 const char *op_type_str;
3512 deq_total_time = deq_max_time = 0;
3513 deq_min_time = UINT64_MAX;
3515 TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
3516 "BURST_SIZE should be <= %u", MAX_BURST);
3518 rte_bbdev_info_get(ad->dev_id, &info);
3520 op_type_str = rte_bbdev_op_type_str(op_type);
3521 TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
3523 printf("+ ------------------------------------------------------- +\n");
3524 printf("== test: offload latency empty dequeue\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
3525 info.dev_name, burst_sz, num_to_process, op_type_str);
3527 if (op_type == RTE_BBDEV_OP_TURBO_DEC)
3528 iter = offload_latency_empty_q_test_dec(ad->dev_id, queue_id,
3529 num_to_process, burst_sz, &deq_total_time,
3530 &deq_min_time, &deq_max_time);
    else
        iter = offload_latency_empty_q_test_enc(ad->dev_id, queue_id,
3533 num_to_process, burst_sz, &deq_total_time,
3534 &deq_min_time, &deq_max_time);
    if (iter <= 0)
        return TEST_FAILED;

    printf("Empty dequeue offload:\n"
3540 "\tavg: %lg cycles, %lg us\n"
3541 "\tmin: %lg cycles, %lg us\n"
3542 "\tmax: %lg cycles, %lg us\n",
3543 (double)deq_total_time / (double)iter,
3544 (double)(deq_total_time * 1000000) / (double)iter /
3545 (double)rte_get_tsc_hz(), (double)deq_min_time,
3546 (double)(deq_min_time * 1000000) / rte_get_tsc_hz(),
            (double)deq_max_time, (double)(deq_max_time * 1000000) /
            rte_get_tsc_hz());

    return TEST_SUCCESS;
#endif
}

static int
throughput_tc(void)
{
    return run_test_case(throughput_test);
}
static int
offload_cost_tc(void)
{
    return run_test_case(offload_cost_test);
}
static int
offload_latency_empty_q_tc(void)
{
    return run_test_case(offload_latency_empty_q_test);
}
static int
latency_tc(void)
{
    return run_test_case(latency_test);
}
static int
interrupt_tc(void)
{
    return run_test_case(throughput_test);
}
3584 static struct unit_test_suite bbdev_throughput_testsuite = {
3585 .suite_name = "BBdev Throughput Tests",
3586 .setup = testsuite_setup,
3587 .teardown = testsuite_teardown,
3588 .unit_test_cases = {
3589 TEST_CASE_ST(ut_setup, ut_teardown, throughput_tc),
        TEST_CASES_END() /**< NULL terminate unit test array */
    }
};

static struct unit_test_suite bbdev_validation_testsuite = {
3595 .suite_name = "BBdev Validation Tests",
3596 .setup = testsuite_setup,
3597 .teardown = testsuite_teardown,
3598 .unit_test_cases = {
3599 TEST_CASE_ST(ut_setup, ut_teardown, latency_tc),
        TEST_CASES_END() /**< NULL terminate unit test array */
    }
};

static struct unit_test_suite bbdev_latency_testsuite = {
3605 .suite_name = "BBdev Latency Tests",
3606 .setup = testsuite_setup,
3607 .teardown = testsuite_teardown,
3608 .unit_test_cases = {
3609 TEST_CASE_ST(ut_setup, ut_teardown, latency_tc),
        TEST_CASES_END() /**< NULL terminate unit test array */
    }
};

static struct unit_test_suite bbdev_offload_cost_testsuite = {
3615 .suite_name = "BBdev Offload Cost Tests",
3616 .setup = testsuite_setup,
3617 .teardown = testsuite_teardown,
3618 .unit_test_cases = {
3619 TEST_CASE_ST(ut_setup, ut_teardown, offload_cost_tc),
3620 TEST_CASE_ST(ut_setup, ut_teardown, offload_latency_empty_q_tc),
        TEST_CASES_END() /**< NULL terminate unit test array */
    }
};

static struct unit_test_suite bbdev_interrupt_testsuite = {
3626 .suite_name = "BBdev Interrupt Tests",
3627 .setup = interrupt_testsuite_setup,
3628 .teardown = testsuite_teardown,
3629 .unit_test_cases = {
3630 TEST_CASE_ST(ut_setup, ut_teardown, interrupt_tc),
        TEST_CASES_END() /**< NULL terminate unit test array */
    }
};

REGISTER_TEST_COMMAND(throughput, bbdev_throughput_testsuite);
3636 REGISTER_TEST_COMMAND(validation, bbdev_validation_testsuite);
3637 REGISTER_TEST_COMMAND(latency, bbdev_latency_testsuite);
3638 REGISTER_TEST_COMMAND(offload, bbdev_offload_cost_testsuite);
3639 REGISTER_TEST_COMMAND(interrupt, bbdev_interrupt_testsuite);