diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index 7ab61ef..45c0d62 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
 #include <rte_hexdump.h>
 #include <rte_interrupts.h>
 
-#ifdef RTE_LIBRTE_PMD_FPGA_LTE_FEC
-#include <fpga_lte_fec.h>
-#endif
-
 #include "main.h"
 #include "test_bbdev_vector.h"
 
 #define MAX_QUEUES RTE_MAX_LCORE
 #define TEST_REPETITIONS 1000
 
-#ifdef RTE_LIBRTE_PMD_FPGA_LTE_FEC
-#define FPGA_PF_DRIVER_NAME ("intel_fpga_lte_fec_pf")
-#define FPGA_VF_DRIVER_NAME ("intel_fpga_lte_fec_vf")
-#define VF_UL_QUEUE_VALUE 4
-#define VF_DL_QUEUE_VALUE 4
-#define UL_BANDWIDTH 3
-#define DL_BANDWIDTH 3
-#define UL_LOAD_BALANCE 128
-#define DL_LOAD_BALANCE 128
-#define FLR_TIMEOUT 610
+#ifdef RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC
+#include <fpga_lte_fec.h>
+#define FPGA_LTE_PF_DRIVER_NAME ("intel_fpga_lte_fec_pf")
+#define FPGA_LTE_VF_DRIVER_NAME ("intel_fpga_lte_fec_vf")
+#define VF_UL_4G_QUEUE_VALUE 4
+#define VF_DL_4G_QUEUE_VALUE 4
+#define UL_4G_BANDWIDTH 3
+#define DL_4G_BANDWIDTH 3
+#define UL_4G_LOAD_BALANCE 128
+#define DL_4G_LOAD_BALANCE 128
+#define FLR_4G_TIMEOUT 610
+#endif
+
+#ifdef RTE_LIBRTE_PMD_BBDEV_FPGA_5GNR_FEC
+#include <rte_pmd_fpga_5gnr_fec.h>
+#define FPGA_5GNR_PF_DRIVER_NAME ("intel_fpga_5gnr_fec_pf")
+#define FPGA_5GNR_VF_DRIVER_NAME ("intel_fpga_5gnr_fec_vf")
+#define VF_UL_5G_QUEUE_VALUE 4
+#define VF_DL_5G_QUEUE_VALUE 4
+#define UL_5G_BANDWIDTH 3
+#define DL_5G_BANDWIDTH 3
+#define UL_5G_LOAD_BALANCE 128
+#define DL_5G_LOAD_BALANCE 128
+#define FLR_5G_TIMEOUT 610
 #endif
 
 #define OPS_CACHE_SIZE 256U
 
 #define SYNC_WAIT 0
 #define SYNC_START 1
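+/* Opaque data value for ops excluded from the ordering check */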
+#define INVALID_OPAQUE -1
 
 #define INVALID_QUEUE_ID -1
+/* Increment for next code block in external HARQ memory */
+#define HARQ_INCR 32768
+/* Headroom for filler LLR insertion in the HARQ buffer */
+#define FILLER_HEADROOM 1024
+/* Constants for k0 computation as per 3GPP TS 38.212 Table 5.4.2.1-2 */
+#define N_ZC_1 66 /* N = 66 Zc for BG 1 */
+#define N_ZC_2 50 /* N = 50 Zc for BG 2 */
+#define K0_1_1 17 /* K0 fraction numerator for rv 1 and BG 1 */
+#define K0_1_2 13 /* K0 fraction numerator for rv 1 and BG 2 */
+#define K0_2_1 33 /* K0 fraction numerator for rv 2 and BG 1 */
+#define K0_2_2 25 /* K0 fraction numerator for rv 2 and BG 2 */
+#define K0_3_1 56 /* K0 fraction numerator for rv 3 and BG 1 */
+#define K0_3_2 43 /* K0 fraction numerator for rv 3 and BG 2 */
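+/*
+ * For rv > 0: k0 = numerator * Zc when n_cb == N, otherwise
+ * (numerator * n_cb / N) * Zc with integer division (see get_k0())
+ */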
 
 static struct test_bbdev_vector test_vector;
 
 /* Switch between PMD and Interrupt for throughput TC */
 static bool intr_enabled;
 
+/* LLR arithmetic representation for numerical conversion */
+static int ldpc_llr_decimals;
+static int ldpc_llr_size;
+/* Keep track of the LDPC decoder device capability flags */
+static uint32_t ldpc_cap_flags;
+
 /* Represents tested active devices */
 static struct active_device {
        const char *driver_name;
@@ -103,6 +133,8 @@ struct thread_params {
        double ops_per_sec;
        double mbps;
        uint8_t iter_count;
+       double iter_average;
+       double bler;
        rte_atomic16_t nb_dequeued;
        rte_atomic16_t processing_status;
        rte_atomic16_t burst_sz;
@@ -296,7 +328,7 @@ check_dev_cap(const struct rte_bbdev_info *dev_info)
                                return TEST_FAILED;
                        }
                        if (intr_enabled && !(cap->capability_flags &
-                                       RTE_BBDEV_TURBO_ENC_INTERRUPTS)) {
+                                       RTE_BBDEV_LDPC_ENC_INTERRUPTS)) {
                                printf(
                                        "Dequeue interrupts are not supported!\n");
                                return TEST_FAILED;
@@ -339,12 +371,19 @@ check_dev_cap(const struct rte_bbdev_info *dev_info)
                                return TEST_FAILED;
                        }
                        if (intr_enabled && !(cap->capability_flags &
-                                       RTE_BBDEV_TURBO_DEC_INTERRUPTS)) {
+                                       RTE_BBDEV_LDPC_DEC_INTERRUPTS)) {
                                printf(
                                        "Dequeue interrupts are not supported!\n");
                                return TEST_FAILED;
                        }
-
+                       if (intr_enabled && (test_vector.ldpc_dec.op_flags &
+                               (RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
+                               RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
+                               RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK
+                                       ))) {
+                               printf("Skip loop-back with interrupt\n");
+                               return TEST_FAILED;
+                       }
                        return TEST_SUCCESS;
                }
        }
@@ -380,7 +419,8 @@ create_mbuf_pool(struct op_data_entries *entries, uint8_t dev_id,
        snprintf(pool_name, sizeof(pool_name), "%s_pool_%u", op_type_str,
                        dev_id);
        return rte_pktmbuf_pool_create(pool_name, mbuf_pool_size, 0, 0,
-                       RTE_MAX(max_seg_sz + RTE_PKTMBUF_HEADROOM,
+                       RTE_MAX(max_seg_sz + RTE_PKTMBUF_HEADROOM
+                                       + FILLER_HEADROOM,
                        (unsigned int)RTE_MBUF_DEFAULT_BUF_SIZE), socket_id);
 }
 
@@ -435,27 +475,33 @@ create_mempools(struct active_device *ad, int socket_id,
                return TEST_SUCCESS;
 
        /* Inputs */
-       mbuf_pool_size = optimal_mempool_size(ops_pool_size * in->nb_segments);
-       mp = create_mbuf_pool(in, ad->dev_id, socket_id, mbuf_pool_size, "in");
-       TEST_ASSERT_NOT_NULL(mp,
-                       "ERROR Failed to create %u items input pktmbuf pool for dev %u on socket %u.",
-                       mbuf_pool_size,
-                       ad->dev_id,
-                       socket_id);
-       ad->in_mbuf_pool = mp;
+       if (in->nb_segments > 0) {
+               mbuf_pool_size = optimal_mempool_size(ops_pool_size *
+                               in->nb_segments);
+               mp = create_mbuf_pool(in, ad->dev_id, socket_id,
+                               mbuf_pool_size, "in");
+               TEST_ASSERT_NOT_NULL(mp,
+                               "ERROR Failed to create %u items input pktmbuf pool for dev %u on socket %u.",
+                               mbuf_pool_size,
+                               ad->dev_id,
+                               socket_id);
+               ad->in_mbuf_pool = mp;
+       }
 
        /* Hard outputs */
-       mbuf_pool_size = optimal_mempool_size(ops_pool_size *
-                       hard_out->nb_segments);
-       mp = create_mbuf_pool(hard_out, ad->dev_id, socket_id, mbuf_pool_size,
-                       "hard_out");
-       TEST_ASSERT_NOT_NULL(mp,
-                       "ERROR Failed to create %u items hard output pktmbuf pool for dev %u on socket %u.",
-                       mbuf_pool_size,
-                       ad->dev_id,
-                       socket_id);
-       ad->hard_out_mbuf_pool = mp;
-
+       if (hard_out->nb_segments > 0) {
+               mbuf_pool_size = optimal_mempool_size(ops_pool_size *
+                               hard_out->nb_segments);
+               mp = create_mbuf_pool(hard_out, ad->dev_id, socket_id,
+                               mbuf_pool_size,
+                               "hard_out");
+               TEST_ASSERT_NOT_NULL(mp,
+                               "ERROR Failed to create %u items hard output pktmbuf pool for dev %u on socket %u.",
+                               mbuf_pool_size,
+                               ad->dev_id,
+                               socket_id);
+               ad->hard_out_mbuf_pool = mp;
+       }
 
        /* Soft outputs */
        if (soft_out->nb_segments > 0) {
@@ -519,14 +565,13 @@ add_bbdev_dev(uint8_t dev_id, struct rte_bbdev_info *info,
 /* Configure FPGA LTE FEC with PF & VF values
  * if the '-i' flag is set and an FPGA device is used
  */
-#ifndef RTE_BUILD_SHARED_LIB
-#ifdef RTE_LIBRTE_PMD_FPGA_LTE_FEC
+#ifdef RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC
        if ((get_init_device() == true) &&
-               (!strcmp(info->drv.driver_name, FPGA_PF_DRIVER_NAME))) {
+               (!strcmp(info->drv.driver_name, FPGA_LTE_PF_DRIVER_NAME))) {
                struct fpga_lte_fec_conf conf;
                unsigned int i;
 
-               printf("Configure FPGA FEC Driver %s with default values\n",
+               printf("Configure FPGA LTE FEC Driver %s with default values\n",
                                info->drv.driver_name);
 
                /* clear default configuration before initialization */
@@ -540,22 +585,22 @@ add_bbdev_dev(uint8_t dev_id, struct rte_bbdev_info *info,
 
                for (i = 0; i < FPGA_LTE_FEC_NUM_VFS; ++i) {
                        /* Number of UL queues per VF (fpga supports 8 VFs) */
-                       conf.vf_ul_queues_number[i] = VF_UL_QUEUE_VALUE;
+                       conf.vf_ul_queues_number[i] = VF_UL_4G_QUEUE_VALUE;
                        /* Number of DL queues per VF (fpga supports 8 VFs) */
-                       conf.vf_dl_queues_number[i] = VF_DL_QUEUE_VALUE;
+                       conf.vf_dl_queues_number[i] = VF_DL_4G_QUEUE_VALUE;
                }
 
                /* UL bandwidth. Needed for schedule algorithm */
-               conf.ul_bandwidth = UL_BANDWIDTH;
+               conf.ul_bandwidth = UL_4G_BANDWIDTH;
                /* DL bandwidth */
-               conf.dl_bandwidth = DL_BANDWIDTH;
+               conf.dl_bandwidth = DL_4G_BANDWIDTH;
 
                /* UL & DL load balance factor */
-               conf.ul_load_balance = UL_LOAD_BALANCE;
-               conf.dl_load_balance = DL_LOAD_BALANCE;
+               conf.ul_load_balance = UL_4G_LOAD_BALANCE;
+               conf.dl_load_balance = DL_4G_LOAD_BALANCE;
 
                /* FLR timeout value */
-               conf.flr_time_out = FLR_TIMEOUT;
+               conf.flr_time_out = FLR_4G_TIMEOUT;
 
                /* setup FPGA PF with configuration information */
                ret = fpga_lte_fec_configure(info->dev_name, &conf);
@@ -564,6 +609,49 @@ add_bbdev_dev(uint8_t dev_id, struct rte_bbdev_info *info,
                                info->dev_name);
        }
 #endif
+#ifdef RTE_LIBRTE_PMD_BBDEV_FPGA_5GNR_FEC
+       if ((get_init_device() == true) &&
+               (!strcmp(info->drv.driver_name, FPGA_5GNR_PF_DRIVER_NAME))) {
+               struct fpga_5gnr_fec_conf conf;
+               unsigned int i;
+
+               printf("Configure FPGA 5GNR FEC Driver %s with default values\n",
+                               info->drv.driver_name);
+
+               /* clear default configuration before initialization */
+               memset(&conf, 0, sizeof(struct fpga_5gnr_fec_conf));
+
+               /* Set PF mode:
+                * true if PF is used for data plane
+                * false for VFs
+                */
+               conf.pf_mode_en = true;
+
+               for (i = 0; i < FPGA_5GNR_FEC_NUM_VFS; ++i) {
+                       /* Number of UL queues per VF (fpga supports 8 VFs) */
+                       conf.vf_ul_queues_number[i] = VF_UL_5G_QUEUE_VALUE;
+                       /* Number of DL queues per VF (fpga supports 8 VFs) */
+                       conf.vf_dl_queues_number[i] = VF_DL_5G_QUEUE_VALUE;
+               }
+
+               /* UL bandwidth. Needed for schedule algorithm */
+               conf.ul_bandwidth = UL_5G_BANDWIDTH;
+               /* DL bandwidth */
+               conf.dl_bandwidth = DL_5G_BANDWIDTH;
+
+               /* UL & DL load balance factor */
+               conf.ul_load_balance = UL_5G_LOAD_BALANCE;
+               conf.dl_load_balance = DL_5G_LOAD_BALANCE;
+
+               /* FLR timeout value */
+               conf.flr_time_out = FLR_5G_TIMEOUT;
+
+               /* setup FPGA PF with configuration information */
+               ret = fpga_5gnr_fec_configure(info->dev_name, &conf);
+               TEST_ASSERT_SUCCESS(ret,
+                               "Failed to configure 5G FPGA PF for bbdev %s",
+                               info->dev_name);
+       }
 #endif
        nb_queues = RTE_MIN(rte_lcore_count(), info->drv.max_num_queues);
        nb_queues = RTE_MIN(nb_queues, (unsigned int) MAX_QUEUES);
@@ -723,6 +811,9 @@ testsuite_teardown(void)
        /* Clear active devices structs. */
        memset(active_devs, 0, sizeof(active_devs));
        nb_active_devs = 0;
+
+       /* Disable interrupts */
+       intr_enabled = false;
 }
 
 static int
@@ -766,6 +857,7 @@ init_op_data_objs(struct rte_bbdev_op_data *bufs,
 {
        int ret;
        unsigned int i, j;
+       bool large_input = false;
 
        for (i = 0; i < n; ++i) {
                char *data;
@@ -776,24 +868,41 @@ init_op_data_objs(struct rte_bbdev_op_data *bufs,
                                op_type, n * ref_entries->nb_segments,
                                mbuf_pool->size);
 
-               TEST_ASSERT_SUCCESS(((seg->length + RTE_PKTMBUF_HEADROOM) >
-                               (uint32_t)UINT16_MAX),
-                               "Given data is bigger than allowed mbuf segment size");
-
+               if (seg->length > RTE_BBDEV_LDPC_E_MAX_MBUF) {
+                       /*
+                        * Special case when DPDK mbuf cannot handle
+                        * the required input size
+                        */
+                       printf("Warning: Larger input size than DPDK mbuf %d\n",
+                                       seg->length);
+                       large_input = true;
+               }
                bufs[i].data = m_head;
                bufs[i].offset = 0;
                bufs[i].length = 0;
 
                if ((op_type == DATA_INPUT) || (op_type == DATA_HARQ_INPUT)) {
-                       data = rte_pktmbuf_append(m_head, seg->length);
-                       TEST_ASSERT_NOT_NULL(data,
+                       if ((op_type == DATA_INPUT) && large_input) {
+                               /* Allocate a fake oversized mbuf */
+                               data = rte_malloc(NULL, seg->length, 0);
+                               memcpy(data, seg->addr, seg->length);
+                               m_head->buf_addr = data;
+                               m_head->buf_iova = rte_malloc_virt2iova(data);
+                               m_head->data_off = 0;
+                               m_head->data_len = seg->length;
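+                               /* Test-only shortcut: the mbuf buffer
+                                * pointers are overridden in place and
+                                * the original pool buffer is bypassed
+                                */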
+                       } else {
+                               data = rte_pktmbuf_append(m_head, seg->length);
+                               TEST_ASSERT_NOT_NULL(data,
                                        "Couldn't append %u bytes to mbuf from %d data type mbuf pool",
                                        seg->length, op_type);
 
-                       TEST_ASSERT(data == RTE_PTR_ALIGN(data, min_alignment),
+                               TEST_ASSERT(data == RTE_PTR_ALIGN(
+                                               data, min_alignment),
                                        "Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
                                        data, min_alignment);
-                       rte_memcpy(data, seg->addr, seg->length);
+                               rte_memcpy(data, seg->addr, seg->length);
+                       }
+
                        bufs[i].length += seg->length;
 
                        for (j = 1; j < ref_entries->nb_segments; ++j) {
@@ -888,6 +997,45 @@ limit_input_llr_val_range(struct rte_bbdev_op_data *input_ops,
        }
 }
 
+/*
+ * Filler bits may need to be inserted in the HARQ input
+ * when required by the device's HARQ memory handling
+ */
+static void
+ldpc_add_filler(struct rte_bbdev_op_data *input_ops,
+               const uint16_t n, struct test_op_params *op_params)
+{
+       struct rte_bbdev_op_ldpc_dec dec = op_params->ref_dec_op->ldpc_dec;
+
+       if (input_ops == NULL)
+               return;
+       /* No need to add filler if not required by device */
+       if (!(ldpc_cap_flags &
+                       RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS))
+               return;
+       /* No need to add filler for loopback operation */
+       if (dec.op_flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)
+               return;
+
+       uint16_t i, j, parity_offset;
+       for (i = 0; i < n; ++i) {
+               struct rte_mbuf *m = input_ops[i].data;
+               int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
+                               input_ops[i].offset);
+               parity_offset = (dec.basegraph == 1 ? 20 : 8)
+                               * dec.z_c - dec.n_filler;
+               uint16_t new_hin_size = input_ops[i].length + dec.n_filler;
+               m->data_len = new_hin_size;
+               input_ops[i].length = new_hin_size;
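+               /* Shift the parity LLRs up by n_filler positions and
+                * write saturated LLRs into the filler positions
+                */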
+               for (j = new_hin_size - 1; j >= parity_offset + dec.n_filler;
+                               j--)
+                       llr[j] = llr[j - dec.n_filler];
+               uint16_t llr_max_pre_scaling = (1 << (ldpc_llr_size - 1)) - 1;
+               for (j = 0; j < dec.n_filler; j++)
+                       llr[parity_offset + j] = llr_max_pre_scaling;
+       }
+}
+
 static void
 ldpc_input_llr_scaling(struct rte_bbdev_op_data *input_ops,
                const uint16_t n, const int8_t llr_size,
@@ -910,7 +1058,9 @@ ldpc_input_llr_scaling(struct rte_bbdev_op_data *input_ops,
                                        ++byte_idx) {
 
                                llr_tmp = llr[byte_idx];
-                               if (llr_decimals == 2)
+                               if (llr_decimals == 4)
+                                       llr_tmp *= 8;
+                               else if (llr_decimals == 2)
                                        llr_tmp *= 2;
                                else if (llr_decimals == 0)
                                        llr_tmp /= 2;
@@ -978,12 +1128,24 @@ fill_queue_buffers(struct test_op_params *op_params,
                        capabilities->cap.turbo_dec.max_llr_modulus);
 
        if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
-               ldpc_input_llr_scaling(*queue_ops[DATA_INPUT], n,
-                       capabilities->cap.ldpc_dec.llr_size,
-                       capabilities->cap.ldpc_dec.llr_decimals);
-               ldpc_input_llr_scaling(*queue_ops[DATA_HARQ_INPUT], n,
-                               capabilities->cap.ldpc_dec.llr_size,
-                               capabilities->cap.ldpc_dec.llr_decimals);
+               bool loopback = op_params->ref_dec_op->ldpc_dec.op_flags &
+                               RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK;
+               bool llr_comp = op_params->ref_dec_op->ldpc_dec.op_flags &
+                               RTE_BBDEV_LDPC_LLR_COMPRESSION;
+               bool harq_comp = op_params->ref_dec_op->ldpc_dec.op_flags &
+                               RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
+               ldpc_llr_decimals = capabilities->cap.ldpc_dec.llr_decimals;
+               ldpc_llr_size = capabilities->cap.ldpc_dec.llr_size;
+               ldpc_cap_flags = capabilities->cap.ldpc_dec.capability_flags;
+               if (!loopback && !llr_comp)
+                       ldpc_input_llr_scaling(*queue_ops[DATA_INPUT], n,
+                                       ldpc_llr_size, ldpc_llr_decimals);
+               if (!loopback && !harq_comp)
+                       ldpc_input_llr_scaling(*queue_ops[DATA_HARQ_INPUT], n,
+                                       ldpc_llr_size, ldpc_llr_decimals);
+               if (!loopback)
+                       ldpc_add_filler(*queue_ops[DATA_HARQ_INPUT], n,
+                                       op_params);
        }
 
        return 0;
@@ -1107,6 +1269,312 @@ copy_reference_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
        }
 }
 
+
+/* Returns a random number drawn from a normal distribution
+ * with mean 0 and variance 1 (Marsaglia polar method).
+ * The method yields a pair of deviates; the parity of n
+ * selects which one is returned.
+ */
+static double
+randn(int n)
+{
+       double S, Z, U1, U2, u, v, fac;
+
+       do {
+               U1 = (double)rand() / RAND_MAX;
+               U2 = (double)rand() / RAND_MAX;
+               u = 2. * U1 - 1.;
+               v = 2. * U2 - 1.;
+               S = u * u + v * v;
+       } while (S >= 1 || S == 0);
+       fac = sqrt(-2. * log(S) / S);
+       Z = (n % 2) ? u * fac : v * fac;
+       return Z;
+}
+
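+/*
+ * Max-star operator: log(exp(A) + exp(B)), computed exactly as
+ * max(A, B) + log1p(exp(-|A - B|)) and reduced to max(A, B)
+ * when |A - B| is large
+ */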
+static inline double
+maxstar(double A, double B)
+{
+       if (fabs(A - B) > 5)
+               return RTE_MAX(A, B);
+       else
+               return RTE_MAX(A, B) + log1p(exp(-fabs(A - B)));
+}
+
+/*
+ * Generate Qm LLRs for Qm == 8 (256QAM)
+ * Modulation, AWGN channel and max-log LLR estimation
+ */
+static void
+gen_qm8_llr(int8_t *llrs, uint32_t i, double N0, double llr_max)
+{
+       int qm = 8;
+       int qam = 256;
+       int m, k;
+       double I, Q, p0, p1, llr_, b[qm], log_syml_prob[qam];
+       /* 5.1.4 of TS38.211 */
+       /* 5.1.6 of TS38.211 (256QAM constellation) */
+                       5, 5, 7, 7, 5, 5, 7, 7, 3, 3, 1, 1, 3, 3, 1, 1, 5,
+                       5, 7, 7, 5, 5, 7, 7, 3, 3, 1, 1, 3, 3, 1, 1, 11,
+                       11, 9, 9, 11, 11, 9, 9, 13, 13, 15, 15, 13, 13,
+                       15, 15, 11, 11, 9, 9, 11, 11, 9, 9, 13, 13, 15,
+                       15, 13, 13, 15, 15, 5, 5, 7, 7, 5, 5, 7, 7, 3, 3,
+                       1, 1, 3, 3, 1, 1, 5, 5, 7, 7, 5, 5, 7, 7, 3, 3, 1,
+                       1, 3, 3, 1, 1, 11, 11, 9, 9, 11, 11, 9, 9, 13, 13,
+                       15, 15, 13, 13, 15, 15, 11, 11, 9, 9, 11, 11, 9, 9,
+                       13, 13, 15, 15, 13, 13, 15, 15, -5, -5, -7, -7, -5,
+                       -5, -7, -7, -3, -3, -1, -1, -3, -3, -1, -1, -5, -5,
+                       -7, -7, -5, -5, -7, -7, -3, -3, -1, -1, -3, -3,
+                       -1, -1, -11, -11, -9, -9, -11, -11, -9, -9, -13,
+                       -13, -15, -15, -13, -13, -15, -15, -11, -11, -9,
+                       -9, -11, -11, -9, -9, -13, -13, -15, -15, -13,
+                       -13, -15, -15, -5, -5, -7, -7, -5, -5, -7, -7, -3,
+                       -3, -1, -1, -3, -3, -1, -1, -5, -5, -7, -7, -5, -5,
+                       -7, -7, -3, -3, -1, -1, -3, -3, -1, -1, -11, -11,
+                       -9, -9, -11, -11, -9, -9, -13, -13, -15, -15, -13,
+                       -13, -15, -15, -11, -11, -9, -9, -11, -11, -9, -9,
+                       -13, -13, -15, -15, -13, -13, -15, -15};
+       const double symbols_Q[256] = {
+                       5, 7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1, 11,
+                       9, 11, 9, 13, 15, 13, 15, 11, 9, 11, 9, 13, 15, 13,
+                       15, 5, 7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1,
+                       11, 9, 11, 9, 13, 15, 13, 15, 11, 9, 11, 9, 13,
+                       15, 13, 15, -5, -7, -5, -7, -3, -1, -3, -1, -5,
+                       -7, -5, -7, -3, -1, -3, -1, -11, -9, -11, -9, -13,
+                       -15, -13, -15, -11, -9, -11, -9, -13, -15, -13,
+                       -15, -5, -7, -5, -7, -3, -1, -3, -1, -5, -7, -5,
+                       -7, -3, -1, -3, -1, -11, -9, -11, -9, -13, -15,
+                       -13, -15, -11, -9, -11, -9, -13, -15, -13, -15, 5,
+                       7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1, 11,
+                       9, 11, 9, 13, 15, 13, 15, 11, 9, 11, 9, 13, 15,
+                       13, 15, 5, 7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1,
+                       3, 1, 11, 9, 11, 9, 13, 15, 13, 15, 11, 9, 11, 9,
+                       13, 15, 13, 15, -5, -7, -5, -7, -3, -1, -3, -1,
+                       -5, -7, -5, -7, -3, -1, -3, -1, -11, -9, -11, -9,
+                       -13, -15, -13, -15, -11, -9, -11, -9, -13, -15,
+                       -13, -15, -5, -7, -5, -7, -3, -1, -3, -1, -5, -7,
+                       -5, -7, -3, -1, -3, -1, -11, -9, -11, -9, -13, -15,
+                       -13, -15, -11, -9, -11, -9, -13, -15, -13, -15};
+       /* Average constellation point energy */
+       N0 *= 170.0;
+       for (k = 0; k < qm; k++)
+               b[k] = llrs[qm * i + k] < 0 ? 1.0 : 0.0;
+       /* 5.1.6 of TS38.211 */
+       I = (1 - 2 * b[0]) * (8 - (1 - 2 * b[2]) *
+                       (4 - (1 - 2 * b[4]) * (2 - (1 - 2 * b[6]))));
+       Q = (1 - 2 * b[1]) * (8 - (1 - 2 * b[3]) *
+                       (4 - (1 - 2 * b[5]) * (2 - (1 - 2 * b[7]))));
+       /* AWGN channel */
+       I += sqrt(N0 / 2) * randn(0);
+       Q += sqrt(N0 / 2) * randn(1);
+       /*
+        * Calculate the log of the probability that each of
+        * the constellation points was transmitted
+        */
+       for (m = 0; m < qam; m++)
+               log_syml_prob[m] = -(pow(I - symbols_I[m], 2.0)
+                               + pow(Q - symbols_Q[m], 2.0)) / N0;
+       /* Calculate an LLR for each of the qm bits in the set */
+       for (k = 0; k < qm; k++) {
+               p0 = -999999;
+               p1 = -999999;
+               /* For each constellation point */
+               for (m = 0; m < qam; m++) {
+                       if ((m >> (qm - k - 1)) & 1)
+                               p1 = maxstar(p1, log_syml_prob[m]);
+                       else
+                               p0 = maxstar(p0, log_syml_prob[m]);
+               }
+               /* Calculate the LLR */
+               llr_ = p0 - p1;
+               llr_ *= (1 << ldpc_llr_decimals);
+               llr_ = round(llr_);
+               if (llr_ > llr_max)
+                       llr_ = llr_max;
+               if (llr_ < -llr_max)
+                       llr_ = -llr_max;
+               llrs[qm * i + k] = (int8_t) llr_;
+       }
+}
+
+
+/*
+ * Generate Qm LLRs for Qm == 6 (64QAM)
+ * Modulation, AWGN channel and max-log LLR estimation
+ */
+static void
+gen_qm6_llr(int8_t *llrs, uint32_t i, double N0, double llr_max)
+{
+       int qm = 6;
+       int qam = 64;
+       int m, k;
+       double I, Q, p0, p1, llr_, b[qm], log_syml_prob[qam];
+       /* 5.1.5 of TS38.211 (64QAM constellation) */
+       const double symbols_I[64] = {
+                       3, 3, 1, 1, 3, 3, 1, 1, 5, 5, 7, 7, 5, 5, 7, 7,
+                       3, 3, 1, 1, 3, 3, 1, 1, 5, 5, 7, 7, 5, 5, 7, 7,
+                       -3, -3, -1, -1, -3, -3, -1, -1, -5, -5, -7, -7,
+                       -5, -5, -7, -7, -3, -3, -1, -1, -3, -3, -1, -1,
+                       -5, -5, -7, -7, -5, -5, -7, -7};
+       const double symbols_Q[64] = {
+                       3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7,
+                       -3, -1, -3, -1, -5, -7, -5, -7, -3, -1, -3, -1,
+                       -5, -7, -5, -7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1,
+                       5, 7, 5, 7, -3, -1, -3, -1, -5, -7, -5, -7,
+                       -3, -1, -3, -1, -5, -7, -5, -7};
+       /* Average constellation point energy */
+       N0 *= 42.0;
+       for (k = 0; k < qm; k++)
+               b[k] = llrs[qm * i + k] < 0 ? 1.0 : 0.0;
+       /* 5.1.5 of TS38.211 */
+       I = (1 - 2 * b[0])*(4 - (1 - 2 * b[2]) * (2 - (1 - 2 * b[4])));
+       Q = (1 - 2 * b[1])*(4 - (1 - 2 * b[3]) * (2 - (1 - 2 * b[5])));
+       /* AWGN channel */
+       I += sqrt(N0 / 2) * randn(0);
+       Q += sqrt(N0 / 2) * randn(1);
+       /*
+        * Calculate the log of the probability that each of
+        * the constellation points was transmitted
+        */
+       for (m = 0; m < qam; m++)
+               log_syml_prob[m] = -(pow(I - symbols_I[m], 2.0)
+                               + pow(Q - symbols_Q[m], 2.0)) / N0;
+       /* Calculate an LLR for each of the qm bits in the set */
+       for (k = 0; k < qm; k++) {
+               p0 = -999999;
+               p1 = -999999;
+               /* For each constellation point */
+               for (m = 0; m < qam; m++) {
+                       if ((m >> (qm - k - 1)) & 1)
+                               p1 = maxstar(p1, log_syml_prob[m]);
+                       else
+                               p0 = maxstar(p0, log_syml_prob[m]);
+               }
+               /* Calculate the LLR */
+               llr_ = p0 - p1;
+               llr_ *= (1 << ldpc_llr_decimals);
+               llr_ = round(llr_);
+               if (llr_ > llr_max)
+                       llr_ = llr_max;
+               if (llr_ < -llr_max)
+                       llr_ = -llr_max;
+               llrs[qm * i + k] = (int8_t) llr_;
+       }
+}
+
+/*
+ * Generate Qm LLRs for Qm == 4 (16QAM)
+ * Modulation, AWGN channel and max-log LLR estimation
+ */
+static void
+gen_qm4_llr(int8_t *llrs, uint32_t i, double N0, double llr_max)
+{
+       int qm = 4;
+       int qam = 16;
+       int m, k;
+       double I, Q, p0, p1, llr_, b[qm], log_syml_prob[qam];
+       /* 5.1.4 of TS38.211 */
+       const double symbols_I[16] = {1, 1, 3, 3, 1, 1, 3, 3,
+                       -1, -1, -3, -3, -1, -1, -3, -3};
+       const double symbols_Q[16] = {1, 3, 1, 3, -1, -3, -1, -3,
+                       1, 3, 1, 3, -1, -3, -1, -3};
+       /* Average constellation point energy */
+       N0 *= 10.0;
+       for (k = 0; k < qm; k++)
+               b[k] = llrs[qm * i + k] < 0 ? 1.0 : 0.0;
+       /* 5.1.4 of TS38.211 */
+       I = (1 - 2 * b[0]) * (2 - (1 - 2 * b[2]));
+       Q = (1 - 2 * b[1]) * (2 - (1 - 2 * b[3]));
+       /* AWGN channel */
+       I += sqrt(N0 / 2) * randn(0);
+       Q += sqrt(N0 / 2) * randn(1);
+       /*
+        * Calculate the log of the probability that each of
+        * the constellation points was transmitted
+        */
+       for (m = 0; m < qam; m++)
+               log_syml_prob[m] = -(pow(I - symbols_I[m], 2.0)
+                               + pow(Q - symbols_Q[m], 2.0)) / N0;
+       /* Calculate an LLR for each of the qm bits in the set */
+       for (k = 0; k < qm; k++) {
+               p0 = -999999;
+               p1 = -999999;
+               /* For each constellation point */
+               for (m = 0; m < qam; m++) {
+                       if ((m >> (qm - k - 1)) & 1)
+                               p1 = maxstar(p1, log_syml_prob[m]);
+                       else
+                               p0 = maxstar(p0, log_syml_prob[m]);
+               }
+               /* Calculate the LLR */
+               llr_ = p0 - p1;
+               llr_ *= (1 << ldpc_llr_decimals);
+               llr_ = round(llr_);
+               if (llr_ > llr_max)
+                       llr_ = llr_max;
+               if (llr_ < -llr_max)
+                       llr_ = -llr_max;
+               llrs[qm * i + k] = (int8_t) llr_;
+       }
+}
+
+static void
+gen_qm2_llr(int8_t *llrs, uint32_t j, double N0, double llr_max)
+{
+       double b, b1, n;
+       double coeff = 2.0 * sqrt(N0);
+
+       /* Ignore rare quasi-null LLRs in the vectors so they are not saturated */
+       if (llrs[j] < 8 && llrs[j] > -8)
+               return;
+
+       /* Note: do not change the sign here */
+       n = randn(j % 2);
+       b1 = ((llrs[j] > 0 ? 2.0 : -2.0)
+                       + coeff * n) / N0;
+       b = b1 * (1 << ldpc_llr_decimals);
+       b = round(b);
+       if (b > llr_max)
+               b = llr_max;
+       if (b < -llr_max)
+               b = -llr_max;
+       llrs[j] = (int8_t) b;
+}
+
+/* Generate LLRs for a given SNR */
+static void
+generate_llr_input(uint16_t n, struct rte_bbdev_op_data *inputs,
+               struct rte_bbdev_dec_op *ref_op)
+{
+       struct rte_mbuf *m;
+       uint16_t qm;
+       uint32_t i, j, e, range;
+       double N0, llr_max;
+
+       e = ref_op->ldpc_dec.cb_params.e;
+       qm = ref_op->ldpc_dec.q_m;
+       llr_max = (1 << (ldpc_llr_size - 1)) - 1;
+       range = e / qm;
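+       /* Linear noise power from the SNR expressed in dB: N0 = 10^(-SNR/10) */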
+       N0 = 1.0 / pow(10.0, get_snr() / 10.0);
+
+       for (i = 0; i < n; ++i) {
+               m = inputs[i].data;
+               int8_t *llrs = rte_pktmbuf_mtod_offset(m, int8_t *, 0);
+               if (qm == 8) {
+                       for (j = 0; j < range; ++j)
+                               gen_qm8_llr(llrs, j, N0, llr_max);
+               } else if (qm == 6) {
+                       for (j = 0; j < range; ++j)
+                               gen_qm6_llr(llrs, j, N0, llr_max);
+               } else if (qm == 4) {
+                       for (j = 0; j < range; ++j)
+                               gen_qm4_llr(llrs, j, N0, llr_max);
+               } else {
+                       for (j = 0; j < e; ++j)
+                               gen_qm2_llr(llrs, j, N0, llr_max);
+               }
+       }
+}
+
 static void
 copy_reference_ldpc_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
                unsigned int start_idx,
@@ -1146,17 +1614,21 @@ copy_reference_ldpc_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
                ops[i]->ldpc_dec.op_flags = ldpc_dec->op_flags;
                ops[i]->ldpc_dec.code_block_mode = ldpc_dec->code_block_mode;
 
-               ops[i]->ldpc_dec.hard_output = hard_outputs[start_idx + i];
-               ops[i]->ldpc_dec.input = inputs[start_idx + i];
+               if (hard_outputs != NULL)
+                       ops[i]->ldpc_dec.hard_output =
+                                       hard_outputs[start_idx + i];
+               if (inputs != NULL)
+                       ops[i]->ldpc_dec.input =
+                                       inputs[start_idx + i];
                if (soft_outputs != NULL)
                        ops[i]->ldpc_dec.soft_output =
-                               soft_outputs[start_idx + i];
+                                       soft_outputs[start_idx + i];
                if (harq_inputs != NULL)
                        ops[i]->ldpc_dec.harq_combined_input =
                                        harq_inputs[start_idx + i];
                if (harq_outputs != NULL)
                        ops[i]->ldpc_dec.harq_combined_output =
-                               harq_outputs[start_idx + i];
+                                       harq_outputs[start_idx + i];
        }
 }
 
@@ -1198,7 +1670,22 @@ static int
 check_dec_status_and_ordering(struct rte_bbdev_dec_op *op,
                unsigned int order_idx, const int expected_status)
 {
-       TEST_ASSERT(op->status == expected_status,
+       int status = op->status;
+       /* Ignore parity-mismatch false alarms for large iteration counts */
+       if (get_iter_max() >= 10) {
+               if (!(expected_status & (1 << RTE_BBDEV_SYNDROME_ERROR)) &&
+                               (status & (1 << RTE_BBDEV_SYNDROME_ERROR))) {
+                       printf("WARNING: Ignore Syndrome Check mismatch\n");
+                       status -= (1 << RTE_BBDEV_SYNDROME_ERROR);
+               }
+               if ((expected_status & (1 << RTE_BBDEV_SYNDROME_ERROR)) &&
+                               !(status & (1 << RTE_BBDEV_SYNDROME_ERROR))) {
+                       printf("WARNING: Ignore Syndrome Check mismatch\n");
+                       status += (1 << RTE_BBDEV_SYNDROME_ERROR);
+               }
+       }
+
+       TEST_ASSERT(status == expected_status,
                        "op_status (%d) != expected_status (%d)",
                        op->status, expected_status);
 
@@ -1217,9 +1704,10 @@ check_enc_status_and_ordering(struct rte_bbdev_enc_op *op,
                        "op_status (%d) != expected_status (%d)",
                        op->status, expected_status);
 
-       TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
-                       "Ordering error, expected %p, got %p",
-                       (void *)(uintptr_t)order_idx, op->opaque_data);
+       if (op->opaque_data != (void *)(uintptr_t)INVALID_OPAQUE)
+               TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
+                               "Ordering error, expected %p, got %p",
+                               (void *)(uintptr_t)order_idx, op->opaque_data);
 
        return TEST_SUCCESS;
 }
@@ -1263,6 +1751,173 @@ validate_op_chain(struct rte_bbdev_op_data *op,
        return TEST_SUCCESS;
 }
 
+/*
+ * Compute k0 for a given configuration, used for the HARQ output length
+ * computation, as defined in 3GPP TS 38.212 Table 5.4.2.1-2
+ */
+static inline uint16_t
+get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
+{
+       if (rv_index == 0)
+               return 0;
+       uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;
+       if (n_cb == n) {
+               if (rv_index == 1)
+                       return (bg == 1 ? K0_1_1 : K0_1_2) * z_c;
+               else if (rv_index == 2)
+                       return (bg == 1 ? K0_2_1 : K0_2_2) * z_c;
+               else
+                       return (bg == 1 ? K0_3_1 : K0_3_2) * z_c;
+       }
+       /* LBRM case - includes a division by N */
+       if (rv_index == 1)
+               return (((bg == 1 ? K0_1_1 : K0_1_2) * n_cb)
+                               / n) * z_c;
+       else if (rv_index == 2)
+               return (((bg == 1 ? K0_2_1 : K0_2_2) * n_cb)
+                               / n) * z_c;
+       else
+               return (((bg == 1 ? K0_3_1 : K0_3_2) * n_cb)
+                               / n) * z_c;
+}
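+/* Example: BG 1, Zc = 384, n_cb = N = 25344, rv 1: k0 = 17 * 384 = 6528 */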
+
+/* HARQ output length including the filler bits */
+static inline uint16_t
+compute_harq_len(struct rte_bbdev_op_ldpc_dec *ops_ld)
+{
+       uint16_t k0 = 0;
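+       /* rv 1 is promoted to rv 3 so that the largest k0, hence the
+        * worst-case HARQ buffer length, is assumed
+        */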
+       uint8_t max_rv = (ops_ld->rv_index == 1) ? 3 : ops_ld->rv_index;
+       k0 = get_k0(ops_ld->n_cb, ops_ld->z_c, ops_ld->basegraph, max_rv);
+       /* Compute RM out size and number of rows */
+       uint16_t parity_offset = (ops_ld->basegraph == 1 ? 20 : 8)
+                       * ops_ld->z_c - ops_ld->n_filler;
+       uint16_t deRmOutSize = RTE_MIN(
+                       k0 + ops_ld->cb_params.e +
+                       ((k0 > parity_offset) ? 0 : ops_ld->n_filler),
+                       ops_ld->n_cb);
+       uint16_t numRows = ((deRmOutSize + ops_ld->z_c - 1)
+                       / ops_ld->z_c);
+       uint16_t harq_output_len = numRows * ops_ld->z_c;
+       return harq_output_len;
+}
+
+static inline int
+validate_op_harq_chain(struct rte_bbdev_op_data *op,
+               struct op_data_entries *orig_op,
+               struct rte_bbdev_op_ldpc_dec *ops_ld)
+{
+       uint8_t i;
+       uint32_t j, jj, k;
+       struct rte_mbuf *m = op->data;
+       uint8_t nb_dst_segments = orig_op->nb_segments;
+       uint32_t total_data_size = 0;
+       int8_t *harq_orig, *harq_out, abs_harq_origin;
+       uint32_t byte_error = 0, cum_error = 0, error;
+       int16_t llr_max = (1 << (ldpc_llr_size - ldpc_llr_decimals)) - 1;
+       int16_t llr_max_pre_scaling = (1 << (ldpc_llr_size - 1)) - 1;
+       uint16_t parity_offset;
+
+       TEST_ASSERT(nb_dst_segments == m->nb_segs,
+                       "Number of segments differ in original (%u) and filled (%u) op",
+                       nb_dst_segments, m->nb_segs);
+
+       /* Validate each mbuf segment length */
+       for (i = 0; i < nb_dst_segments; ++i) {
+               /* Apply offset to the first mbuf segment */
+               uint16_t offset = (i == 0) ? op->offset : 0;
+               uint16_t data_len = rte_pktmbuf_data_len(m) - offset;
+               total_data_size += orig_op->segments[i].length;
+
+               TEST_ASSERT(orig_op->segments[i].length <
+                               (uint32_t)(data_len + 64),
+                               "Length of segment differ in original (%u) and filled (%u) op",
+                               orig_op->segments[i].length, data_len);
+               harq_orig = (int8_t *) orig_op->segments[i].addr;
+               harq_out = rte_pktmbuf_mtod_offset(m, int8_t *, offset);
+
+               if (!(ldpc_cap_flags &
+                               RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS
+                               ) || (ops_ld->op_flags &
+                               RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
+                       data_len -= ops_ld->z_c;
+                       parity_offset = data_len;
+               } else {
+                       /* Compute RM out size and number of rows */
+                       parity_offset = (ops_ld->basegraph == 1 ? 20 : 8)
+                                       * ops_ld->z_c - ops_ld->n_filler;
+                       uint16_t deRmOutSize = compute_harq_len(ops_ld) -
+                                       ops_ld->n_filler;
+                       if (data_len > deRmOutSize)
+                               data_len = deRmOutSize;
+                       if (data_len > orig_op->segments[i].length)
+                               data_len = orig_op->segments[i].length;
+               }
+               /*
+                * HARQ output can have minor differences
+                * due to integer representation and related scaling
+                */
+               for (j = 0, jj = 0; j < data_len; j++, jj++) {
+                       if (j == parity_offset) {
+                               /* Special handling of the filler bits */
+                               for (k = 0; k < ops_ld->n_filler; k++) {
+                                       if (harq_out[jj] !=
+                                                       llr_max_pre_scaling) {
+                                               printf("HARQ Filler issue %d: %d %d\n",
+                                                       jj, harq_out[jj],
+                                                       llr_max_pre_scaling);
+                                               byte_error++;
+                                       }
+                                       jj++;
+                               }
+                       }
+                       if (!(ops_ld->op_flags &
+                               RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
+                               if (ldpc_llr_decimals > 1)
+                                       harq_out[jj] = (harq_out[jj] + 1)
+                                               >> (ldpc_llr_decimals - 1);
+                               /* Saturated to S7 */
+                               if (harq_orig[j] > llr_max)
+                                       harq_orig[j] = llr_max;
+                               if (harq_orig[j] < -llr_max)
+                                       harq_orig[j] = -llr_max;
+                       }
+                       if (harq_orig[j] != harq_out[jj]) {
+                               error = (harq_orig[j] > harq_out[jj]) ?
+                                               harq_orig[j] - harq_out[jj] :
+                                               harq_out[jj] - harq_orig[j];
+                               abs_harq_origin = harq_orig[j] > 0 ?
+                                                       harq_orig[j] :
+                                                       -harq_orig[j];
+                               /* Residual quantization error */
+                               if ((error > 8 && (abs_harq_origin <
+                                               (llr_max - 16))) ||
+                                               (error > 16)) {
+                                       printf("HARQ mismatch %d: exp %d act %d => %d\n",
+                                                       j, harq_orig[j],
+                                                       harq_out[jj], error);
+                                       byte_error++;
+                                       cum_error += error;
+                               }
+                       }
+               }
+               m = m->next;
+       }
+
+       if (byte_error)
+               TEST_ASSERT(byte_error <= 1,
+                               "HARQ output mismatch (%d) %d",
+                               byte_error, cum_error);
+
+       /* Validate total mbuf pkt length */
+       uint32_t pkt_len = rte_pktmbuf_pkt_len(op->data) - op->offset;
+       TEST_ASSERT(total_data_size < pkt_len + 64,
+                       "Length of data differ in original (%u) and filled (%u) op",
+                       total_data_size, pkt_len);
+
+       return TEST_SUCCESS;
+}
+
 static int
 validate_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
                struct rte_bbdev_dec_op *ref_op, const int vector_mask)
@@ -1306,6 +1961,29 @@ validate_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
        return TEST_SUCCESS;
 }
 
+/* Check the number of code block errors */
+static int
+validate_ldpc_bler(struct rte_bbdev_dec_op **ops, const uint16_t n)
+{
+       unsigned int i;
+       struct op_data_entries *hard_data_orig =
+                       &test_vector.entries[DATA_HARD_OUTPUT];
+       struct rte_bbdev_op_ldpc_dec *ops_td;
+       struct rte_bbdev_op_data *hard_output;
+       int errors = 0;
+       struct rte_mbuf *m;
+
+       for (i = 0; i < n; ++i) {
+               ops_td = &ops[i]->ldpc_dec;
+               hard_output = &ops_td->hard_output;
+               m = hard_output->data;
+               if (memcmp(rte_pktmbuf_mtod_offset(m, uint32_t *, 0),
+                               hard_data_orig->segments[0].addr,
+                               hard_data_orig->segments[0].length))
+                       errors++;
+       }
+       return errors;
+}
 
 static int
 validate_ldpc_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
@@ -1338,8 +2016,15 @@ validate_ldpc_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
                        TEST_ASSERT(ops_td->iter_count <= ref_td->iter_count,
                                        "Returned iter_count (%d) > expected iter_count (%d)",
                                        ops_td->iter_count, ref_td->iter_count);
-               /* We can ignore data when the decoding failed to converge */
-               if ((ops[i]->status &  (1 << RTE_BBDEV_SYNDROME_ERROR)) == 0)
+               /*
+                * We can ignore output data when the decoding failed to
+                * converge or for loop-back cases
+                */
+               if (!check_bit(ops[i]->ldpc_dec.op_flags,
+                               RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK) &&
+                               (ops[i]->status &
+                               (1 << RTE_BBDEV_SYNDROME_ERROR)) == 0)
                        TEST_ASSERT_SUCCESS(validate_op_chain(hard_output,
                                        hard_data_orig),
                                        "Hard output buffers (CB=%u) are not equal",
@@ -1352,12 +2037,18 @@ validate_ldpc_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
                                        i);
                if (ref_op->ldpc_dec.op_flags &
                                RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE) {
-                       ldpc_input_llr_scaling(harq_output, 1, 8, 0);
-                       TEST_ASSERT_SUCCESS(validate_op_chain(harq_output,
-                                       harq_data_orig),
+                       TEST_ASSERT_SUCCESS(validate_op_harq_chain(harq_output,
+                                       harq_data_orig, ops_td),
                                        "HARQ output buffers (CB=%u) are not equal",
                                        i);
                }
+               if (ref_op->ldpc_dec.op_flags &
+                               RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)
+                       TEST_ASSERT_SUCCESS(validate_op_harq_chain(harq_output,
+                                       harq_data_orig, ops_td),
+                                       "HARQ output buffers (CB=%u) are not equal",
+                                       i);
+
        }
 
        return TEST_SUCCESS;
@@ -1696,6 +2387,105 @@ run_test_case(test_case_function *test_case_func)
        return ret;
 }
 
+
+/* Push back the HARQ output from DDR to host */
+static void
+retrieve_harq_ddr(uint16_t dev_id, uint16_t queue_id,
+               struct rte_bbdev_dec_op **ops,
+               const uint16_t n)
+{
+       uint16_t j;
+       int save_status, ret;
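+       /* Per-queue base offset in external HARQ memory (1024 code blocks apart) */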
+       uint32_t harq_offset = (uint32_t) queue_id * HARQ_INCR * 1024;
+       struct rte_bbdev_dec_op *ops_deq[MAX_BURST];
+       uint32_t flags = ops[0]->ldpc_dec.op_flags;
+       bool loopback = flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK;
+       bool mem_out = flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
+       bool hc_out = flags & RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE;
+       bool h_comp = flags & RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
+       for (j = 0; j < n; ++j) {
+               if ((loopback && mem_out) || hc_out) {
+                       save_status = ops[j]->status;
+                       ops[j]->ldpc_dec.op_flags =
+                               RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK +
+                               RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE;
+                       if (h_comp)
+                               ops[j]->ldpc_dec.op_flags +=
+                                       RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
+                       ops[j]->ldpc_dec.harq_combined_input.offset =
+                                       harq_offset;
+                       ops[j]->ldpc_dec.harq_combined_output.offset = 0;
+                       harq_offset += HARQ_INCR;
+                       if (!loopback)
+                               ops[j]->ldpc_dec.harq_combined_input.length =
+                               ops[j]->ldpc_dec.harq_combined_output.length;
+                       rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
+                                       &ops[j], 1);
+                       ret = 0;
+                       while (ret == 0)
+                               ret = rte_bbdev_dequeue_ldpc_dec_ops(
+                                               dev_id, queue_id,
+                                               &ops_deq[j], 1);
+                       ops[j]->ldpc_dec.op_flags = flags;
+                       ops[j]->status = save_status;
+               }
+       }
+}
+
+/*
+ * Preload the HARQ memory input from host to HW DDR
+ * and adjust the HARQ offsets accordingly
+ */
+static void
+preload_harq_ddr(uint16_t dev_id, uint16_t queue_id,
+               struct rte_bbdev_dec_op **ops, const uint16_t n,
+               bool preload)
+{
+       uint16_t j;
+       int ret;
+       uint32_t harq_offset = (uint32_t) queue_id * HARQ_INCR * 1024;
+       struct rte_bbdev_op_data save_hc_in, save_hc_out;
+       struct rte_bbdev_dec_op *ops_deq[MAX_BURST];
+       uint32_t flags = ops[0]->ldpc_dec.op_flags;
+       bool mem_in = flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE;
+       bool hc_in = flags & RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE;
+       bool mem_out = flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
+       bool hc_out = flags & RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE;
+       bool h_comp = flags & RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
+       for (j = 0; j < n; ++j) {
+               if ((mem_in || hc_in) && preload) {
+                       save_hc_in = ops[j]->ldpc_dec.harq_combined_input;
+                       save_hc_out = ops[j]->ldpc_dec.harq_combined_output;
+                       ops[j]->ldpc_dec.op_flags =
+                               RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK +
+                               RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
+                       if (h_comp)
+                               ops[j]->ldpc_dec.op_flags +=
+                                       RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
+                       ops[j]->ldpc_dec.harq_combined_output.offset =
+                                       harq_offset;
+                       ops[j]->ldpc_dec.harq_combined_input.offset = 0;
+                       rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
+                                       &ops[j], 1);
+                       ret = 0;
+                       while (ret == 0)
+                               ret = rte_bbdev_dequeue_ldpc_dec_ops(
+                                       dev_id, queue_id, &ops_deq[j], 1);
+                       ops[j]->ldpc_dec.op_flags = flags;
+                       ops[j]->ldpc_dec.harq_combined_input = save_hc_in;
+                       ops[j]->ldpc_dec.harq_combined_output = save_hc_out;
+               }
+               /* Adjust HARQ offset when external DDR is used */
+               if (mem_in || hc_in)
+                       ops[j]->ldpc_dec.harq_combined_input.offset
+                               = harq_offset;
+               if (mem_out || hc_out)
+                       ops[j]->ldpc_dec.harq_combined_output.offset
+                               = harq_offset;
+               harq_offset += HARQ_INCR;
+       }
+}
+
 static void
 dequeue_event_callback(uint16_t dev_id,
                enum rte_bbdev_event_type event, void *cb_arg,
@@ -1731,13 +2521,22 @@ dequeue_event_callback(uint16_t dev_id,
        burst_sz = rte_atomic16_read(&tp->burst_sz);
        num_ops = tp->op_params->num_to_process;
 
-       if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
-                       test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
+       if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
                deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
                                &tp->dec_ops[
                                        rte_atomic16_read(&tp->nb_dequeued)],
                                burst_sz);
-       else
+       else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
+               deq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
+                               &tp->dec_ops[
+                                       rte_atomic16_read(&tp->nb_dequeued)],
+                               burst_sz);
+       else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
+               deq = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
+                               &tp->enc_ops[
+                                       rte_atomic16_read(&tp->nb_dequeued)],
+                               burst_sz);
+       else /* RTE_BBDEV_OP_TURBO_ENC */
                deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
                                &tp->enc_ops[
                                        rte_atomic16_read(&tp->nb_dequeued)],
@@ -1823,7 +2622,7 @@ dequeue_event_callback(uint16_t dev_id,
 }
 
 static int
-throughput_intr_lcore_dec(void *arg)
+throughput_intr_lcore_ldpc_dec(void *arg)
 {
        struct thread_params *tp = arg;
        unsigned int enqueued;
@@ -1834,8 +2633,14 @@ throughput_intr_lcore_dec(void *arg)
        struct test_buffers *bufs = NULL;
        struct rte_bbdev_info info;
        int ret, i, j;
+       struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
        uint16_t num_to_enq, enq;
 
+       bool loopback = check_bit(ref_op->ldpc_dec.op_flags,
+                       RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK);
+       bool hc_out = check_bit(ref_op->ldpc_dec.op_flags,
+                       RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);
+
        TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
                        "BURST_SIZE should be <= %u", MAX_BURST);
 
@@ -1862,17 +2667,23 @@ throughput_intr_lcore_dec(void *arg)
        TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
                        num_to_process);
        if (test_vector.op_type != RTE_BBDEV_OP_NONE)
-               copy_reference_dec_op(ops, num_to_process, 0, bufs->inputs,
+               copy_reference_ldpc_dec_op(ops, num_to_process, 0, bufs->inputs,
                                bufs->hard_outputs, bufs->soft_outputs,
-                               tp->op_params->ref_dec_op);
+                               bufs->harq_inputs, bufs->harq_outputs, ref_op);
 
        /* Set counter to validate the ordering */
        for (j = 0; j < num_to_process; ++j)
                ops[j]->opaque_data = (void *)(uintptr_t)j;
 
        for (j = 0; j < TEST_REPETITIONS; ++j) {
-               for (i = 0; i < num_to_process; ++i)
-                       rte_pktmbuf_reset(ops[i]->turbo_dec.hard_output.data);
+               for (i = 0; i < num_to_process; ++i) {
+                       if (!loopback)
+                               rte_pktmbuf_reset(
+                                       ops[i]->ldpc_dec.hard_output.data);
+                       if (hc_out || loopback)
+                               mbuf_reset(
+                               ops[i]->ldpc_dec.harq_combined_output.data);
+               }
 
                tp->start_time = rte_rdtsc_precise();
                for (enqueued = 0; enqueued < num_to_process;) {
@@ -1883,7 +2694,98 @@ throughput_intr_lcore_dec(void *arg)
 
                        enq = 0;
                        do {
-                               enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
+                               enq += rte_bbdev_enqueue_ldpc_dec_ops(
+                                               tp->dev_id,
+                                               queue_id, &ops[enqueued],
+                                               num_to_enq);
+                       } while (unlikely(num_to_enq != enq));
+                       enqueued += enq;
+
+                       /* Write the number of descriptors just enqueued to
+                        * the thread's burst_sz. This ensures that the
+                        * proper number of descriptors is dequeued in the
+                        * callback function - needed for the last batch when
+                        * the number of operations is not a multiple of the
+                        * burst size.
+                        */
+                       rte_atomic16_set(&tp->burst_sz, num_to_enq);
+
+                       /* Wait until processing of previous batch is
+                        * completed
+                        */
+                       while (rte_atomic16_read(&tp->nb_dequeued) !=
+                                       (int16_t) enqueued)
+                               rte_pause();
+               }
+               if (j != TEST_REPETITIONS - 1)
+                       rte_atomic16_clear(&tp->nb_dequeued);
+       }
+
+       return TEST_SUCCESS;
+}
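+
+/*
+ * All interrupt-mode lcore functions above and below share the same enqueue
+ * pattern: retry until the device accepts the whole batch, publish the batch
+ * size for the dequeue callback, then wait for the callback to catch up.
+ * A minimal sketch of one batch (tp, ops and counters as defined above):
+ *
+ *     enq = 0;
+ *     do {
+ *             enq += rte_bbdev_enqueue_ldpc_dec_ops(tp->dev_id, queue_id,
+ *                             &ops[enqueued], num_to_enq);
+ *     } while (unlikely(num_to_enq != enq));
+ *     enqueued += enq;
+ *     rte_atomic16_set(&tp->burst_sz, num_to_enq);
+ *     while (rte_atomic16_read(&tp->nb_dequeued) != (int16_t)enqueued)
+ *             rte_pause();
+ */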
+
+static int
+throughput_intr_lcore_dec(void *arg)
+{
+       struct thread_params *tp = arg;
+       unsigned int enqueued;
+       const uint16_t queue_id = tp->queue_id;
+       const uint16_t burst_sz = tp->op_params->burst_sz;
+       const uint16_t num_to_process = tp->op_params->num_to_process;
+       struct rte_bbdev_dec_op *ops[num_to_process];
+       struct test_buffers *bufs = NULL;
+       struct rte_bbdev_info info;
+       int ret, i, j;
+       uint16_t num_to_enq, enq;
+
+       TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
+                       "BURST_SIZE should be <= %u", MAX_BURST);
+
+       TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
+                       "Failed to enable interrupts for dev: %u, queue_id: %u",
+                       tp->dev_id, queue_id);
+
+       rte_bbdev_info_get(tp->dev_id, &info);
+
+       TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
+                       "NUM_OPS cannot exceed %u for this device",
+                       info.drv.queue_size_lim);
+
+       bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
+
+       rte_atomic16_clear(&tp->processing_status);
+       rte_atomic16_clear(&tp->nb_dequeued);
+
+       while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
+               rte_pause();
+
+       ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
+                               num_to_process);
+       TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
+                       num_to_process);
+       if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+               copy_reference_dec_op(ops, num_to_process, 0, bufs->inputs,
+                               bufs->hard_outputs, bufs->soft_outputs,
+                               tp->op_params->ref_dec_op);
+
+       /* Set counter to validate the ordering */
+       for (j = 0; j < num_to_process; ++j)
+               ops[j]->opaque_data = (void *)(uintptr_t)j;
+
+       for (j = 0; j < TEST_REPETITIONS; ++j) {
+               for (i = 0; i < num_to_process; ++i)
+                       rte_pktmbuf_reset(ops[i]->turbo_dec.hard_output.data);
+
+               tp->start_time = rte_rdtsc_precise();
+               for (enqueued = 0; enqueued < num_to_process;) {
+                       num_to_enq = burst_sz;
+
+                       if (unlikely(num_to_process - enqueued < num_to_enq))
+                               num_to_enq = num_to_process - enqueued;
+
+                       enq = 0;
+                       do {
+                               enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
                                                queue_id, &ops[enqueued],
                                                num_to_enq);
                        } while (unlikely(num_to_enq != enq));
@@ -2001,6 +2903,98 @@ throughput_intr_lcore_enc(void *arg)
        return TEST_SUCCESS;
 }
 
+static int
+throughput_intr_lcore_ldpc_enc(void *arg)
+{
+       struct thread_params *tp = arg;
+       unsigned int enqueued;
+       const uint16_t queue_id = tp->queue_id;
+       const uint16_t burst_sz = tp->op_params->burst_sz;
+       const uint16_t num_to_process = tp->op_params->num_to_process;
+       struct rte_bbdev_enc_op *ops[num_to_process];
+       struct test_buffers *bufs = NULL;
+       struct rte_bbdev_info info;
+       int ret, i, j;
+       uint16_t num_to_enq, enq;
+
+       TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
+                       "BURST_SIZE should be <= %u", MAX_BURST);
+
+       TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
+                       "Failed to enable interrupts for dev: %u, queue_id: %u",
+                       tp->dev_id, queue_id);
+
+       rte_bbdev_info_get(tp->dev_id, &info);
+
+       TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
+                       "NUM_OPS cannot exceed %u for this device",
+                       info.drv.queue_size_lim);
+
+       bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
+
+       rte_atomic16_clear(&tp->processing_status);
+       rte_atomic16_clear(&tp->nb_dequeued);
+
+       while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
+               rte_pause();
+
+       ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
+                       num_to_process);
+       TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
+                       num_to_process);
+       if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+               copy_reference_ldpc_enc_op(ops, num_to_process, 0,
+                               bufs->inputs, bufs->hard_outputs,
+                               tp->op_params->ref_enc_op);
+
+       /* Set counter to validate the ordering */
+       for (j = 0; j < num_to_process; ++j)
+               ops[j]->opaque_data = (void *)(uintptr_t)j;
+
+       for (j = 0; j < TEST_REPETITIONS; ++j) {
+               for (i = 0; i < num_to_process; ++i)
+                       rte_pktmbuf_reset(ops[i]->ldpc_enc.output.data);
+
+               tp->start_time = rte_rdtsc_precise();
+               for (enqueued = 0; enqueued < num_to_process;) {
+                       num_to_enq = burst_sz;
+
+                       if (unlikely(num_to_process - enqueued < num_to_enq))
+                               num_to_enq = num_to_process - enqueued;
+
+                       enq = 0;
+                       do {
+                               enq += rte_bbdev_enqueue_ldpc_enc_ops(
+                                               tp->dev_id,
+                                               queue_id, &ops[enqueued],
+                                               num_to_enq);
+                       } while (unlikely(enq != num_to_enq));
+                       enqueued += enq;
+
+                       /* Write the number of descriptors just enqueued to
+                        * the thread's burst_sz. This ensures that the
+                        * proper number of descriptors is dequeued in the
+                        * callback function - needed for the last batch when
+                        * the number of operations is not a multiple of the
+                        * burst size.
+                        */
+                       rte_atomic16_set(&tp->burst_sz, num_to_enq);
+
+                       /* Wait until processing of previous batch is
+                        * completed
+                        */
+                       while (rte_atomic16_read(&tp->nb_dequeued) !=
+                                       (int16_t) enqueued)
+                               rte_pause();
+               }
+               if (j != TEST_REPETITIONS - 1)
+                       rte_atomic16_clear(&tp->nb_dequeued);
+       }
+
+       return TEST_SUCCESS;
+}
+
 static int
 throughput_pmd_lcore_dec(void *arg)
 {
@@ -2098,6 +3092,139 @@ throughput_pmd_lcore_dec(void *arg)
        return TEST_SUCCESS;
 }
 
+static int
+bler_pmd_lcore_ldpc_dec(void *arg)
+{
+       struct thread_params *tp = arg;
+       uint16_t enq, deq;
+       uint64_t total_time = 0, start_time;
+       const uint16_t queue_id = tp->queue_id;
+       const uint16_t burst_sz = tp->op_params->burst_sz;
+       const uint16_t num_ops = tp->op_params->num_to_process;
+       struct rte_bbdev_dec_op *ops_enq[num_ops];
+       struct rte_bbdev_dec_op *ops_deq[num_ops];
+       struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
+       struct test_buffers *bufs = NULL;
+       int i, j, ret;
+       float parity_bler = 0;
+       struct rte_bbdev_info info;
+       uint16_t num_to_enq;
+       bool extDdr = check_bit(ldpc_cap_flags,
+                       RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE);
+       bool loopback = check_bit(ref_op->ldpc_dec.op_flags,
+                       RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK);
+       bool hc_out = check_bit(ref_op->ldpc_dec.op_flags,
+                       RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);
+
+       TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
+                       "BURST_SIZE should be <= %u", MAX_BURST);
+
+       rte_bbdev_info_get(tp->dev_id, &info);
+
+       TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
+                       "NUM_OPS cannot exceed %u for this device",
+                       info.drv.queue_size_lim);
+
+       bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
+
+       while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
+               rte_pause();
+
+       ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
+       TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
+
+       /* For BLER tests we need to enable early termination */
+       if (!check_bit(ref_op->ldpc_dec.op_flags,
+                       RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE))
+               ref_op->ldpc_dec.op_flags |=
+                               RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
+       ref_op->ldpc_dec.iter_max = get_iter_max();
+       ref_op->ldpc_dec.iter_count = ref_op->ldpc_dec.iter_max;
+
+       if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+               copy_reference_ldpc_dec_op(ops_enq, num_ops, 0, bufs->inputs,
+                               bufs->hard_outputs, bufs->soft_outputs,
+                               bufs->harq_inputs, bufs->harq_outputs, ref_op);
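+       /* Regenerate the input LLRs from the reference op (see
+        * generate_llr_input()), so BLER is measured on synthesized input
+        * rather than on the captured test vector.
+        */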
+       generate_llr_input(num_ops, bufs->inputs, ref_op);
+
+       /* Set counter to validate the ordering */
+       for (j = 0; j < num_ops; ++j)
+               ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+       for (i = 0; i < 1; ++i) { /* Could add more iterations */
+               for (j = 0; j < num_ops; ++j) {
+                       if (!loopback)
+                               mbuf_reset(
+                               ops_enq[j]->ldpc_dec.hard_output.data);
+                       if (hc_out || loopback)
+                               mbuf_reset(
+                               ops_enq[j]->ldpc_dec.harq_combined_output.data);
+               }
+               if (extDdr)
+                       preload_harq_ddr(tp->dev_id, queue_id, ops_enq,
+                                       num_ops, true);
+               start_time = rte_rdtsc_precise();
+
+               for (enq = 0, deq = 0; enq < num_ops;) {
+                       num_to_enq = burst_sz;
+
+                       if (unlikely(num_ops - enq < num_to_enq))
+                               num_to_enq = num_ops - enq;
+
+                       enq += rte_bbdev_enqueue_ldpc_dec_ops(tp->dev_id,
+                                       queue_id, &ops_enq[enq], num_to_enq);
+
+                       deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
+                                       queue_id, &ops_deq[deq], enq - deq);
+               }
+
+               /* Dequeue the remaining operations */
+               while (deq < enq) {
+                       deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
+                                       queue_id, &ops_deq[deq], enq - deq);
+               }
+
+               total_time += rte_rdtsc_precise() - start_time;
+       }
+
+       tp->iter_count = 0;
+       tp->iter_average = 0;
+       /* Collect the max and average iteration counts and the parity-based
+        * BLER over all dequeued ops
+        */
+       for (i = 0; i < num_ops; ++i) {
+               tp->iter_count = RTE_MAX(ops_enq[i]->ldpc_dec.iter_count,
+                               tp->iter_count);
+               tp->iter_average += (double) ops_enq[i]->ldpc_dec.iter_count;
+               if (ops_enq[i]->status & (1 << RTE_BBDEV_SYNDROME_ERROR))
+                       parity_bler += 1.0;
+       }
+
+       parity_bler /= num_ops; /* BLER based on the syndrome status bit */
+       tp->iter_average /= num_ops;
+       tp->bler = (double) validate_ldpc_bler(ops_deq, num_ops) / num_ops;
+
+       if (test_vector.op_type != RTE_BBDEV_OP_NONE
+                       && tp->bler == 0
+                       && parity_bler == 0
+                       && !hc_out) {
+               ret = validate_ldpc_dec_op(ops_deq, num_ops, ref_op,
+                               tp->op_params->vector_mask);
+               TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+       }
+
+       rte_bbdev_dec_op_free_bulk(ops_enq, num_ops);
+
+       double tb_len_bits = calc_ldpc_dec_TB_size(ref_op);
+       tp->ops_per_sec = ((double)num_ops * 1) /
+                       ((double)total_time / (double)rte_get_tsc_hz());
+       tp->mbps = (((double)(num_ops * 1 * tb_len_bits)) /
+                       1000000.0) / ((double)total_time /
+                       (double)rte_get_tsc_hz());
+
+       return TEST_SUCCESS;
+}
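+
+/*
+ * For reference, the per-core metrics computed above reduce to the
+ * following for a single pass over num_ops transport blocks:
+ *
+ *     bler        = failed_blocks / num_ops
+ *     ops_per_sec = num_ops / (total_time / tsc_hz)
+ *     mbps        = (num_ops * tb_len_bits / 1e6) / (total_time / tsc_hz)
+ *
+ * with failed_blocks from validate_ldpc_bler(), tb_len_bits from
+ * calc_ldpc_dec_TB_size() and tsc_hz from rte_get_tsc_hz().
+ */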
+
 static int
 throughput_pmd_lcore_ldpc_dec(void *arg)
 {
@@ -2114,6 +3241,12 @@ throughput_pmd_lcore_ldpc_dec(void *arg)
        int i, j, ret;
        struct rte_bbdev_info info;
        uint16_t num_to_enq;
+       bool extDdr = check_bit(ldpc_cap_flags,
+                       RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE);
+       bool loopback = check_bit(ref_op->ldpc_dec.op_flags,
+                       RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK);
+       bool hc_out = check_bit(ref_op->ldpc_dec.op_flags,
+                       RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);
 
        TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
                        "BURST_SIZE should be <= %u", MAX_BURST);
@@ -2137,7 +3270,7 @@ throughput_pmd_lcore_ldpc_dec(void *arg)
                        RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE))
                ref_op->ldpc_dec.op_flags -=
                                RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
-       ref_op->ldpc_dec.iter_max = 6;
+       ref_op->ldpc_dec.iter_max = get_iter_max();
        ref_op->ldpc_dec.iter_count = ref_op->ldpc_dec.iter_max;
 
        if (test_vector.op_type != RTE_BBDEV_OP_NONE)
@@ -2151,13 +3284,18 @@ throughput_pmd_lcore_ldpc_dec(void *arg)
 
        for (i = 0; i < TEST_REPETITIONS; ++i) {
                for (j = 0; j < num_ops; ++j) {
-                       mbuf_reset(ops_enq[j]->ldpc_dec.hard_output.data);
-                       if (check_bit(ref_op->ldpc_dec.op_flags,
-                                       RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE))
+                       if (!loopback)
+                               mbuf_reset(
+                               ops_enq[j]->ldpc_dec.hard_output.data);
+                       if (hc_out || loopback)
                                mbuf_reset(
                                ops_enq[j]->ldpc_dec.harq_combined_output.data);
                }
-
+               if (extDdr) {
+                       bool preload = i == (TEST_REPETITIONS - 1);
+                       preload_harq_ddr(tp->dev_id, queue_id, ops_enq,
+                                       num_ops, preload);
+               }
                start_time = rte_rdtsc_precise();
 
                for (enq = 0, deq = 0; enq < num_ops;) {
@@ -2188,6 +3326,10 @@ throughput_pmd_lcore_ldpc_dec(void *arg)
                tp->iter_count = RTE_MAX(ops_enq[i]->ldpc_dec.iter_count,
                                tp->iter_count);
        }
+       if (extDdr) {
+               /* Read loopback is not thread safe */
+               retrieve_harq_ddr(tp->dev_id, queue_id, ops_enq, num_ops);
+       }
 
        if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
                ret = validate_ldpc_dec_op(ops_deq, num_ops, ref_op,
@@ -2409,27 +3551,147 @@ print_enc_throughput(struct thread_params *t_params, unsigned int used_cores)
                used_cores, total_mops, total_mbps);
 }
 
+/* Aggregate the performance results over the number of cores used */
 static void
 print_dec_throughput(struct thread_params *t_params, unsigned int used_cores)
 {
-       unsigned int iter = 0;
+       unsigned int core_idx = 0;
        double total_mops = 0, total_mbps = 0;
        uint8_t iter_count = 0;
 
-       for (iter = 0; iter < used_cores; iter++) {
+       for (core_idx = 0; core_idx < used_cores; core_idx++) {
                printf(
                        "Throughput for core (%u): %.8lg Ops/s, %.8lg Mbps @ max %u iterations\n",
-                       t_params[iter].lcore_id, t_params[iter].ops_per_sec,
-                       t_params[iter].mbps, t_params[iter].iter_count);
-               total_mops += t_params[iter].ops_per_sec;
-               total_mbps += t_params[iter].mbps;
-               iter_count = RTE_MAX(iter_count, t_params[iter].iter_count);
+                       t_params[core_idx].lcore_id,
+                       t_params[core_idx].ops_per_sec,
+                       t_params[core_idx].mbps,
+                       t_params[core_idx].iter_count);
+               total_mops += t_params[core_idx].ops_per_sec;
+               total_mbps += t_params[core_idx].mbps;
+               iter_count = RTE_MAX(iter_count,
+                               t_params[core_idx].iter_count);
        }
        printf(
                "\nTotal throughput for %u cores: %.8lg MOPS, %.8lg Mbps @ max %u iterations\n",
                used_cores, total_mops, total_mbps, iter_count);
 }
 
+/* Aggregate the performance results over the number of cores used */
+static void
+print_dec_bler(struct thread_params *t_params, unsigned int used_cores)
+{
+       unsigned int core_idx = 0;
+       double total_mbps = 0, total_bler = 0, total_iter = 0;
+       double snr = get_snr();
+
+       for (core_idx = 0; core_idx < used_cores; core_idx++) {
+               printf("Core%u BLER %.1f %% - Iters %.1f - Tp %.1f Mbps %s\n",
+                               t_params[core_idx].lcore_id,
+                               t_params[core_idx].bler * 100,
+                               t_params[core_idx].iter_average,
+                               t_params[core_idx].mbps,
+                               get_vector_filename());
+               total_mbps += t_params[core_idx].mbps;
+               total_bler += t_params[core_idx].bler;
+               total_iter += t_params[core_idx].iter_average;
+       }
+       total_bler /= used_cores;
+       total_iter /= used_cores;
+
+       printf("SNR %.2f BLER %.1f %% - Iterations %.1f %d - Tp %.1f Mbps %s\n",
+                       snr, total_bler * 100, total_iter, get_iter_max(),
+                       total_mbps, get_vector_filename());
+}
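+
+/*
+ * In the summary line above, throughput adds up across cores while BLER and
+ * iteration counts are averaged:
+ *
+ *     total_mbps = sum(mbps[core])
+ *     total_bler = avg(bler[core])
+ *     total_iter = avg(iter_average[core])
+ */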
+
+/*
+ * Test function that measures the wireless BLER performance
+ */
+static int
+bler_test(struct active_device *ad,
+               struct test_op_params *op_params)
+{
+       int ret;
+       unsigned int lcore_id, used_cores = 0;
+       struct thread_params *t_params;
+       struct rte_bbdev_info info;
+       lcore_function_t *bler_function;
+       uint16_t num_lcores;
+       const char *op_type_str;
+
+       rte_bbdev_info_get(ad->dev_id, &info);
+
+       op_type_str = rte_bbdev_op_type_str(test_vector.op_type);
+       TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u",
+                       test_vector.op_type);
+
+       printf("+ ------------------------------------------------------- +\n");
+       printf("== test: bler\ndev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, itr mode: %s, GHz: %lg\n",
+                       info.dev_name, ad->nb_queues, op_params->burst_sz,
+                       op_params->num_to_process, op_params->num_lcores,
+                       op_type_str,
+                       intr_enabled ? "Interrupt mode" : "PMD mode",
+                       (double)rte_get_tsc_hz() / 1000000000.0);
+
+       /* Set number of lcores */
+       num_lcores = (ad->nb_queues < (op_params->num_lcores))
+                       ? ad->nb_queues
+                       : op_params->num_lcores;
+
+       /* Allocate memory for thread parameters structure */
+       t_params = rte_zmalloc(NULL, num_lcores * sizeof(struct thread_params),
+                       RTE_CACHE_LINE_SIZE);
+       TEST_ASSERT_NOT_NULL(t_params, "Failed to alloc %zuB for t_params",
+                       RTE_ALIGN(sizeof(struct thread_params) * num_lcores,
+                               RTE_CACHE_LINE_SIZE));
+
+       if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
+               bler_function = bler_pmd_lcore_ldpc_dec;
+       } else {
+               rte_free(t_params);
+               return TEST_SKIPPED;
+       }
+
+       rte_atomic16_set(&op_params->sync, SYNC_WAIT);
+
+       /* Master core is set at first entry */
+       t_params[0].dev_id = ad->dev_id;
+       t_params[0].lcore_id = rte_lcore_id();
+       t_params[0].op_params = op_params;
+       t_params[0].queue_id = ad->queue_ids[used_cores++];
+       t_params[0].iter_count = 0;
+
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (used_cores >= num_lcores)
+                       break;
+
+               t_params[used_cores].dev_id = ad->dev_id;
+               t_params[used_cores].lcore_id = lcore_id;
+               t_params[used_cores].op_params = op_params;
+               t_params[used_cores].queue_id = ad->queue_ids[used_cores];
+               t_params[used_cores].iter_count = 0;
+
+               rte_eal_remote_launch(bler_function,
+                               &t_params[used_cores++], lcore_id);
+       }
+
+       rte_atomic16_set(&op_params->sync, SYNC_START);
+       ret = bler_function(&t_params[0]);
+
+       /* Master core is always used */
+       for (used_cores = 1; used_cores < num_lcores; used_cores++)
+               ret |= rte_eal_wait_lcore(t_params[used_cores].lcore_id);
+
+       print_dec_bler(t_params, num_lcores);
+
+       rte_free(t_params);
+       return ret;
+}
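+
+/*
+ * One possible invocation of the new suite, assuming the companion
+ * test-bbdev.py runner and a hypothetical LDPC decoder vector file; the
+ * "bler" command maps to bbdev_bler_testsuite through
+ * REGISTER_TEST_COMMAND() at the bottom of this file:
+ *
+ *     ./test-bbdev.py -c bler -v ldpc_dec_v1.data -l 1 -n 2048 -b 64
+ */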
+
 /*
  * Test function that determines how long an enqueue + dequeue of a burst
  * takes on available lcores.
@@ -2476,11 +3738,11 @@ throughput_test(struct active_device *ad,
                if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
                        throughput_function = throughput_intr_lcore_dec;
                else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
-                       throughput_function = throughput_intr_lcore_dec;
+                       throughput_function = throughput_intr_lcore_ldpc_dec;
                else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
                        throughput_function = throughput_intr_lcore_enc;
                else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
-                       throughput_function = throughput_intr_lcore_enc;
+                       throughput_function = throughput_intr_lcore_ldpc_enc;
                else
                        throughput_function = throughput_intr_lcore_enc;
 
@@ -2677,6 +3939,8 @@ latency_test_ldpc_dec(struct rte_mempool *mempool,
        uint16_t i, j, dequeued;
        struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
        uint64_t start_time = 0, last_time = 0;
+       bool extDdr = ldpc_cap_flags &
+                       RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
 
        for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
                uint16_t enq = 0, deq = 0;
@@ -2689,6 +3953,15 @@ latency_test_ldpc_dec(struct rte_mempool *mempool,
                ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
                TEST_ASSERT_SUCCESS(ret,
                                "rte_bbdev_dec_op_alloc_bulk() failed");
+
+               /* For latency tests we need to disable early termination */
+               if (check_bit(ref_op->ldpc_dec.op_flags,
+                               RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE))
+                       ref_op->ldpc_dec.op_flags &=
+                                       ~RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
+               ref_op->ldpc_dec.iter_max = get_iter_max();
+               ref_op->ldpc_dec.iter_count = ref_op->ldpc_dec.iter_max;
+
                if (test_vector.op_type != RTE_BBDEV_OP_NONE)
                        copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued,
                                        bufs->inputs,
@@ -2698,6 +3971,10 @@ latency_test_ldpc_dec(struct rte_mempool *mempool,
                                        bufs->harq_outputs,
                                        ref_op);
 
+               if (extDdr)
+                       preload_harq_ddr(dev_id, queue_id, ops_enq,
+                                       burst_sz, true);
+
                /* Set counter to validate the ordering */
                for (j = 0; j < burst_sz; ++j)
                        ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
@@ -2724,6 +4001,9 @@ latency_test_ldpc_dec(struct rte_mempool *mempool,
                *min_time = RTE_MIN(*min_time, last_time);
                *total_time += last_time;
 
+               if (extDdr)
+                       retrieve_harq_ddr(dev_id, queue_id, ops_enq, burst_sz);
+
                if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
                        ret = validate_ldpc_dec_op(ops_deq, burst_sz, ref_op,
                                        vector_mask);
@@ -2733,7 +4013,6 @@ latency_test_ldpc_dec(struct rte_mempool *mempool,
                rte_bbdev_dec_op_free_bulk(ops_enq, deq);
                dequeued += deq;
        }
-
        return i;
 }
 
@@ -2825,7 +4104,6 @@ latency_test_ldpc_enc(struct rte_mempool *mempool,
                        burst_sz = num_to_process - dequeued;
 
                ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
-
                TEST_ASSERT_SUCCESS(ret,
                                "rte_bbdev_enc_op_alloc_bulk() failed");
                if (test_vector.op_type != RTE_BBDEV_OP_NONE)
@@ -2840,11 +4118,6 @@ latency_test_ldpc_enc(struct rte_mempool *mempool,
 
                start_time = rte_rdtsc_precise();
 
-               /*
-                * printf("Latency Debug %d\n",
-                * ops_enq[0]->ldpc_enc.cb_params.z_c); REMOVEME
-                */
-
                enq = rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
                                &ops_enq[enq], burst_sz);
                TEST_ASSERT(enq == burst_sz,
@@ -2870,11 +4143,6 @@ latency_test_ldpc_enc(struct rte_mempool *mempool,
                        TEST_ASSERT_SUCCESS(ret, "Validation failed!");
                }
 
-               /*
-                * printf("Ready to free - deq %d num_to_process %d\n", FIXME
-                *              deq, num_to_process);
-                * printf("cache %d\n", ops_enq[0]->mempool->cache_size);
-                */
                rte_bbdev_enc_op_free_bulk(ops_enq, deq);
                dequeued += deq;
        }
@@ -3072,6 +4340,8 @@ offload_latency_test_ldpc_dec(struct rte_mempool *mempool,
        uint64_t enq_start_time, deq_start_time;
        uint64_t enq_sw_last_time, deq_last_time;
        struct rte_bbdev_stats stats;
+       bool extDdr = ldpc_cap_flags &
+                       RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
 
        for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
                uint16_t enq = 0, deq = 0;
@@ -3089,6 +4359,10 @@ offload_latency_test_ldpc_dec(struct rte_mempool *mempool,
                                        bufs->harq_outputs,
                                        ref_op);
 
+               if (extDdr)
+                       preload_harq_ddr(dev_id, queue_id, ops_enq,
+                                       burst_sz, true);
+
                /* Start time meas for enqueue function offload latency */
                enq_start_time = rte_rdtsc_precise();
                do {
@@ -3096,13 +4370,13 @@ offload_latency_test_ldpc_dec(struct rte_mempool *mempool,
                                        &ops_enq[enq], burst_sz - enq);
                } while (unlikely(burst_sz != enq));
 
+               enq_sw_last_time = rte_rdtsc_precise() - enq_start_time;
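+               /* Timestamp before reading the stats so the query itself is
+                * not counted; the accelerator cycles reported by the driver
+                * are then subtracted to isolate the software enqueue time.
+                */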
                ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
                TEST_ASSERT_SUCCESS(ret,
                                "Failed to get stats for queue (%u) of device (%u)",
                                queue_id, dev_id);
 
-               enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
-                               stats.acc_offload_cycles;
+               enq_sw_last_time -= stats.acc_offload_cycles;
                time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
                                enq_sw_last_time);
                time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
@@ -3135,9 +4409,14 @@ offload_latency_test_ldpc_dec(struct rte_mempool *mempool,
 
                /* Dequeue remaining operations if needed*/
                while (burst_sz != deq)
-                       deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
+                       deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
                                        &ops_deq[deq], burst_sz - deq);
 
+               if (extDdr) {
+                       /* Read loopback is not thread safe */
+                       retrieve_harq_ddr(dev_id, queue_id, ops_enq, burst_sz);
+               }
+
                rte_bbdev_dec_op_free_bulk(ops_enq, deq);
                dequeued += deq;
        }
@@ -3164,7 +4443,8 @@ offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs,
                        burst_sz = num_to_process - dequeued;
 
                ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
-               TEST_ASSERT_SUCCESS(ret, "rte_bbdev_op_alloc_bulk() failed");
+               TEST_ASSERT_SUCCESS(ret,
+                               "rte_bbdev_enc_op_alloc_bulk() failed");
                if (test_vector.op_type != RTE_BBDEV_OP_NONE)
                        copy_reference_enc_op(ops_enq, burst_sz, dequeued,
                                        bufs->inputs,
@@ -3178,13 +4458,13 @@ offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs,
                                        &ops_enq[enq], burst_sz - enq);
                } while (unlikely(burst_sz != enq));
 
+               enq_sw_last_time = rte_rdtsc_precise() - enq_start_time;
+
                ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
                TEST_ASSERT_SUCCESS(ret,
                                "Failed to get stats for queue (%u) of device (%u)",
                                queue_id, dev_id);
-
-               enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
-                               stats.acc_offload_cycles;
+               enq_sw_last_time -= stats.acc_offload_cycles;
                time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
                                enq_sw_last_time);
                time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
@@ -3246,7 +4526,8 @@ offload_latency_test_ldpc_enc(struct rte_mempool *mempool,
                        burst_sz = num_to_process - dequeued;
 
                ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
-               TEST_ASSERT_SUCCESS(ret, "rte_bbdev_op_alloc_bulk() failed");
+               TEST_ASSERT_SUCCESS(ret,
+                               "rte_bbdev_enc_op_alloc_bulk() failed");
                if (test_vector.op_type != RTE_BBDEV_OP_NONE)
                        copy_reference_ldpc_enc_op(ops_enq, burst_sz, dequeued,
                                        bufs->inputs,
@@ -3260,13 +4541,13 @@ offload_latency_test_ldpc_enc(struct rte_mempool *mempool,
                                        &ops_enq[enq], burst_sz - enq);
                } while (unlikely(burst_sz != enq));
 
+               enq_sw_last_time = rte_rdtsc_precise() - enq_start_time;
                ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
                TEST_ASSERT_SUCCESS(ret,
                                "Failed to get stats for queue (%u) of device (%u)",
                                queue_id, dev_id);
 
-               enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
-                               stats.acc_offload_cycles;
+               enq_sw_last_time -= stats.acc_offload_cycles;
                time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
                                enq_sw_last_time);
                time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
@@ -3419,7 +4700,7 @@ static int
 offload_latency_empty_q_test_dec(uint16_t dev_id, uint16_t queue_id,
                const uint16_t num_to_process, uint16_t burst_sz,
                uint64_t *deq_total_time, uint64_t *deq_min_time,
-               uint64_t *deq_max_time)
+               uint64_t *deq_max_time, const enum rte_bbdev_op_type op_type)
 {
        int i, deq_total;
        struct rte_bbdev_dec_op *ops[MAX_BURST];
@@ -3433,7 +4714,12 @@ offload_latency_empty_q_test_dec(uint16_t dev_id, uint16_t queue_id,
 
                if (unlikely(num_to_process - deq_total < burst_sz))
                        burst_sz = num_to_process - deq_total;
-               rte_bbdev_dequeue_dec_ops(dev_id, queue_id, ops, burst_sz);
+               if (op_type == RTE_BBDEV_OP_LDPC_DEC)
+                       rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id, ops,
+                                       burst_sz);
+               else
+                       rte_bbdev_dequeue_dec_ops(dev_id, queue_id, ops,
+                                       burst_sz);
 
                deq_last_time = rte_rdtsc_precise() - deq_start_time;
                *deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
@@ -3448,7 +4734,7 @@ static int
 offload_latency_empty_q_test_enc(uint16_t dev_id, uint16_t queue_id,
                const uint16_t num_to_process, uint16_t burst_sz,
                uint64_t *deq_total_time, uint64_t *deq_min_time,
-               uint64_t *deq_max_time)
+               uint64_t *deq_max_time, const enum rte_bbdev_op_type op_type)
 {
        int i, deq_total;
        struct rte_bbdev_enc_op *ops[MAX_BURST];
@@ -3461,7 +4747,12 @@ offload_latency_empty_q_test_enc(uint16_t dev_id, uint16_t queue_id,
 
                if (unlikely(num_to_process - deq_total < burst_sz))
                        burst_sz = num_to_process - deq_total;
-               rte_bbdev_dequeue_enc_ops(dev_id, queue_id, ops, burst_sz);
+               if (op_type == RTE_BBDEV_OP_LDPC_ENC)
+                       rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id, ops,
+                                       burst_sz);
+               else
+                       rte_bbdev_dequeue_enc_ops(dev_id, queue_id, ops,
+                                       burst_sz);
 
                deq_last_time = rte_rdtsc_precise() - deq_start_time;
                *deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
@@ -3471,6 +4762,7 @@ offload_latency_empty_q_test_enc(uint16_t dev_id, uint16_t queue_id,
 
        return i;
 }
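+
+/*
+ * Note: both empty-queue helpers above time dequeue calls against a queue
+ * with nothing in flight, so the reported latency is purely the cost of a
+ * dequeue poll that returns zero ops.
+ */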
+
 #endif
 
 static int
@@ -3508,14 +4800,15 @@ offload_latency_empty_q_test(struct active_device *ad,
        printf("== test: offload latency empty dequeue\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
                        info.dev_name, burst_sz, num_to_process, op_type_str);
 
-       if (op_type == RTE_BBDEV_OP_TURBO_DEC)
+       if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
+                       op_type == RTE_BBDEV_OP_LDPC_DEC)
                iter = offload_latency_empty_q_test_dec(ad->dev_id, queue_id,
                                num_to_process, burst_sz, &deq_total_time,
-                               &deq_min_time, &deq_max_time);
+                               &deq_min_time, &deq_max_time, op_type);
        else
                iter = offload_latency_empty_q_test_enc(ad->dev_id, queue_id,
                                num_to_process, burst_sz, &deq_total_time,
-                               &deq_min_time, &deq_max_time);
+                               &deq_min_time, &deq_max_time, op_type);
 
        if (iter <= 0)
                return TEST_FAILED;
@@ -3535,6 +4828,12 @@ offload_latency_empty_q_test(struct active_device *ad,
 #endif
 }
 
+static int
+bler_tc(void)
+{
+       return run_test_case(bler_test);
+}
+
 static int
 throughput_tc(void)
 {
@@ -3565,6 +4864,16 @@ interrupt_tc(void)
        return run_test_case(throughput_test);
 }
 
+static struct unit_test_suite bbdev_bler_testsuite = {
+       .suite_name = "BBdev BLER Tests",
+       .setup = testsuite_setup,
+       .teardown = testsuite_teardown,
+       .unit_test_cases = {
+               TEST_CASE_ST(ut_setup, ut_teardown, bler_tc),
+               TEST_CASES_END() /**< NULL terminate unit test array */
+       }
+};
+
 static struct unit_test_suite bbdev_throughput_testsuite = {
        .suite_name = "BBdev Throughput Tests",
        .setup = testsuite_setup,
@@ -3616,6 +4925,7 @@ static struct unit_test_suite bbdev_interrupt_testsuite = {
        }
 };
 
+REGISTER_TEST_COMMAND(bler, bbdev_bler_testsuite);
 REGISTER_TEST_COMMAND(throughput, bbdev_throughput_testsuite);
 REGISTER_TEST_COMMAND(validation, bbdev_validation_testsuite);
 REGISTER_TEST_COMMAND(latency, bbdev_latency_testsuite);