+/*
+ * Generate Qm LLRS for Qm==4
+ * Modulation, AWGN and LLR estimation from max log development
+ */
+static void
+gen_qm4_llr(int8_t *llrs, uint32_t i, double N0, double llr_max)
+{
+ int qm = 4;
+ int qam = 16;
+ int m, k;
+ double I, Q, p0, p1, llr_, b[qm], log_syml_prob[qam];
+ /* 5.1.4 of TS38.211 */
+ const double symbols_I[16] = {1, 1, 3, 3, 1, 1, 3, 3,
+ -1, -1, -3, -3, -1, -1, -3, -3};
+ const double symbols_Q[16] = {1, 3, 1, 3, -1, -3, -1, -3,
+ 1, 3, 1, 3, -1, -3, -1, -3};
+ /* Average constellation point energy */
+ N0 *= 10.0;
+ for (k = 0; k < qm; k++)
+ b[k] = llrs[qm * i + k] < 0 ? 1.0 : 0.0;
+ /* 5.1.4 of TS38.211 */
+ I = (1 - 2 * b[0]) * (2 - (1 - 2 * b[2]));
+ Q = (1 - 2 * b[1]) * (2 - (1 - 2 * b[3]));
+ /* AWGN channel */
+ I += sqrt(N0 / 2) * randn(0);
+ Q += sqrt(N0 / 2) * randn(1);
+ /*
+ * Calculate the log of the probability that each of
+ * the constellation points was transmitted
+ */
+ for (m = 0; m < qam; m++)
+ log_syml_prob[m] = -(pow(I - symbols_I[m], 2.0)
+ + pow(Q - symbols_Q[m], 2.0)) / N0;
+ /* Calculate an LLR for each of the k_64QAM bits in the set */
+ for (k = 0; k < qm; k++) {
+ p0 = -999999;
+ p1 = -999999;
+ /* For each constellation point */
+ for (m = 0; m < qam; m++) {
+ if ((m >> (qm - k - 1)) & 1)
+ p1 = maxstar(p1, log_syml_prob[m]);
+ else
+ p0 = maxstar(p0, log_syml_prob[m]);
+ }
+ /* Calculate the LLR */
+ llr_ = p0 - p1;
+ llr_ *= (1 << ldpc_llr_decimals);
+ llr_ = round(llr_);
+ if (llr_ > llr_max)
+ llr_ = llr_max;
+ if (llr_ < -llr_max)
+ llr_ = -llr_max;
+ llrs[qm * i + k] = (int8_t) llr_;
+ }
+}
+
+static void
+gen_qm2_llr(int8_t *llrs, uint32_t j, double N0, double llr_max)
+{
+ double b, b1, n;
+ double coeff = 2.0 * sqrt(N0);
+
+ /* Ignore in vectors rare quasi null LLRs not to be saturated */
+ if (llrs[j] < 8 && llrs[j] > -8)
+ return;
+
+ /* Note don't change sign here */
+ n = randn(j % 2);
+ b1 = ((llrs[j] > 0 ? 2.0 : -2.0)
+ + coeff * n) / N0;
+ b = b1 * (1 << ldpc_llr_decimals);
+ b = round(b);
+ if (b > llr_max)
+ b = llr_max;
+ if (b < -llr_max)
+ b = -llr_max;
+ llrs[j] = (int8_t) b;
+}
+
+/* Generate LLR for a given SNR */
+static void
+generate_llr_input(uint16_t n, struct rte_bbdev_op_data *inputs,
+ struct rte_bbdev_dec_op *ref_op)
+{
+ struct rte_mbuf *m;
+ uint16_t qm;
+ uint32_t i, j, e, range;
+ double N0, llr_max;
+
+ e = ref_op->ldpc_dec.cb_params.e;
+ qm = ref_op->ldpc_dec.q_m;
+ llr_max = (1 << (ldpc_llr_size - 1)) - 1;
+ range = e / qm;
+ N0 = 1.0 / pow(10.0, get_snr() / 10.0);
+
+ for (i = 0; i < n; ++i) {
+ m = inputs[i].data;
+ int8_t *llrs = rte_pktmbuf_mtod_offset(m, int8_t *, 0);
+ if (qm == 8) {
+ for (j = 0; j < range; ++j)
+ gen_qm8_llr(llrs, j, N0, llr_max);
+ } else if (qm == 6) {
+ for (j = 0; j < range; ++j)
+ gen_qm6_llr(llrs, j, N0, llr_max);
+ } else if (qm == 4) {
+ for (j = 0; j < range; ++j)
+ gen_qm4_llr(llrs, j, N0, llr_max);
+ } else {
+ for (j = 0; j < e; ++j)
+ gen_qm2_llr(llrs, j, N0, llr_max);
+ }
+ }
+}
+
+static void
+copy_reference_ldpc_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
+ unsigned int start_idx,
+ struct rte_bbdev_op_data *inputs,
+ struct rte_bbdev_op_data *hard_outputs,
+ struct rte_bbdev_op_data *soft_outputs,
+ struct rte_bbdev_op_data *harq_inputs,
+ struct rte_bbdev_op_data *harq_outputs,
+ struct rte_bbdev_dec_op *ref_op)
+{
+ unsigned int i;
+ struct rte_bbdev_op_ldpc_dec *ldpc_dec = &ref_op->ldpc_dec;
+
+ for (i = 0; i < n; ++i) {
+ if (ldpc_dec->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
+ ops[i]->ldpc_dec.tb_params.ea =
+ ldpc_dec->tb_params.ea;
+ ops[i]->ldpc_dec.tb_params.eb =
+ ldpc_dec->tb_params.eb;
+ ops[i]->ldpc_dec.tb_params.c =
+ ldpc_dec->tb_params.c;
+ ops[i]->ldpc_dec.tb_params.cab =
+ ldpc_dec->tb_params.cab;
+ ops[i]->ldpc_dec.tb_params.r =
+ ldpc_dec->tb_params.r;
+ } else {
+ ops[i]->ldpc_dec.cb_params.e = ldpc_dec->cb_params.e;
+ }
+
+ ops[i]->ldpc_dec.basegraph = ldpc_dec->basegraph;
+ ops[i]->ldpc_dec.z_c = ldpc_dec->z_c;
+ ops[i]->ldpc_dec.q_m = ldpc_dec->q_m;
+ ops[i]->ldpc_dec.n_filler = ldpc_dec->n_filler;
+ ops[i]->ldpc_dec.n_cb = ldpc_dec->n_cb;
+ ops[i]->ldpc_dec.iter_max = ldpc_dec->iter_max;
+ ops[i]->ldpc_dec.rv_index = ldpc_dec->rv_index;
+ ops[i]->ldpc_dec.op_flags = ldpc_dec->op_flags;
+ ops[i]->ldpc_dec.code_block_mode = ldpc_dec->code_block_mode;
+
+ if (hard_outputs != NULL)
+ ops[i]->ldpc_dec.hard_output =
+ hard_outputs[start_idx + i];
+ if (inputs != NULL)
+ ops[i]->ldpc_dec.input =
+ inputs[start_idx + i];
+ if (soft_outputs != NULL)
+ ops[i]->ldpc_dec.soft_output =
+ soft_outputs[start_idx + i];
+ if (harq_inputs != NULL)
+ ops[i]->ldpc_dec.harq_combined_input =
+ harq_inputs[start_idx + i];
+ if (harq_outputs != NULL)
+ ops[i]->ldpc_dec.harq_combined_output =
+ harq_outputs[start_idx + i];
+ }
+}
+
+
+static void
+copy_reference_ldpc_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
+ unsigned int start_idx,
+ struct rte_bbdev_op_data *inputs,
+ struct rte_bbdev_op_data *outputs,
+ struct rte_bbdev_enc_op *ref_op)
+{
+ unsigned int i;
+ struct rte_bbdev_op_ldpc_enc *ldpc_enc = &ref_op->ldpc_enc;
+ for (i = 0; i < n; ++i) {
+ if (ldpc_enc->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
+ ops[i]->ldpc_enc.tb_params.ea = ldpc_enc->tb_params.ea;
+ ops[i]->ldpc_enc.tb_params.eb = ldpc_enc->tb_params.eb;
+ ops[i]->ldpc_enc.tb_params.cab =
+ ldpc_enc->tb_params.cab;
+ ops[i]->ldpc_enc.tb_params.c = ldpc_enc->tb_params.c;
+ ops[i]->ldpc_enc.tb_params.r = ldpc_enc->tb_params.r;
+ } else {
+ ops[i]->ldpc_enc.cb_params.e = ldpc_enc->cb_params.e;
+ }
+ ops[i]->ldpc_enc.basegraph = ldpc_enc->basegraph;
+ ops[i]->ldpc_enc.z_c = ldpc_enc->z_c;
+ ops[i]->ldpc_enc.q_m = ldpc_enc->q_m;
+ ops[i]->ldpc_enc.n_filler = ldpc_enc->n_filler;
+ ops[i]->ldpc_enc.n_cb = ldpc_enc->n_cb;
+ ops[i]->ldpc_enc.rv_index = ldpc_enc->rv_index;
+ ops[i]->ldpc_enc.op_flags = ldpc_enc->op_flags;
+ ops[i]->ldpc_enc.code_block_mode = ldpc_enc->code_block_mode;
+ ops[i]->ldpc_enc.output = outputs[start_idx + i];
+ ops[i]->ldpc_enc.input = inputs[start_idx + i];
+ }
+}
+
+static int
+check_dec_status_and_ordering(struct rte_bbdev_dec_op *op,
+ unsigned int order_idx, const int expected_status)
+{
+ int status = op->status;
+ /* ignore parity mismatch false alarms for long iterations */
+ if (get_iter_max() >= 10) {
+ if (!(expected_status & (1 << RTE_BBDEV_SYNDROME_ERROR)) &&
+ (status & (1 << RTE_BBDEV_SYNDROME_ERROR))) {
+ printf("WARNING: Ignore Syndrome Check mismatch\n");
+ status -= (1 << RTE_BBDEV_SYNDROME_ERROR);
+ }
+ if ((expected_status & (1 << RTE_BBDEV_SYNDROME_ERROR)) &&
+ !(status & (1 << RTE_BBDEV_SYNDROME_ERROR))) {
+ printf("WARNING: Ignore Syndrome Check mismatch\n");
+ status += (1 << RTE_BBDEV_SYNDROME_ERROR);
+ }
+ }
+
+ TEST_ASSERT(status == expected_status,
+ "op_status (%d) != expected_status (%d)",
+ op->status, expected_status);
+
+ TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
+ "Ordering error, expected %p, got %p",
+ (void *)(uintptr_t)order_idx, op->opaque_data);
+
+ return TEST_SUCCESS;
+}
+
+static int
+check_enc_status_and_ordering(struct rte_bbdev_enc_op *op,
+ unsigned int order_idx, const int expected_status)
+{
+ TEST_ASSERT(op->status == expected_status,
+ "op_status (%d) != expected_status (%d)",
+ op->status, expected_status);
+
+ if (op->opaque_data != (void *)(uintptr_t)INVALID_OPAQUE)
+ TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
+ "Ordering error, expected %p, got %p",
+ (void *)(uintptr_t)order_idx, op->opaque_data);
+
+ return TEST_SUCCESS;
+}
+
+static inline int
+validate_op_chain(struct rte_bbdev_op_data *op,
+ struct op_data_entries *orig_op)
+{
+ uint8_t i;
+ struct rte_mbuf *m = op->data;
+ uint8_t nb_dst_segments = orig_op->nb_segments;
+ uint32_t total_data_size = 0;
+
+ TEST_ASSERT(nb_dst_segments == m->nb_segs,
+ "Number of segments differ in original (%u) and filled (%u) op",
+ nb_dst_segments, m->nb_segs);
+
+ /* Validate each mbuf segment length */
+ for (i = 0; i < nb_dst_segments; ++i) {
+ /* Apply offset to the first mbuf segment */
+ uint16_t offset = (i == 0) ? op->offset : 0;
+ uint16_t data_len = rte_pktmbuf_data_len(m) - offset;
+ total_data_size += orig_op->segments[i].length;
+
+ TEST_ASSERT(orig_op->segments[i].length == data_len,
+ "Length of segment differ in original (%u) and filled (%u) op",
+ orig_op->segments[i].length, data_len);
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(orig_op->segments[i].addr,
+ rte_pktmbuf_mtod_offset(m, uint32_t *, offset),
+ data_len,
+ "Output buffers (CB=%u) are not equal", i);
+ m = m->next;
+ }
+
+ /* Validate total mbuf pkt length */
+ uint32_t pkt_len = rte_pktmbuf_pkt_len(op->data) - op->offset;
+ TEST_ASSERT(total_data_size == pkt_len,
+ "Length of data differ in original (%u) and filled (%u) op",
+ total_data_size, pkt_len);
+
+ return TEST_SUCCESS;
+}
+
+/*
+ * Compute K0 for a given configuration for HARQ output length computation
+ * As per definition in 3GPP 38.212 Table 5.4.2.1-2
+ */
+static inline uint16_t
+get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
+{
+ if (rv_index == 0)
+ return 0;
+ uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;
+ if (n_cb == n) {
+ if (rv_index == 1)
+ return (bg == 1 ? K0_1_1 : K0_1_2) * z_c;
+ else if (rv_index == 2)
+ return (bg == 1 ? K0_2_1 : K0_2_2) * z_c;
+ else
+ return (bg == 1 ? K0_3_1 : K0_3_2) * z_c;
+ }
+ /* LBRM case - includes a division by N */
+ if (rv_index == 1)
+ return (((bg == 1 ? K0_1_1 : K0_1_2) * n_cb)
+ / n) * z_c;
+ else if (rv_index == 2)
+ return (((bg == 1 ? K0_2_1 : K0_2_2) * n_cb)
+ / n) * z_c;
+ else
+ return (((bg == 1 ? K0_3_1 : K0_3_2) * n_cb)
+ / n) * z_c;
+}
+
+/* HARQ output length including the Filler bits */
+static inline uint16_t
+compute_harq_len(struct rte_bbdev_op_ldpc_dec *ops_ld)
+{
+ uint16_t k0 = 0;
+ uint8_t max_rv = (ops_ld->rv_index == 1) ? 3 : ops_ld->rv_index;
+ k0 = get_k0(ops_ld->n_cb, ops_ld->z_c, ops_ld->basegraph, max_rv);
+ /* Compute RM out size and number of rows */
+ uint16_t parity_offset = (ops_ld->basegraph == 1 ? 20 : 8)
+ * ops_ld->z_c - ops_ld->n_filler;
+ uint16_t deRmOutSize = RTE_MIN(
+ k0 + ops_ld->cb_params.e +
+ ((k0 > parity_offset) ?
+ 0 : ops_ld->n_filler),
+ ops_ld->n_cb);
+ uint16_t numRows = ((deRmOutSize + ops_ld->z_c - 1)
+ / ops_ld->z_c);
+ uint16_t harq_output_len = numRows * ops_ld->z_c;
+ return harq_output_len;
+}
+
/*
 * Validate a HARQ combined output chain against the reference data,
 * tolerating small differences caused by fixed-point quantization and
 * scaling. Note: both harq_orig and harq_out are adjusted in place
 * (saturation / rescaling) before comparison.
 */
static inline int
validate_op_harq_chain(struct rte_bbdev_op_data *op,
		struct op_data_entries *orig_op,
		struct rte_bbdev_op_ldpc_dec *ops_ld)
{
	uint8_t i;
	uint32_t j, jj, k;
	struct rte_mbuf *m = op->data;
	uint8_t nb_dst_segments = orig_op->nb_segments;
	uint32_t total_data_size = 0;
	int8_t *harq_orig, *harq_out, abs_harq_origin;
	uint32_t byte_error = 0, cum_error = 0, error;
	/* Saturation bound after scaling down by ldpc_llr_decimals */
	int16_t llr_max = (1 << (ldpc_llr_size - ldpc_llr_decimals)) - 1;
	/* Saturation bound before any scaling (full ldpc_llr_size range) */
	int16_t llr_max_pre_scaling = (1 << (ldpc_llr_size - 1)) - 1;
	uint16_t parity_offset;

	TEST_ASSERT(nb_dst_segments == m->nb_segs,
			"Number of segments differ in original (%u) and filled (%u) op",
			nb_dst_segments, m->nb_segs);

	/* Validate each mbuf segment length */
	for (i = 0; i < nb_dst_segments; ++i) {
		/* Apply offset to the first mbuf segment */
		uint16_t offset = (i == 0) ? op->offset : 0;
		uint16_t data_len = rte_pktmbuf_data_len(m) - offset;
		total_data_size += orig_op->segments[i].length;

		/* Lengths only need to agree within a 64-byte margin */
		TEST_ASSERT(orig_op->segments[i].length <
				(uint32_t)(data_len + 64),
				"Length of segment differ in original (%u) and filled (%u) op",
				orig_op->segments[i].length, data_len);
		harq_orig = (int8_t *) orig_op->segments[i].addr;
		harq_out = rte_pktmbuf_mtod_offset(m, int8_t *, offset);

		if (!(ldpc_cap_flags &
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS
				) || (ops_ld->op_flags &
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
			/*
			 * No filler handling by the device: drop the last
			 * Z_c bytes and treat the tail as the filler region.
			 */
			data_len -= ops_ld->z_c;
			parity_offset = data_len;
		} else {
			/* Compute RM out size and number of rows */
			parity_offset = (ops_ld->basegraph == 1 ? 20 : 8)
					* ops_ld->z_c - ops_ld->n_filler;
			uint16_t deRmOutSize = compute_harq_len(ops_ld) -
					ops_ld->n_filler;
			if (data_len > deRmOutSize)
				data_len = deRmOutSize;
			if (data_len > orig_op->segments[i].length)
				data_len = orig_op->segments[i].length;
		}
		/*
		 * HARQ output can have minor differences
		 * due to integer representation and related scaling
		 */
		/* j indexes the reference, jj the device output (skips fillers) */
		for (j = 0, jj = 0; j < data_len; j++, jj++) {
			if (j == parity_offset) {
				/* Special Handling of the filler bits */
				for (k = 0; k < ops_ld->n_filler; k++) {
					if (harq_out[jj] !=
							llr_max_pre_scaling) {
						printf("HARQ Filler issue %d: %d %d\n",
							jj, harq_out[jj],
							llr_max);
						byte_error++;
					}
					jj++;
				}
			}
			if (!(ops_ld->op_flags &
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
				/* Scale the device output back down before comparing */
				if (ldpc_llr_decimals > 1)
					harq_out[jj] = (harq_out[jj] + 1)
						>> (ldpc_llr_decimals - 1);
				/* Saturated to S7 */
				if (harq_orig[j] > llr_max)
					harq_orig[j] = llr_max;
				if (harq_orig[j] < -llr_max)
					harq_orig[j] = -llr_max;
			}
			if (harq_orig[j] != harq_out[jj]) {
				error = (harq_orig[j] > harq_out[jj]) ?
						harq_orig[j] - harq_out[jj] :
						harq_out[jj] - harq_orig[j];
				abs_harq_origin = harq_orig[j] > 0 ?
						harq_orig[j] :
						-harq_orig[j];
				/* Residual quantization error */
				if ((error > 8 && (abs_harq_origin <
						(llr_max - 16))) ||
						(error > 16)) {
					printf("HARQ mismatch %d: exp %d act %d => %d\n",
						j, harq_orig[j],
						harq_out[jj], error);
					byte_error++;
					cum_error += error;
				}
			}
		}
		m = m->next;
	}

	/* At most a single mismatching byte is tolerated */
	if (byte_error)
		TEST_ASSERT(byte_error <= 1,
				"HARQ output mismatch (%d) %d",
				byte_error, cum_error);

	/* Validate total mbuf pkt length (within the 64-byte margin) */
	uint32_t pkt_len = rte_pktmbuf_pkt_len(op->data) - op->offset;
	TEST_ASSERT(total_data_size < pkt_len + 64,
			"Length of data differ in original (%u) and filled (%u) op",
			total_data_size, pkt_len);

	return TEST_SUCCESS;
}
+
+static int
+validate_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
+ struct rte_bbdev_dec_op *ref_op, const int vector_mask)