#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hexdump.h>
+#include <rte_interrupts.h>
+
+#ifdef RTE_LIBRTE_PMD_FPGA_LTE_FEC
+#include <fpga_lte_fec.h>
+#endif
#include "main.h"
#include "test_bbdev_vector.h"
#define MAX_QUEUES RTE_MAX_LCORE
#define TEST_REPETITIONS 1000
+#ifdef RTE_LIBRTE_PMD_FPGA_LTE_FEC
+#define FPGA_PF_DRIVER_NAME ("intel_fpga_lte_fec_pf")
+#define FPGA_VF_DRIVER_NAME ("intel_fpga_lte_fec_vf")
+#define VF_UL_QUEUE_VALUE 4
+#define VF_DL_QUEUE_VALUE 4
+#define UL_BANDWIDTH 3
+#define DL_BANDWIDTH 3
+#define UL_LOAD_BALANCE 128
+#define DL_LOAD_BALANCE 128
+#define FLR_TIMEOUT 610
+#endif
+
#define OPS_CACHE_SIZE 256U
#define OPS_POOL_SIZE_MIN 511U /* 0.5K per queue */
struct rte_mempool *in_mbuf_pool;
struct rte_mempool *hard_out_mbuf_pool;
struct rte_mempool *soft_out_mbuf_pool;
+ struct rte_mempool *harq_in_mbuf_pool;
+ struct rte_mempool *harq_out_mbuf_pool;
} active_devs[RTE_BBDEV_MAX_DEVS];
static uint8_t nb_active_devs;
struct rte_bbdev_op_data *inputs;
struct rte_bbdev_op_data *hard_outputs;
struct rte_bbdev_op_data *soft_outputs;
+ struct rte_bbdev_op_data *harq_inputs;
+ struct rte_bbdev_op_data *harq_outputs;
};
/* Operation parameters specific for given test case */
} while (m != NULL);
}
+/* Read flag value 0/1 from bitmap */
+static inline bool
+check_bit(uint32_t bitmap, uint32_t bitmask)
+{
+ return bitmap & bitmask;
+}
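+/*
+ * Example usage (mirroring the throughput path further below): early
+ * termination is active when
+ * check_bit(ref_op->ldpc_dec.op_flags,
+ * RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE) returns true.
+ */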
+
static inline void
set_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
{
check_dev_cap(const struct rte_bbdev_info *dev_info)
{
unsigned int i;
- unsigned int nb_inputs, nb_soft_outputs, nb_hard_outputs;
+ unsigned int nb_inputs, nb_soft_outputs, nb_hard_outputs,
+ nb_harq_inputs, nb_harq_outputs;
const struct rte_bbdev_op_cap *op_cap = dev_info->drv.capabilities;
nb_inputs = test_vector.entries[DATA_INPUT].nb_segments;
nb_soft_outputs = test_vector.entries[DATA_SOFT_OUTPUT].nb_segments;
nb_hard_outputs = test_vector.entries[DATA_HARD_OUTPUT].nb_segments;
+ nb_harq_inputs = test_vector.entries[DATA_HARQ_INPUT].nb_segments;
+ nb_harq_outputs = test_vector.entries[DATA_HARQ_OUTPUT].nb_segments;
for (i = 0; op_cap->type != RTE_BBDEV_OP_NONE; ++i, ++op_cap) {
if (op_cap->type != test_vector.op_type)
!(cap->capability_flags &
RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
printf(
- "WARNING: Device \"%s\" does not support soft output - soft output flags will be ignored.\n",
+ "INFO: Device \"%s\" does not support soft output - soft output flags will be ignored.\n",
dev_info->dev_name);
clear_soft_out_cap(
&test_vector.turbo_dec.op_flags);
if (nb_hard_outputs > cap->num_buffers_dst) {
printf(
"Too many hard outputs defined: %u, max: %u\n",
- nb_hard_outputs, cap->num_buffers_src);
+ nb_hard_outputs, cap->num_buffers_dst);
+ return TEST_FAILED;
+ }
+ if (intr_enabled && !(cap->capability_flags &
+ RTE_BBDEV_TURBO_ENC_INTERRUPTS)) {
+ printf(
+ "Dequeue interrupts are not supported!\n");
+ return TEST_FAILED;
+ }
+
+ return TEST_SUCCESS;
+ } else if (op_cap->type == RTE_BBDEV_OP_LDPC_ENC) {
+ const struct rte_bbdev_op_cap_ldpc_enc *cap =
+ &op_cap->cap.ldpc_enc;
+
+ if (!flags_match(test_vector.ldpc_enc.op_flags,
+ cap->capability_flags)) {
+ printf("Flag Mismatch\n");
+ return TEST_FAILED;
+ }
+ if (nb_inputs > cap->num_buffers_src) {
+ printf("Too many inputs defined: %u, max: %u\n",
+ nb_inputs, cap->num_buffers_src);
+ return TEST_FAILED;
+ }
+ if (nb_hard_outputs > cap->num_buffers_dst) {
+ printf(
+ "Too many hard outputs defined: %u, max: %u\n",
+ nb_hard_outputs, cap->num_buffers_dst);
return TEST_FAILED;
}
if (intr_enabled && !(cap->capability_flags &
return TEST_FAILED;
}
+ return TEST_SUCCESS;
+ } else if (op_cap->type == RTE_BBDEV_OP_LDPC_DEC) {
+ const struct rte_bbdev_op_cap_ldpc_dec *cap =
+ &op_cap->cap.ldpc_dec;
+
+ if (!flags_match(test_vector.ldpc_dec.op_flags,
+ cap->capability_flags)) {
+ printf("Flag Mismatch\n");
+ return TEST_FAILED;
+ }
+ if (nb_inputs > cap->num_buffers_src) {
+ printf("Too many inputs defined: %u, max: %u\n",
+ nb_inputs, cap->num_buffers_src);
+ return TEST_FAILED;
+ }
+ if (nb_hard_outputs > cap->num_buffers_hard_out) {
+ printf(
+ "Too many hard outputs defined: %u, max: %u\n",
+ nb_hard_outputs,
+ cap->num_buffers_hard_out);
+ return TEST_FAILED;
+ }
+ if (nb_harq_inputs > cap->num_buffers_hard_out) {
+ printf(
+ "Too many HARQ inputs defined: %u, max: %u\n",
+ nb_harq_inputs,
+ cap->num_buffers_hard_out);
+ return TEST_FAILED;
+ }
+ if (nb_harq_outputs > cap->num_buffers_hard_out) {
+ printf(
+ "Too many HARQ outputs defined: %u, max: %u\n",
+ nb_harq_outputs,
+ cap->num_buffers_hard_out);
+ return TEST_FAILED;
+ }
+ if (intr_enabled && !(cap->capability_flags &
+ RTE_BBDEV_LDPC_DEC_INTERRUPTS)) {
+ printf(
+ "Dequeue interrupts are not supported!\n");
+ return TEST_FAILED;
+ }
+
return TEST_SUCCESS;
}
}
&test_vector.entries[DATA_HARD_OUTPUT];
struct op_data_entries *soft_out =
&test_vector.entries[DATA_SOFT_OUTPUT];
+ struct op_data_entries *harq_in =
+ &test_vector.entries[DATA_HARQ_INPUT];
+ struct op_data_entries *harq_out =
+ &test_vector.entries[DATA_HARQ_OUTPUT];
/* allocate ops mempool */
ops_pool_size = optimal_mempool_size(RTE_MAX(
socket_id);
ad->hard_out_mbuf_pool = mp;
- if (soft_out->nb_segments == 0)
- return TEST_SUCCESS;
/* Soft outputs */
- mbuf_pool_size = optimal_mempool_size(ops_pool_size *
- soft_out->nb_segments);
- mp = create_mbuf_pool(soft_out, ad->dev_id, socket_id, mbuf_pool_size,
- "soft_out");
- TEST_ASSERT_NOT_NULL(mp,
- "ERROR Failed to create %uB soft output pktmbuf pool for dev %u on socket %u.",
- mbuf_pool_size,
- ad->dev_id,
- socket_id);
- ad->soft_out_mbuf_pool = mp;
+ if (soft_out->nb_segments > 0) {
+ mbuf_pool_size = optimal_mempool_size(ops_pool_size *
+ soft_out->nb_segments);
+ mp = create_mbuf_pool(soft_out, ad->dev_id, socket_id,
+ mbuf_pool_size,
+ "soft_out");
+ TEST_ASSERT_NOT_NULL(mp,
+ "ERROR Failed to create %uB soft output pktmbuf pool for dev %u on socket %u.",
+ mbuf_pool_size,
+ ad->dev_id,
+ socket_id);
+ ad->soft_out_mbuf_pool = mp;
+ }
- return 0;
+ /* HARQ inputs */
+ if (harq_in->nb_segments > 0) {
+ mbuf_pool_size = optimal_mempool_size(ops_pool_size *
+ harq_in->nb_segments);
+ mp = create_mbuf_pool(harq_in, ad->dev_id, socket_id,
+ mbuf_pool_size,
+ "harq_in");
+ TEST_ASSERT_NOT_NULL(mp,
+ "ERROR Failed to create %uB harq input pktmbuf pool for dev %u on socket %u.",
+ mbuf_pool_size,
+ ad->dev_id,
+ socket_id);
+ ad->harq_in_mbuf_pool = mp;
+ }
+
+ /* HARQ outputs */
+ if (harq_out->nb_segments > 0) {
+ mbuf_pool_size = optimal_mempool_size(ops_pool_size *
+ harq_out->nb_segments);
+ mp = create_mbuf_pool(harq_out, ad->dev_id, socket_id,
+ mbuf_pool_size,
+ "harq_out");
+ TEST_ASSERT_NOT_NULL(mp,
+ "ERROR Failed to create %uB harq output pktmbuf pool for dev %u on socket %u.",
+ mbuf_pool_size,
+ ad->dev_id,
+ socket_id);
+ ad->harq_out_mbuf_pool = mp;
+ }
+
+ return TEST_SUCCESS;
}
static int
unsigned int nb_queues;
enum rte_bbdev_op_type op_type = vector->op_type;
+/* Configure FPGA LTE FEC with PF & VF values
+ * if the '-i' flag is set and the FPGA device is used
+ */
+#ifndef RTE_BUILD_SHARED_LIB
+#ifdef RTE_LIBRTE_PMD_FPGA_LTE_FEC
+ if ((get_init_device() == true) &&
+ (!strcmp(info->drv.driver_name, FPGA_PF_DRIVER_NAME))) {
+ struct fpga_lte_fec_conf conf;
+ unsigned int i;
+
+ printf("Configure FPGA FEC Driver %s with default values\n",
+ info->drv.driver_name);
+
+ /* clear default configuration before initialization */
+ memset(&conf, 0, sizeof(struct fpga_lte_fec_conf));
+
+ /* Set PF mode:
+ * true if PF is used for data plane
+ * false for VFs
+ */
+ conf.pf_mode_en = true;
+
+ for (i = 0; i < FPGA_LTE_FEC_NUM_VFS; ++i) {
+ /* Number of UL queues per VF (fpga supports 8 VFs) */
+ conf.vf_ul_queues_number[i] = VF_UL_QUEUE_VALUE;
+ /* Number of DL queues per VF (fpga supports 8 VFs) */
+ conf.vf_dl_queues_number[i] = VF_DL_QUEUE_VALUE;
+ }
+
+ /* UL bandwidth. Needed for schedule algorithm */
+ conf.ul_bandwidth = UL_BANDWIDTH;
+ /* DL bandwidth */
+ conf.dl_bandwidth = DL_BANDWIDTH;
+
+ /* UL & DL load balance factor set to 128 */
+ conf.ul_load_balance = UL_LOAD_BALANCE;
+ conf.dl_load_balance = DL_LOAD_BALANCE;
+
+ /* FLR timeout value */
+ conf.flr_time_out = FLR_TIMEOUT;
+
+ /* setup FPGA PF with configuration information */
+ ret = fpga_lte_fec_configure(info->dev_name, &conf);
+ TEST_ASSERT_SUCCESS(ret,
+ "Failed to configure 4G FPGA PF for bbdev %s",
+ info->dev_name);
+ }
+#endif
+#endif
nb_queues = RTE_MIN(rte_lcore_count(), info->drv.max_num_queues);
+ nb_queues = RTE_MIN(nb_queues, (unsigned int) MAX_QUEUES);
+
/* setup device */
ret = rte_bbdev_setup_queues(dev_id, nb_queues, info->socket_id);
if (ret < 0) {
bufs[i].offset = 0;
bufs[i].length = 0;
- if (op_type == DATA_INPUT) {
+ if ((op_type == DATA_INPUT) || (op_type == DATA_HARQ_INPUT)) {
data = rte_pktmbuf_append(m_head, seg->length);
TEST_ASSERT_NOT_NULL(data,
"Couldn't append %u bytes to mbuf from %d data type mbuf pool",
"Couldn't chain mbufs from %d data type mbuf pool",
op_type);
}
-
} else {
/* allocate chained-mbuf for output buffer */
static void
limit_input_llr_val_range(struct rte_bbdev_op_data *input_ops,
- uint16_t n, int8_t max_llr_modulus)
+ const uint16_t n, const int8_t max_llr_modulus)
{
uint16_t i, byte_idx;
}
}
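+/*
+ * Rescale input LLRs into the fixed-point format advertised by the
+ * device capabilities. Sketch of the assumption made here: test
+ * vectors carry LLRs with 1 fractional bit, so llr_decimals == 2
+ * doubles each LLR and llr_decimals == 0 halves it; the result is
+ * then saturated to the symmetric range of llr_size bits, e.g.
+ * [-127, 127] for llr_size == 8.
+ */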
+static void
+ldpc_input_llr_scaling(struct rte_bbdev_op_data *input_ops,
+ const uint16_t n, const int8_t llr_size,
+ const int8_t llr_decimals)
+{
+ if (input_ops == NULL)
+ return;
+
+ uint16_t i, byte_idx;
+
+ int16_t llr_max, llr_min, llr_tmp;
+ llr_max = (1 << (llr_size - 1)) - 1;
+ llr_min = -llr_max;
+ for (i = 0; i < n; ++i) {
+ struct rte_mbuf *m = input_ops[i].data;
+ while (m != NULL) {
+ int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
+ input_ops[i].offset);
+ for (byte_idx = 0; byte_idx < rte_pktmbuf_data_len(m);
+ ++byte_idx) {
+
+ llr_tmp = llr[byte_idx];
+ if (llr_decimals == 2)
+ llr_tmp *= 2;
+ else if (llr_decimals == 0)
+ llr_tmp /= 2;
+ llr_tmp = RTE_MIN(llr_max,
+ RTE_MAX(llr_min, llr_tmp));
+ llr[byte_idx] = (int8_t) llr_tmp;
+ }
+
+ m = m->next;
+ }
+ }
+}
+
static int
fill_queue_buffers(struct test_op_params *op_params,
struct rte_mempool *in_mp, struct rte_mempool *hard_out_mp,
- struct rte_mempool *soft_out_mp, uint16_t queue_id,
+ struct rte_mempool *soft_out_mp,
+ struct rte_mempool *harq_in_mp, struct rte_mempool *harq_out_mp,
+ uint16_t queue_id,
const struct rte_bbdev_op_cap *capabilities,
uint16_t min_alignment, const int socket_id)
{
in_mp,
soft_out_mp,
hard_out_mp,
+ harq_in_mp,
+ harq_out_mp,
};
struct rte_bbdev_op_data **queue_ops[DATA_NUM_TYPES] = {
&op_params->q_bufs[socket_id][queue_id].inputs,
&op_params->q_bufs[socket_id][queue_id].soft_outputs,
&op_params->q_bufs[socket_id][queue_id].hard_outputs,
+ &op_params->q_bufs[socket_id][queue_id].harq_inputs,
+ &op_params->q_bufs[socket_id][queue_id].harq_outputs,
};
for (type = DATA_INPUT; type < DATA_NUM_TYPES; ++type) {
limit_input_llr_val_range(*queue_ops[DATA_INPUT], n,
capabilities->cap.turbo_dec.max_llr_modulus);
+ if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
+ ldpc_input_llr_scaling(*queue_ops[DATA_INPUT], n,
+ capabilities->cap.ldpc_dec.llr_size,
+ capabilities->cap.ldpc_dec.llr_decimals);
+ ldpc_input_llr_scaling(*queue_ops[DATA_HARQ_INPUT], n,
+ capabilities->cap.ldpc_dec.llr_size,
+ capabilities->cap.ldpc_dec.llr_decimals);
+ }
+
return 0;
}
rte_mempool_free(ad->in_mbuf_pool);
rte_mempool_free(ad->hard_out_mbuf_pool);
rte_mempool_free(ad->soft_out_mbuf_pool);
+ rte_mempool_free(ad->harq_in_mbuf_pool);
+ rte_mempool_free(ad->harq_out_mbuf_pool);
for (i = 0; i < rte_lcore_count(); ++i) {
for (j = 0; j < RTE_MAX_NUMA_NODES; ++j) {
rte_free(op_params->q_bufs[j][i].inputs);
rte_free(op_params->q_bufs[j][i].hard_outputs);
rte_free(op_params->q_bufs[j][i].soft_outputs);
+ rte_free(op_params->q_bufs[j][i].harq_inputs);
+ rte_free(op_params->q_bufs[j][i].harq_outputs);
}
}
}
}
}
+static void
+copy_reference_ldpc_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
+ unsigned int start_idx,
+ struct rte_bbdev_op_data *inputs,
+ struct rte_bbdev_op_data *hard_outputs,
+ struct rte_bbdev_op_data *soft_outputs,
+ struct rte_bbdev_op_data *harq_inputs,
+ struct rte_bbdev_op_data *harq_outputs,
+ struct rte_bbdev_dec_op *ref_op)
+{
+ unsigned int i;
+ struct rte_bbdev_op_ldpc_dec *ldpc_dec = &ref_op->ldpc_dec;
+
+ for (i = 0; i < n; ++i) {
+ if (ldpc_dec->code_block_mode == 0) {
+ ops[i]->ldpc_dec.tb_params.ea =
+ ldpc_dec->tb_params.ea;
+ ops[i]->ldpc_dec.tb_params.eb =
+ ldpc_dec->tb_params.eb;
+ ops[i]->ldpc_dec.tb_params.c =
+ ldpc_dec->tb_params.c;
+ ops[i]->ldpc_dec.tb_params.cab =
+ ldpc_dec->tb_params.cab;
+ ops[i]->ldpc_dec.tb_params.r =
+ ldpc_dec->tb_params.r;
+ } else {
+ ops[i]->ldpc_dec.cb_params.e = ldpc_dec->cb_params.e;
+ }
+
+ ops[i]->ldpc_dec.basegraph = ldpc_dec->basegraph;
+ ops[i]->ldpc_dec.z_c = ldpc_dec->z_c;
+ ops[i]->ldpc_dec.q_m = ldpc_dec->q_m;
+ ops[i]->ldpc_dec.n_filler = ldpc_dec->n_filler;
+ ops[i]->ldpc_dec.n_cb = ldpc_dec->n_cb;
+ ops[i]->ldpc_dec.iter_max = ldpc_dec->iter_max;
+ ops[i]->ldpc_dec.rv_index = ldpc_dec->rv_index;
+ ops[i]->ldpc_dec.op_flags = ldpc_dec->op_flags;
+ ops[i]->ldpc_dec.code_block_mode = ldpc_dec->code_block_mode;
+
+ ops[i]->ldpc_dec.hard_output = hard_outputs[start_idx + i];
+ ops[i]->ldpc_dec.input = inputs[start_idx + i];
+ if (soft_outputs != NULL)
+ ops[i]->ldpc_dec.soft_output =
+ soft_outputs[start_idx + i];
+ if (harq_inputs != NULL)
+ ops[i]->ldpc_dec.harq_combined_input =
+ harq_inputs[start_idx + i];
+ if (harq_outputs != NULL)
+ ops[i]->ldpc_dec.harq_combined_output =
+ harq_outputs[start_idx + i];
+ }
+}
+
+static void
+copy_reference_ldpc_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
+ unsigned int start_idx,
+ struct rte_bbdev_op_data *inputs,
+ struct rte_bbdev_op_data *outputs,
+ struct rte_bbdev_enc_op *ref_op)
+{
+ unsigned int i;
+ struct rte_bbdev_op_ldpc_enc *ldpc_enc = &ref_op->ldpc_enc;
+ for (i = 0; i < n; ++i) {
+ if (ldpc_enc->code_block_mode == 0) {
+ ops[i]->ldpc_enc.tb_params.ea = ldpc_enc->tb_params.ea;
+ ops[i]->ldpc_enc.tb_params.eb = ldpc_enc->tb_params.eb;
+ ops[i]->ldpc_enc.tb_params.cab =
+ ldpc_enc->tb_params.cab;
+ ops[i]->ldpc_enc.tb_params.c = ldpc_enc->tb_params.c;
+ ops[i]->ldpc_enc.tb_params.r = ldpc_enc->tb_params.r;
+ } else {
+ ops[i]->ldpc_enc.cb_params.e = ldpc_enc->cb_params.e;
+ }
+ ops[i]->ldpc_enc.basegraph = ldpc_enc->basegraph;
+ ops[i]->ldpc_enc.z_c = ldpc_enc->z_c;
+ ops[i]->ldpc_enc.q_m = ldpc_enc->q_m;
+ ops[i]->ldpc_enc.n_filler = ldpc_enc->n_filler;
+ ops[i]->ldpc_enc.n_cb = ldpc_enc->n_cb;
+ ops[i]->ldpc_enc.rv_index = ldpc_enc->rv_index;
+ ops[i]->ldpc_enc.op_flags = ldpc_enc->op_flags;
+ ops[i]->ldpc_enc.code_block_mode = ldpc_enc->code_block_mode;
+ ops[i]->ldpc_enc.output = outputs[start_idx + i];
+ ops[i]->ldpc_enc.input = inputs[start_idx + i];
+ }
+}
+
static int
check_dec_status_and_ordering(struct rte_bbdev_dec_op *op,
unsigned int order_idx, const int expected_status)
return TEST_SUCCESS;
}
+
+static int
+validate_ldpc_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
+ struct rte_bbdev_dec_op *ref_op, const int vector_mask)
+{
+ unsigned int i;
+ int ret;
+ struct op_data_entries *hard_data_orig =
+ &test_vector.entries[DATA_HARD_OUTPUT];
+ struct op_data_entries *soft_data_orig =
+ &test_vector.entries[DATA_SOFT_OUTPUT];
+ struct op_data_entries *harq_data_orig =
+ &test_vector.entries[DATA_HARQ_OUTPUT];
+ struct rte_bbdev_op_ldpc_dec *ops_td;
+ struct rte_bbdev_op_data *hard_output;
+ struct rte_bbdev_op_data *harq_output;
+ struct rte_bbdev_op_data *soft_output;
+ struct rte_bbdev_op_ldpc_dec *ref_td = &ref_op->ldpc_dec;
+
+ for (i = 0; i < n; ++i) {
+ ops_td = &ops[i]->ldpc_dec;
+ hard_output = &ops_td->hard_output;
+ harq_output = &ops_td->harq_combined_output;
+ soft_output = &ops_td->soft_output;
+
+ ret = check_dec_status_and_ordering(ops[i], i, ref_op->status);
+ TEST_ASSERT_SUCCESS(ret,
+ "Checking status and ordering for decoder failed");
+ if (vector_mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT)
+ TEST_ASSERT(ops_td->iter_count <= ref_td->iter_count,
+ "Returned iter_count (%d) > expected iter_count (%d)",
+ ops_td->iter_count, ref_td->iter_count);
+ /* We can ignore data when the decoding failed to converge */
+ if ((ops[i]->status & (1 << RTE_BBDEV_SYNDROME_ERROR)) == 0)
+ TEST_ASSERT_SUCCESS(validate_op_chain(hard_output,
+ hard_data_orig),
+ "Hard output buffers (CB=%u) are not equal",
+ i);
+
+ if (ref_op->ldpc_dec.op_flags & RTE_BBDEV_LDPC_SOFT_OUT_ENABLE)
+ TEST_ASSERT_SUCCESS(validate_op_chain(soft_output,
+ soft_data_orig),
+ "Soft output buffers (CB=%u) are not equal",
+ i);
+ if (ref_op->ldpc_dec.op_flags &
+ RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE) {
+ ldpc_input_llr_scaling(harq_output, 1, 8, 0);
+ TEST_ASSERT_SUCCESS(validate_op_chain(harq_output,
+ harq_data_orig),
+ "HARQ output buffers (CB=%u) are not equal",
+ i);
+ }
+ }
+
+ return TEST_SUCCESS;
+}
+
static int
validate_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
struct rte_bbdev_enc_op *ref_op)
return TEST_SUCCESS;
}
+static int
+validate_ldpc_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
+ struct rte_bbdev_enc_op *ref_op)
+{
+ unsigned int i;
+ int ret;
+ struct op_data_entries *hard_data_orig =
+ &test_vector.entries[DATA_HARD_OUTPUT];
+
+ for (i = 0; i < n; ++i) {
+ ret = check_enc_status_and_ordering(ops[i], i, ref_op->status);
+ TEST_ASSERT_SUCCESS(ret,
+ "Checking status and ordering for encoder failed");
+ TEST_ASSERT_SUCCESS(validate_op_chain(
+ &ops[i]->ldpc_enc.output,
+ hard_data_orig),
+ "Output buffers (CB=%u) are not equal",
+ i);
+ }
+
+ return TEST_SUCCESS;
+}
+
static void
create_reference_dec_op(struct rte_bbdev_dec_op *op)
{
entry->segments[i].length;
}
+static void
+create_reference_ldpc_dec_op(struct rte_bbdev_dec_op *op)
+{
+ unsigned int i;
+ struct op_data_entries *entry;
+
+ op->ldpc_dec = test_vector.ldpc_dec;
+ entry = &test_vector.entries[DATA_INPUT];
+ for (i = 0; i < entry->nb_segments; ++i)
+ op->ldpc_dec.input.length +=
+ entry->segments[i].length;
+ if (test_vector.ldpc_dec.op_flags &
+ RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE) {
+ entry = &test_vector.entries[DATA_HARQ_INPUT];
+ for (i = 0; i < entry->nb_segments; ++i)
+ op->ldpc_dec.harq_combined_input.length +=
+ entry->segments[i].length;
+ }
+}
+
static void
create_reference_enc_op(struct rte_bbdev_enc_op *op)
{
entry->segments[i].length;
}
+static void
+create_reference_ldpc_enc_op(struct rte_bbdev_enc_op *op)
+{
+ unsigned int i;
+ struct op_data_entries *entry;
+
+ op->ldpc_enc = test_vector.ldpc_enc;
+ entry = &test_vector.entries[DATA_INPUT];
+ for (i = 0; i < entry->nb_segments; ++i)
+ op->ldpc_enc.input.length +=
+ entry->segments[i].length;
+}
+
static uint32_t
calc_dec_TB_size(struct rte_bbdev_dec_op *op)
{
return tb_size;
}
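+/*
+ * TB size below counts information bits: each code block carries
+ * sys_cols * z_c - n_filler bits. Illustrative numbers: basegraph 1
+ * has 22 systematic columns, so z_c = 384 and n_filler = 0 give
+ * 22 * 384 = 8448 bits per code block.
+ */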
+static uint32_t
+calc_ldpc_dec_TB_size(struct rte_bbdev_dec_op *op)
+{
+ uint8_t i;
+ uint32_t c, r, tb_size = 0;
+ uint16_t sys_cols = (op->ldpc_dec.basegraph == 1) ? 22 : 10;
+
+ if (op->ldpc_dec.code_block_mode) {
+ tb_size = sys_cols * op->ldpc_dec.z_c - op->ldpc_dec.n_filler;
+ } else {
+ c = op->ldpc_dec.tb_params.c;
+ r = op->ldpc_dec.tb_params.r;
+ for (i = 0; i < c - r; i++)
+ tb_size += sys_cols * op->ldpc_dec.z_c
+ - op->ldpc_dec.n_filler;
+ }
+ return tb_size;
+}
+
static uint32_t
calc_enc_TB_size(struct rte_bbdev_enc_op *op)
{
return tb_size;
}
+static uint32_t
+calc_ldpc_enc_TB_size(struct rte_bbdev_enc_op *op)
+{
+ uint8_t i;
+ uint32_t c, r, tb_size = 0;
+ uint16_t sys_cols = (op->ldpc_enc.basegraph == 1) ? 22 : 10;
+
+ if (op->ldpc_enc.code_block_mode) {
+ tb_size = sys_cols * op->ldpc_enc.z_c - op->ldpc_enc.n_filler;
+ } else {
+ c = op->ldpc_enc.tb_params.c;
+ r = op->ldpc_enc.tb_params.r;
+ for (i = 0; i < c - r; i++)
+ tb_size += sys_cols * op->ldpc_enc.z_c
+ - op->ldpc_enc.n_filler;
+ }
+ return tb_size;
+}
+
static int
init_test_op_params(struct test_op_params *op_params,
enum rte_bbdev_op_type op_type, const int expected_status,
uint16_t burst_sz, uint16_t num_to_process, uint16_t num_lcores)
{
int ret = 0;
- if (op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ op_type == RTE_BBDEV_OP_LDPC_DEC)
ret = rte_bbdev_dec_op_alloc_bulk(ops_mp,
&op_params->ref_dec_op, 1);
else
op_params->num_to_process = num_to_process;
op_params->num_lcores = num_lcores;
op_params->vector_mask = vector_mask;
- if (op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ op_type == RTE_BBDEV_OP_LDPC_DEC)
op_params->ref_dec_op->status = expected_status;
- else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
+ else if (op_type == RTE_BBDEV_OP_TURBO_ENC
+ || op_type == RTE_BBDEV_OP_LDPC_ENC)
op_params->ref_enc_op->status = expected_status;
-
return 0;
}
goto fail;
}
- if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
- /* Find Decoder capabilities */
- const struct rte_bbdev_op_cap *cap = info.drv.capabilities;
- while (cap->type != RTE_BBDEV_OP_NONE) {
- if (cap->type == RTE_BBDEV_OP_TURBO_DEC) {
- capabilities = cap;
- break;
- }
- }
- TEST_ASSERT_NOT_NULL(capabilities,
- "Couldn't find Decoder capabilities");
- create_reference_dec_op(op_params->ref_dec_op);
+ /* Find capabilities */
+ const struct rte_bbdev_op_cap *cap = info.drv.capabilities;
+ for (i = 0; i < RTE_BBDEV_OP_TYPE_COUNT; i++) {
+ if (cap->type == test_vector.op_type) {
+ capabilities = cap;
+ break;
+ }
+ cap++;
+ }
+ TEST_ASSERT_NOT_NULL(capabilities,
+ "Couldn't find capabilities");
+
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
+ create_reference_dec_op(op_params->ref_dec_op);
} else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
create_reference_enc_op(op_params->ref_enc_op);
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
+ create_reference_ldpc_enc_op(op_params->ref_enc_op);
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
+ create_reference_ldpc_dec_op(op_params->ref_dec_op);
for (i = 0; i < ad->nb_queues; ++i) {
f_ret = fill_queue_buffers(op_params,
ad->in_mbuf_pool,
ad->hard_out_mbuf_pool,
ad->soft_out_mbuf_pool,
+ ad->harq_in_mbuf_pool,
+ ad->harq_out_mbuf_pool,
ad->queue_ids[i],
capabilities,
info.drv.min_alignment,
uint16_t deq, burst_sz, num_ops;
uint16_t queue_id = *(uint16_t *) ret_param;
struct rte_bbdev_info info;
-
double tb_len_bits;
-
struct thread_params *tp = cb_arg;
/* Find matching thread params using queue_id */
burst_sz = rte_atomic16_read(&tp->burst_sz);
num_ops = tp->op_params->num_to_process;
- if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
&tp->dec_ops[
rte_atomic16_read(&tp->nb_dequeued)],
struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
ret = validate_enc_op(tp->enc_ops, num_ops, ref_op);
rte_bbdev_enc_op_free_bulk(tp->enc_ops, deq);
+ } else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC) {
+ struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
+ ret = validate_ldpc_enc_op(tp->enc_ops, num_ops, ref_op);
+ rte_bbdev_enc_op_free_bulk(tp->enc_ops, deq);
+ } else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
+ struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
+ ret = validate_ldpc_dec_op(tp->dec_ops, num_ops, ref_op,
+ tp->op_params->vector_mask);
+ rte_bbdev_dec_op_free_bulk(tp->dec_ops, deq);
}
if (ret) {
case RTE_BBDEV_OP_TURBO_ENC:
tb_len_bits = calc_enc_TB_size(tp->op_params->ref_enc_op);
break;
+ case RTE_BBDEV_OP_LDPC_DEC:
+ tb_len_bits = calc_ldpc_dec_TB_size(tp->op_params->ref_dec_op);
+ break;
+ case RTE_BBDEV_OP_LDPC_ENC:
+ tb_len_bits = calc_ldpc_enc_TB_size(tp->op_params->ref_enc_op);
+ break;
case RTE_BBDEV_OP_NONE:
tb_len_bits = 0.0;
break;
enq = 0;
do {
enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
- queue_id, &ops[enqueued],
- num_to_enq);
+ queue_id, &ops[enqueued],
+ num_to_enq);
} while (unlikely(num_to_enq != enq));
enqueued += enq;
rte_atomic16_set(&tp->burst_sz, num_to_enq);
/* Wait until processing of previous batch is
- * completed.
+ * completed
*/
while (rte_atomic16_read(&tp->nb_dequeued) !=
(int16_t) enqueued)
rte_atomic16_set(&tp->burst_sz, num_to_enq);
/* Wait until processing of previous batch is
- * completed.
+ * completed
*/
while (rte_atomic16_read(&tp->nb_dequeued) !=
(int16_t) enqueued)
return TEST_SUCCESS;
}
+static int
+throughput_pmd_lcore_ldpc_dec(void *arg)
+{
+ struct thread_params *tp = arg;
+ uint16_t enq, deq;
+ uint64_t total_time = 0, start_time;
+ const uint16_t queue_id = tp->queue_id;
+ const uint16_t burst_sz = tp->op_params->burst_sz;
+ const uint16_t num_ops = tp->op_params->num_to_process;
+ struct rte_bbdev_dec_op *ops_enq[num_ops];
+ struct rte_bbdev_dec_op *ops_deq[num_ops];
+ struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
+ struct test_buffers *bufs = NULL;
+ int i, j, ret;
+ struct rte_bbdev_info info;
+ uint16_t num_to_enq;
+
+ TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
+ "BURST_SIZE should be <= %u", MAX_BURST);
+
+ rte_bbdev_info_get(tp->dev_id, &info);
+
+ TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
+ "NUM_OPS cannot exceed %u for this device",
+ info.drv.queue_size_lim);
+
+ bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
+
+ while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
+ rte_pause();
+
+ ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
+ TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
+
+ /* For throughput tests we need to disable early termination */
+ if (check_bit(ref_op->ldpc_dec.op_flags,
+ RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE))
+ ref_op->ldpc_dec.op_flags -=
+ RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
+ ref_op->ldpc_dec.iter_max = 6;
+ ref_op->ldpc_dec.iter_count = ref_op->ldpc_dec.iter_max;
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_dec_op(ops_enq, num_ops, 0, bufs->inputs,
+ bufs->hard_outputs, bufs->soft_outputs,
+ bufs->harq_inputs, bufs->harq_outputs, ref_op);
+
+ /* Set counter to validate the ordering */
+ for (j = 0; j < num_ops; ++j)
+ ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+ for (i = 0; i < TEST_REPETITIONS; ++i) {
+ for (j = 0; j < num_ops; ++j) {
+ mbuf_reset(ops_enq[j]->ldpc_dec.hard_output.data);
+ if (check_bit(ref_op->ldpc_dec.op_flags,
+ RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE))
+ mbuf_reset(
+ ops_enq[j]->ldpc_dec.harq_combined_output.data);
+ }
+
+ start_time = rte_rdtsc_precise();
+
+ for (enq = 0, deq = 0; enq < num_ops;) {
+ num_to_enq = burst_sz;
+
+ if (unlikely(num_ops - enq < num_to_enq))
+ num_to_enq = num_ops - enq;
+
+ enq += rte_bbdev_enqueue_ldpc_dec_ops(tp->dev_id,
+ queue_id, &ops_enq[enq], num_to_enq);
+
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
+ queue_id, &ops_deq[deq], enq - deq);
+ }
+
+ /* dequeue the remaining */
+ while (deq < enq) {
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
+ queue_id, &ops_deq[deq], enq - deq);
+ }
+
+ total_time += rte_rdtsc_precise() - start_time;
+ }
+
+ tp->iter_count = 0;
+ /* get the max of iter_count for all dequeued ops */
+ for (i = 0; i < num_ops; ++i) {
+ tp->iter_count = RTE_MAX(ops_enq[i]->ldpc_dec.iter_count,
+ tp->iter_count);
+ }
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
+ ret = validate_ldpc_dec_op(ops_deq, num_ops, ref_op,
+ tp->op_params->vector_mask);
+ TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+ }
+
+ rte_bbdev_dec_op_free_bulk(ops_enq, num_ops);
+
+ double tb_len_bits = calc_ldpc_dec_TB_size(ref_op);
+
+ tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
+ ((double)total_time / (double)rte_get_tsc_hz());
+ tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits)) /
+ 1000000.0) / ((double)total_time /
+ (double)rte_get_tsc_hz());
+
+ return TEST_SUCCESS;
+}
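+/*
+ * The rates reported above follow directly from the measured TSC
+ * delta:
+ *   ops/s = (num_ops * TEST_REPETITIONS) / (total_time / tsc_hz)
+ *   Mb/s  = ops/s * tb_len_bits / 1e6
+ * where tb_len_bits is the transport block size computed by
+ * calc_ldpc_dec_TB_size() for the reference op.
+ */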
+
static int
throughput_pmd_lcore_enc(void *arg)
{
TEST_ASSERT_SUCCESS(ret, "Validation failed!");
}
+ rte_bbdev_enc_op_free_bulk(ops_enq, num_ops);
+
double tb_len_bits = calc_enc_TB_size(ref_op);
tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
return TEST_SUCCESS;
}
+static int
+throughput_pmd_lcore_ldpc_enc(void *arg)
+{
+ struct thread_params *tp = arg;
+ uint16_t enq, deq;
+ uint64_t total_time = 0, start_time;
+ const uint16_t queue_id = tp->queue_id;
+ const uint16_t burst_sz = tp->op_params->burst_sz;
+ const uint16_t num_ops = tp->op_params->num_to_process;
+ struct rte_bbdev_enc_op *ops_enq[num_ops];
+ struct rte_bbdev_enc_op *ops_deq[num_ops];
+ struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
+ struct test_buffers *bufs = NULL;
+ int i, j, ret;
+ struct rte_bbdev_info info;
+ uint16_t num_to_enq;
+
+ TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
+ "BURST_SIZE should be <= %u", MAX_BURST);
+
+ rte_bbdev_info_get(tp->dev_id, &info);
+
+ TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
+ "NUM_OPS cannot exceed %u for this device",
+ info.drv.queue_size_lim);
+
+ bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
+
+ while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
+ rte_pause();
+
+ ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
+ num_ops);
+ TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
+ num_ops);
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_enc_op(ops_enq, num_ops, 0, bufs->inputs,
+ bufs->hard_outputs, ref_op);
+
+ /* Set counter to validate the ordering */
+ for (j = 0; j < num_ops; ++j)
+ ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+ for (i = 0; i < TEST_REPETITIONS; ++i) {
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ for (j = 0; j < num_ops; ++j)
+ mbuf_reset(ops_enq[j]->ldpc_enc.output.data);
+
+ start_time = rte_rdtsc_precise();
+
+ for (enq = 0, deq = 0; enq < num_ops;) {
+ num_to_enq = burst_sz;
+
+ if (unlikely(num_ops - enq < num_to_enq))
+ num_to_enq = num_ops - enq;
+
+ enq += rte_bbdev_enqueue_ldpc_enc_ops(tp->dev_id,
+ queue_id, &ops_enq[enq], num_to_enq);
+
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(tp->dev_id,
+ queue_id, &ops_deq[deq], enq - deq);
+ }
+
+ /* dequeue the remaining */
+ while (deq < enq) {
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(tp->dev_id,
+ queue_id, &ops_deq[deq], enq - deq);
+ }
+
+ total_time += rte_rdtsc_precise() - start_time;
+ }
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
+ ret = validate_ldpc_enc_op(ops_deq, num_ops, ref_op);
+ TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+ }
+
+ rte_bbdev_enc_op_free_bulk(ops_enq, num_ops);
+
+ double tb_len_bits = calc_ldpc_enc_TB_size(ref_op);
+
+ tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
+ ((double)total_time / (double)rte_get_tsc_hz());
+ tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits))
+ / 1000000.0) / ((double)total_time /
+ (double)rte_get_tsc_hz());
+
+ return TEST_SUCCESS;
+}
+
static void
print_enc_throughput(struct thread_params *t_params, unsigned int used_cores)
{
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u",
test_vector.op_type);
- printf(
- "Throughput test: dev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, int mode: %s, GHz: %lg\n",
+ printf("+ ------------------------------------------------------- +\n");
+ printf("== test: throughput\ndev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, itr mode: %s, GHz: %lg\n",
info.dev_name, ad->nb_queues, op_params->burst_sz,
op_params->num_to_process, op_params->num_lcores,
op_type_str,
if (intr_enabled) {
if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
throughput_function = throughput_intr_lcore_dec;
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
+ throughput_function = throughput_intr_lcore_dec;
+ else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
+ throughput_function = throughput_intr_lcore_enc;
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
+ throughput_function = throughput_intr_lcore_enc;
else
throughput_function = throughput_intr_lcore_enc;
} else {
if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
throughput_function = throughput_pmd_lcore_dec;
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
+ throughput_function = throughput_pmd_lcore_ldpc_dec;
+ else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
+ throughput_function = throughput_pmd_lcore_enc;
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
+ throughput_function = throughput_pmd_lcore_ldpc_enc;
else
throughput_function = throughput_pmd_lcore_enc;
}
/* Print throughput if interrupts are disabled and test passed */
if (!intr_enabled) {
- if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
print_dec_throughput(t_params, num_lcores);
else
print_enc_throughput(t_params, num_lcores);
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= rte_atomic16_read(&tp->processing_status);
+ ret |= (int)rte_atomic16_read(&tp->processing_status);
/* Wait for slave lcores operations */
for (used_cores = 1; used_cores < num_lcores; used_cores++) {
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= rte_atomic16_read(&tp->processing_status);
+ ret |= (int)rte_atomic16_read(&tp->processing_status);
}
/* Print throughput if test passed */
if (!ret) {
- if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
print_dec_throughput(t_params, num_lcores);
- else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
+ else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC ||
+ test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
print_enc_throughput(t_params, num_lcores);
}
return i;
}
+static int
+latency_test_ldpc_dec(struct rte_mempool *mempool,
+ struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op,
+ int vector_mask, uint16_t dev_id, uint16_t queue_id,
+ const uint16_t num_to_process, uint16_t burst_sz,
+ uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
+{
+ int ret = TEST_SUCCESS;
+ uint16_t i, j, dequeued;
+ struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+ uint64_t start_time = 0, last_time = 0;
+
+ for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
+ uint16_t enq = 0, deq = 0;
+ bool first_time = true;
+ last_time = 0;
+
+ if (unlikely(num_to_process - dequeued < burst_sz))
+ burst_sz = num_to_process - dequeued;
+
+ ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ TEST_ASSERT_SUCCESS(ret,
+ "rte_bbdev_dec_op_alloc_bulk() failed");
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued,
+ bufs->inputs,
+ bufs->hard_outputs,
+ bufs->soft_outputs,
+ bufs->harq_inputs,
+ bufs->harq_outputs,
+ ref_op);
+
+ /* Set counter to validate the ordering */
+ for (j = 0; j < burst_sz; ++j)
+ ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+ start_time = rte_rdtsc_precise();
+
+ enq = rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
+ &ops_enq[enq], burst_sz);
+ TEST_ASSERT(enq == burst_sz,
+ "Error enqueueing burst, expected %u, got %u",
+ burst_sz, enq);
+
+ /* Dequeue */
+ do {
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
+ &ops_deq[deq], burst_sz - deq);
+ if (likely(first_time && (deq > 0))) {
+ last_time = rte_rdtsc_precise() - start_time;
+ first_time = false;
+ }
+ } while (unlikely(burst_sz != deq));
+
+ *max_time = RTE_MAX(*max_time, last_time);
+ *min_time = RTE_MIN(*min_time, last_time);
+ *total_time += last_time;
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
+ ret = validate_ldpc_dec_op(ops_deq, burst_sz, ref_op,
+ vector_mask);
+ TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+ }
+
+ rte_bbdev_dec_op_free_bulk(ops_enq, deq);
+ dequeued += deq;
+ }
+
+ return i;
+}
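+/*
+ * Note: the per-burst latency recorded above is the time until the
+ * first successful dequeue (guarded by first_time), not until the
+ * whole burst has drained; min/max/total aggregate those samples.
+ */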
+
static int
latency_test_enc(struct rte_mempool *mempool,
struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
return i;
}
+static int
+latency_test_ldpc_enc(struct rte_mempool *mempool,
+ struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
+ uint16_t dev_id, uint16_t queue_id,
+ const uint16_t num_to_process, uint16_t burst_sz,
+ uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
+{
+ int ret = TEST_SUCCESS;
+ uint16_t i, j, dequeued;
+ struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+ uint64_t start_time = 0, last_time = 0;
+
+ for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
+ uint16_t enq = 0, deq = 0;
+ bool first_time = true;
+ last_time = 0;
+
+ if (unlikely(num_to_process - dequeued < burst_sz))
+ burst_sz = num_to_process - dequeued;
+
+ ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
+
+ TEST_ASSERT_SUCCESS(ret,
+ "rte_bbdev_enc_op_alloc_bulk() failed");
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_enc_op(ops_enq, burst_sz, dequeued,
+ bufs->inputs,
+ bufs->hard_outputs,
+ ref_op);
+
+ /* Set counter to validate the ordering */
+ for (j = 0; j < burst_sz; ++j)
+ ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+ start_time = rte_rdtsc_precise();
+
+ enq = rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_enq[enq], burst_sz);
+ TEST_ASSERT(enq == burst_sz,
+ "Error enqueueing burst, expected %u, got %u",
+ burst_sz, enq);
+
+ /* Dequeue */
+ do {
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_deq[deq], burst_sz - deq);
+ if (likely(first_time && (deq > 0))) {
+ last_time = rte_rdtsc_precise() - start_time;
+ first_time = false;
+ }
+ } while (unlikely(burst_sz != deq));
+
+ *max_time = RTE_MAX(*max_time, last_time);
+ *min_time = RTE_MIN(*min_time, last_time);
+ *total_time += last_time;
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
+ ret = validate_ldpc_enc_op(ops_deq, burst_sz, ref_op);
+ TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+ }
+
+
+ rte_bbdev_enc_op_free_bulk(ops_enq, deq);
+ dequeued += deq;
+ }
+
+ return i;
+}
+
static int
latency_test(struct active_device *ad,
struct test_op_params *op_params)
op_type_str = rte_bbdev_op_type_str(op_type);
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
- printf(
- "\nValidation/Latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
+ printf("+ ------------------------------------------------------- +\n");
+ printf("== test: validation/latency\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
info.dev_name, burst_sz, num_to_process, op_type_str);
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
op_params->ref_dec_op, op_params->vector_mask,
ad->dev_id, queue_id, num_to_process,
burst_sz, &total_time, &min_time, &max_time);
- else
+ else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
iter = latency_test_enc(op_params->mp, bufs,
op_params->ref_enc_op, ad->dev_id, queue_id,
num_to_process, burst_sz, &total_time,
&min_time, &max_time);
+ else if (op_type == RTE_BBDEV_OP_LDPC_ENC)
+ iter = latency_test_ldpc_enc(op_params->mp, bufs,
+ op_params->ref_enc_op, ad->dev_id, queue_id,
+ num_to_process, burst_sz, &total_time,
+ &min_time, &max_time);
+ else if (op_type == RTE_BBDEV_OP_LDPC_DEC)
+ iter = latency_test_ldpc_dec(op_params->mp, bufs,
+ op_params->ref_dec_op, op_params->vector_mask,
+ ad->dev_id, queue_id, num_to_process,
+ burst_sz, &total_time, &min_time, &max_time);
+ else
+ iter = latency_test_enc(op_params->mp, bufs,
+ op_params->ref_enc_op,
+ ad->dev_id, queue_id,
+ num_to_process, burst_sz, &total_time,
+ &min_time, &max_time);
if (iter <= 0)
return TEST_FAILED;
printf("Operation latency:\n"
- "\tavg latency: %lg cycles, %lg us\n"
- "\tmin latency: %lg cycles, %lg us\n"
- "\tmax latency: %lg cycles, %lg us\n",
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n",
(double)total_time / (double)iter,
(double)(total_time * 1000000) / (double)iter /
(double)rte_get_tsc_hz(), (double)min_time,
if (unlikely(num_to_process - dequeued < burst_sz))
burst_sz = num_to_process - dequeued;
- ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
- TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
- burst_sz);
-
+ rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
if (test_vector.op_type != RTE_BBDEV_OP_NONE)
copy_reference_dec_op(ops_enq, burst_sz, dequeued,
bufs->inputs,
stats.acc_offload_cycles);
time_st->enq_acc_total_time += stats.acc_offload_cycles;
- /* ensure enqueue has been completed */
+ /* give time for device to process ops */
rte_delay_us(200);
/* Start time meas for dequeue function offload latency */
return i;
}
+static int
+offload_latency_test_ldpc_dec(struct rte_mempool *mempool,
+ struct test_buffers *bufs,
+ struct rte_bbdev_dec_op *ref_op, uint16_t dev_id,
+ uint16_t queue_id, const uint16_t num_to_process,
+ uint16_t burst_sz, struct test_time_stats *time_st)
+{
+ int i, dequeued, ret;
+ struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+ uint64_t enq_start_time, deq_start_time;
+ uint64_t enq_sw_last_time, deq_last_time;
+ struct rte_bbdev_stats stats;
+
+ for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
+ uint16_t enq = 0, deq = 0;
+
+ if (unlikely(num_to_process - dequeued < burst_sz))
+ burst_sz = num_to_process - dequeued;
+
+ rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued,
+ bufs->inputs,
+ bufs->hard_outputs,
+ bufs->soft_outputs,
+ bufs->harq_inputs,
+ bufs->harq_outputs,
+ ref_op);
+
+ /* Start time meas for enqueue function offload latency */
+ enq_start_time = rte_rdtsc_precise();
+ do {
+ enq += rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
+ &ops_enq[enq], burst_sz - enq);
+ } while (unlikely(burst_sz != enq));
+
+ ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
+ TEST_ASSERT_SUCCESS(ret,
+ "Failed to get stats for queue (%u) of device (%u)",
+ queue_id, dev_id);
+
+ enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
+ stats.acc_offload_cycles;
+ time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
+ enq_sw_last_time);
+ time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
+ enq_sw_last_time);
+ time_st->enq_sw_total_time += enq_sw_last_time;
+
+ time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_total_time += stats.acc_offload_cycles;
+
+ /* give time for device to process ops */
+ rte_delay_us(200);
+
+ /* Start time meas for dequeue function offload latency */
+ deq_start_time = rte_rdtsc_precise();
+ /* Dequeue one operation */
+ do {
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
+ &ops_deq[deq], 1);
+ } while (unlikely(deq != 1));
+
+ deq_last_time = rte_rdtsc_precise() - deq_start_time;
+ time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
+ deq_last_time);
+ time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
+ deq_last_time);
+ time_st->deq_total_time += deq_last_time;
+
+ /* Dequeue remaining operations if needed*/
+ while (burst_sz != deq)
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
+ &ops_deq[deq], burst_sz - deq);
+
+ rte_bbdev_dec_op_free_bulk(ops_enq, deq);
+ dequeued += deq;
+ }
+
+ return i;
+}
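+/*
+ * Accounting note for the measurements above: the enqueue software
+ * time subtracts the device busy time (stats.acc_offload_cycles), so
+ * driver overhead and accelerator offload cost are reported as
+ * separate min/max/total series.
+ */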
+
static int
offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs,
struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
if (unlikely(num_to_process - dequeued < burst_sz))
burst_sz = num_to_process - dequeued;
- ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
- TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
- burst_sz);
-
+ rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
if (test_vector.op_type != RTE_BBDEV_OP_NONE)
copy_reference_enc_op(ops_enq, burst_sz, dequeued,
bufs->inputs,
stats.acc_offload_cycles);
time_st->enq_acc_total_time += stats.acc_offload_cycles;
- /* ensure enqueue has been completed */
+ /* give time for device to process ops */
rte_delay_us(200);
/* Start time meas for dequeue function offload latency */
return i;
}
+
+static int
+offload_latency_test_ldpc_enc(struct rte_mempool *mempool,
+ struct test_buffers *bufs,
+ struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
+ uint16_t queue_id, const uint16_t num_to_process,
+ uint16_t burst_sz, struct test_time_stats *time_st)
+{
+ int i, dequeued, ret;
+ struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+ uint64_t enq_start_time, deq_start_time;
+ uint64_t enq_sw_last_time, deq_last_time;
+ struct rte_bbdev_stats stats;
+
+ for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
+ uint16_t enq = 0, deq = 0;
+
+ if (unlikely(num_to_process - dequeued < burst_sz))
+ burst_sz = num_to_process - dequeued;
+
+ rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_enc_op(ops_enq, burst_sz, dequeued,
+ bufs->inputs,
+ bufs->hard_outputs,
+ ref_op);
+
+ /* Start time meas for enqueue function offload latency */
+ enq_start_time = rte_rdtsc_precise();
+ do {
+ enq += rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_enq[enq], burst_sz - enq);
+ } while (unlikely(burst_sz != enq));
+
+ ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
+ TEST_ASSERT_SUCCESS(ret,
+ "Failed to get stats for queue (%u) of device (%u)",
+ queue_id, dev_id);
+
+ enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
+ stats.acc_offload_cycles;
+ time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
+ enq_sw_last_time);
+ time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
+ enq_sw_last_time);
+ time_st->enq_sw_total_time += enq_sw_last_time;
+
+ time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_total_time += stats.acc_offload_cycles;
+
+ /* give time for device to process ops */
+ rte_delay_us(200);
+
+ /* Start time meas for dequeue function offload latency */
+ deq_start_time = rte_rdtsc_precise();
+ /* Dequeue one operation */
+ do {
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_deq[deq], 1);
+ } while (unlikely(deq != 1));
+
+ deq_last_time = rte_rdtsc_precise() - deq_start_time;
+ time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
+ deq_last_time);
+ time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
+ deq_last_time);
+ time_st->deq_total_time += deq_last_time;
+
+ while (burst_sz != deq)
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_deq[deq], burst_sz - deq);
+
+ rte_bbdev_enc_op_free_bulk(ops_enq, deq);
+ dequeued += deq;
+ }
+
+ return i;
+}
#endif
static int
op_type_str = rte_bbdev_op_type_str(op_type);
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
- printf(
- "\nOffload latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
+ printf("+ ------------------------------------------------------- +\n");
+ printf("== test: offload latency test\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
info.dev_name, burst_sz, num_to_process, op_type_str);
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
iter = offload_latency_test_dec(op_params->mp, bufs,
op_params->ref_dec_op, ad->dev_id, queue_id,
num_to_process, burst_sz, &time_st);
+ else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
+ iter = offload_latency_test_enc(op_params->mp, bufs,
+ op_params->ref_enc_op, ad->dev_id, queue_id,
+ num_to_process, burst_sz, &time_st);
+ else if (op_type == RTE_BBDEV_OP_LDPC_ENC)
+ iter = offload_latency_test_ldpc_enc(op_params->mp, bufs,
+ op_params->ref_enc_op, ad->dev_id, queue_id,
+ num_to_process, burst_sz, &time_st);
+ else if (op_type == RTE_BBDEV_OP_LDPC_DEC)
+ iter = offload_latency_test_ldpc_dec(op_params->mp, bufs,
+ op_params->ref_dec_op, ad->dev_id, queue_id,
+ num_to_process, burst_sz, &time_st);
else
iter = offload_latency_test_enc(op_params->mp, bufs,
op_params->ref_enc_op, ad->dev_id, queue_id,
if (iter <= 0)
return TEST_FAILED;
- printf("Enqueue offload cost latency:\n"
- "\tDriver offload avg %lg cycles, %lg us\n"
- "\tDriver offload min %lg cycles, %lg us\n"
- "\tDriver offload max %lg cycles, %lg us\n"
- "\tAccelerator offload avg %lg cycles, %lg us\n"
- "\tAccelerator offload min %lg cycles, %lg us\n"
- "\tAccelerator offload max %lg cycles, %lg us\n",
+ printf("Enqueue driver offload cost latency:\n"
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n"
+ "Enqueue accelerator offload cost latency:\n"
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n",
(double)time_st.enq_sw_total_time / (double)iter,
(double)(time_st.enq_sw_total_time * 1000000) /
(double)iter / (double)rte_get_tsc_hz(),
rte_get_tsc_hz());
printf("Dequeue offload cost latency - one op:\n"
- "\tavg %lg cycles, %lg us\n"
- "\tmin %lg cycles, %lg us\n"
- "\tmax %lg cycles, %lg us\n",
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n",
(double)time_st.deq_total_time / (double)iter,
(double)(time_st.deq_total_time * 1000000) /
(double)iter / (double)rte_get_tsc_hz(),
op_type_str = rte_bbdev_op_type_str(op_type);
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
- printf(
- "\nOffload latency empty dequeue test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
+ printf("+ ------------------------------------------------------- +\n");
+ printf("== test: offload latency empty dequeue\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
info.dev_name, burst_sz, num_to_process, op_type_str);
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
if (iter <= 0)
return TEST_FAILED;
- printf("Empty dequeue offload\n"
- "\tavg. latency: %lg cycles, %lg us\n"
- "\tmin. latency: %lg cycles, %lg us\n"
- "\tmax. latency: %lg cycles, %lg us\n",
+ printf("Empty dequeue offload:\n"
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n",
(double)deq_total_time / (double)iter,
(double)(deq_total_time * 1000000) / (double)iter /
(double)rte_get_tsc_hz(), (double)deq_min_time,
"input",
"soft_output",
"hard_output",
+ "harq_input",
+ "harq_output",
};
/* trim leading and trailing spaces */
}
values[n_tokens] = (uint32_t) strtoul(tok, &error, 0);
+
if ((error == NULL) || (*error != '\0')) {
printf("Failed with convert '%s'\n", tok);
rte_free(values);
return 0;
}
+/* convert LDPC decoder flag from string to unsigned long int */
+static int
+op_ldpc_decoder_flag_strtoul(char *token, uint32_t *op_flag_value)
+{
+ if (!strcmp(token, "RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK"))
+ *op_flag_value = RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK"))
+ *op_flag_value = RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP"))
+ *op_flag_value = RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS"))
+ *op_flag_value = RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE"))
+ *op_flag_value = RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE"))
+ *op_flag_value = RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_DECODE_BYPASS"))
+ *op_flag_value = RTE_BBDEV_LDPC_DECODE_BYPASS;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_SOFT_OUT_ENABLE"))
+ *op_flag_value = RTE_BBDEV_LDPC_SOFT_OUT_ENABLE;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_SOFT_OUT_RM_BYPASS"))
+ *op_flag_value = RTE_BBDEV_LDPC_SOFT_OUT_RM_BYPASS;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_SOFT_OUT_DEINTERLEAVER_BYPASS"))
+ *op_flag_value = RTE_BBDEV_LDPC_SOFT_OUT_DEINTERLEAVER_BYPASS;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE"))
+ *op_flag_value = RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_DEC_INTERRUPTS"))
+ *op_flag_value = RTE_BBDEV_LDPC_DEC_INTERRUPTS;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_DEC_SCATTER_GATHER"))
+ *op_flag_value = RTE_BBDEV_LDPC_DEC_SCATTER_GATHER;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION"))
+ *op_flag_value = RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_LLR_COMPRESSION"))
+ *op_flag_value = RTE_BBDEV_LDPC_LLR_COMPRESSION;
+ else if (!strcmp(token,
+ "RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE"))
+ *op_flag_value = RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE;
+ else if (!strcmp(token,
+ "RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE"))
+ *op_flag_value = RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
+ else {
+ printf("The given value is not a LDPC decoder flag\n");
+ return -1;
+ }
+
+ return 0;
+}
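+/*
+ * In a test vector file these flags appear under the "op_flags" key
+ * as a comma-separated list, e.g. (hypothetical snippet):
+ *   op_flags = RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE,
+ *              RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK
+ * parse_turbo_flags() below tokenizes such a list and combines the
+ * converted values into a single op_flags word.
+ */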
+
/* convert turbo encoder flag from string to unsigned long int*/
static int
op_encoder_flag_strtoul(char *token, uint32_t *op_flag_value)
return 0;
}
+/* convert LDPC encoder flag from string to unsigned long int */
+static int
+op_ldpc_encoder_flag_strtoul(char *token, uint32_t *op_flag_value)
+{
+ if (!strcmp(token, "RTE_BBDEV_LDPC_INTERLEAVER_BYPASS"))
+ *op_flag_value = RTE_BBDEV_LDPC_INTERLEAVER_BYPASS;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_RATE_MATCH"))
+ *op_flag_value = RTE_BBDEV_LDPC_RATE_MATCH;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_CRC_24A_ATTACH"))
+ *op_flag_value = RTE_BBDEV_LDPC_CRC_24A_ATTACH;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_CRC_24B_ATTACH"))
+ *op_flag_value = RTE_BBDEV_LDPC_CRC_24B_ATTACH;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_CRC_16_ATTACH"))
+ *op_flag_value = RTE_BBDEV_LDPC_CRC_16_ATTACH;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_ENC_INTERRUPTS"))
+ *op_flag_value = RTE_BBDEV_LDPC_ENC_INTERRUPTS;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_ENC_SCATTER_GATHER"))
+ *op_flag_value = RTE_BBDEV_LDPC_ENC_SCATTER_GATHER;
+ else {
+ printf("The given value is not a turbo encoder flag\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/* tokenization turbo decoder/encoder flags values separated by a comma */
static int
parse_turbo_flags(char *tokens, uint32_t *op_flags,
} else if (op_type == RTE_BBDEV_OP_TURBO_ENC) {
if (op_encoder_flag_strtoul(tok, &op_flag_value) == -1)
return -1;
+ } else if (op_type == RTE_BBDEV_OP_LDPC_ENC) {
+ if (op_ldpc_encoder_flag_strtoul(tok, &op_flag_value)
+ == -1)
+ return -1;
+ } else if (op_type == RTE_BBDEV_OP_LDPC_DEC) {
+ if (op_ldpc_decoder_flag_strtoul(tok, &op_flag_value)
+ == -1)
+ return -1;
} else {
return -1;
}
*op_type = RTE_BBDEV_OP_TURBO_DEC;
else if (!strcmp(token, "RTE_BBDEV_OP_TURBO_ENC"))
*op_type = RTE_BBDEV_OP_TURBO_ENC;
+ else if (!strcmp(token, "RTE_BBDEV_OP_LDPC_ENC"))
+ *op_type = RTE_BBDEV_OP_LDPC_ENC;
+ else if (!strcmp(token, "RTE_BBDEV_OP_LDPC_DEC"))
+ *op_type = RTE_BBDEV_OP_LDPC_DEC;
else if (!strcmp(token, "RTE_BBDEV_OP_NONE"))
*op_type = RTE_BBDEV_OP_NONE;
else {
*status = *status | (1 << RTE_BBDEV_DRV_ERROR);
else if (!strcmp(tok, "FCW"))
*status = *status | (1 << RTE_BBDEV_DATA_ERROR);
+ else if (!strcmp(tok, "SYNCRC")) {
+ *status = *status | (1 << RTE_BBDEV_SYNDROME_ERROR);
+ *status = *status | (1 << RTE_BBDEV_CRC_ERROR);
+ } else if (!strcmp(tok, "SYN"))
+ *status = *status | (1 << RTE_BBDEV_SYNDROME_ERROR);
else if (!strcmp(tok, "CRC")) {
- if (op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if ((op_type == RTE_BBDEV_OP_TURBO_DEC) ||
+ (op_type == RTE_BBDEV_OP_LDPC_DEC))
*status = *status | (1 << RTE_BBDEV_CRC_ERROR);
else {
printf(
- "CRC is only a valid value for turbo decoder\n");
+ "CRC is only a valid value for decoder\n");
return -1;
}
} else {
ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
} else if (!strcmp(key_token, "r")) {
vector->mask |= TEST_BBDEV_VF_R;
- turbo_dec->tb_params.r = (uint8_t) strtoul(token, &err, 0);
+ turbo_dec->tb_params.r = (uint8_t)strtoul(token, &err, 0);
ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
} else if (!strcmp(key_token, "code_block_mode")) {
vector->mask |= TEST_BBDEV_VF_CODE_BLOCK_MODE;
return 0;
}
+
+/* parses LDPC encoder parameters and assigns to global variable */
+static int
+parse_ldpc_encoder_params(const char *key_token, char *token,
+ struct test_bbdev_vector *vector)
+{
+ int ret = 0, status = 0;
+ uint32_t op_flags = 0;
+ char *err = NULL;
+
+ struct rte_bbdev_op_ldpc_enc *ldpc_enc = &vector->ldpc_enc;
+
+ if (starts_with(key_token, op_data_prefixes[DATA_INPUT]))
+ ret = parse_data_entry(key_token, token, vector,
+ DATA_INPUT,
+ op_data_prefixes[DATA_INPUT]);
+ else if (starts_with(key_token, "output"))
+ ret = parse_data_entry(key_token, token, vector,
+ DATA_HARD_OUTPUT,
+ "output");
+ else if (!strcmp(key_token, "e")) {
+ vector->mask |= TEST_BBDEV_VF_E;
+ ldpc_enc->cb_params.e = (uint32_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "ea")) {
+ vector->mask |= TEST_BBDEV_VF_EA;
+ ldpc_enc->tb_params.ea = (uint32_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "eb")) {
+ vector->mask |= TEST_BBDEV_VF_EB;
+ ldpc_enc->tb_params.eb = (uint32_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "c")) {
+ vector->mask |= TEST_BBDEV_VF_C;
+ ldpc_enc->tb_params.c = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "cab")) {
+ vector->mask |= TEST_BBDEV_VF_CAB;
+ ldpc_enc->tb_params.cab = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "rv_index")) {
+ vector->mask |= TEST_BBDEV_VF_RV_INDEX;
+ ldpc_enc->rv_index = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "n_cb")) {
+ vector->mask |= TEST_BBDEV_VF_NCB;
+ ldpc_enc->n_cb = (uint16_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "r")) {
+ vector->mask |= TEST_BBDEV_VF_R;
+ ldpc_enc->tb_params.r = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "q_m")) {
+ vector->mask |= TEST_BBDEV_VF_QM;
+ ldpc_enc->q_m = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "basegraph")) {
+ vector->mask |= TEST_BBDEV_VF_BG;
+ ldpc_enc->basegraph = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "z_c")) {
+ vector->mask |= TEST_BBDEV_VF_ZC;
+ ldpc_enc->z_c = (uint16_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "n_filler")) {
+ vector->mask |= TEST_BBDEV_VF_F;
+ ldpc_enc->n_filler = (uint16_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "code_block_mode")) {
+ vector->mask |= TEST_BBDEV_VF_CODE_BLOCK_MODE;
+ ldpc_enc->code_block_mode = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "op_flags")) {
+ vector->mask |= TEST_BBDEV_VF_OP_FLAGS;
+ ret = parse_turbo_flags(token, &op_flags, vector->op_type);
+ if (!ret)
+ ldpc_enc->op_flags = op_flags;
+ } else if (!strcmp(key_token, "expected_status")) {
+ vector->mask |= TEST_BBDEV_VF_EXPECTED_STATUS;
+ ret = parse_expected_status(token, &status, vector->op_type);
+ if (!ret)
+ vector->expected_status = status;
+ } else {
+ printf("Not valid ldpc enc key: '%s'\n", key_token);
+ return -1;
+ }
+
+ if (ret != 0) {
+ printf("Failed with convert '%s\t%s'\n", key_token, token);
+ return -1;
+ }
+
+ return 0;
+}
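+
+/*
+ * A minimal sketch of an LDPC encoder vector fragment accepted by the
+ * parser above. The keys mirror the strcmp() cases; the values are
+ * illustrative only and do not come from a real test vector:
+ *
+ *   op_type = RTE_BBDEV_OP_LDPC_ENC
+ *   input0 = 0x11d2bcac, 0x4d
+ *   output0 = 0xd2399179, 0x640eb999
+ *   basegraph = 1
+ *   z_c = 52
+ *   n_cb = 3276
+ *   q_m = 2
+ *   n_filler = 40
+ *   e = 1380
+ *   rv_index = 0
+ *   code_block_mode = 1
+ *   op_flags = RTE_BBDEV_LDPC_CRC_24B_ATTACH
+ *   expected_status = OK
+ */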
+
+/* parses LDPC decoder parameters and assigns them to the test vector */
+static int
+parse_ldpc_decoder_params(const char *key_token, char *token,
+ struct test_bbdev_vector *vector)
+{
+ int ret = 0, status = 0;
+ uint32_t op_flags = 0;
+ char *err = NULL;
+
+ struct rte_bbdev_op_ldpc_dec *ldpc_dec = &vector->ldpc_dec;
+
+ if (starts_with(key_token, op_data_prefixes[DATA_INPUT]))
+ ret = parse_data_entry(key_token, token, vector,
+ DATA_INPUT,
+ op_data_prefixes[DATA_INPUT]);
+ else if (starts_with(key_token, "output"))
+ ret = parse_data_entry(key_token, token, vector,
+ DATA_HARD_OUTPUT,
+ "output");
+ else if (starts_with(key_token, op_data_prefixes[DATA_HARQ_INPUT]))
+ ret = parse_data_entry(key_token, token, vector,
+ DATA_HARQ_INPUT,
+ op_data_prefixes[DATA_HARQ_INPUT]);
+ else if (starts_with(key_token, op_data_prefixes[DATA_HARQ_OUTPUT]))
+ ret = parse_data_entry(key_token, token, vector,
+ DATA_HARQ_OUTPUT,
+ op_data_prefixes[DATA_HARQ_OUTPUT]);
+ else if (!strcmp(key_token, "e")) {
+ vector->mask |= TEST_BBDEV_VF_E;
+ ldpc_dec->cb_params.e = (uint32_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "ea")) {
+ vector->mask |= TEST_BBDEV_VF_EA;
+ ldpc_dec->tb_params.ea = (uint32_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "eb")) {
+ vector->mask |= TEST_BBDEV_VF_EB;
+ ldpc_dec->tb_params.eb = (uint32_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "c")) {
+ vector->mask |= TEST_BBDEV_VF_C;
+ ldpc_dec->tb_params.c = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "cab")) {
+ vector->mask |= TEST_BBDEV_VF_CAB;
+ ldpc_dec->tb_params.cab = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "rv_index")) {
+ vector->mask |= TEST_BBDEV_VF_RV_INDEX;
+ ldpc_dec->rv_index = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "n_cb")) {
+ vector->mask |= TEST_BBDEV_VF_NCB;
+ ldpc_dec->n_cb = (uint16_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "r")) {
+ vector->mask |= TEST_BBDEV_VF_R;
+ ldpc_dec->tb_params.r = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "q_m")) {
+ vector->mask |= TEST_BBDEV_VF_QM;
+ ldpc_dec->q_m = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "basegraph")) {
+ vector->mask |= TEST_BBDEV_VF_BG;
+ ldpc_dec->basegraph = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "z_c")) {
+ vector->mask |= TEST_BBDEV_VF_ZC;
+ ldpc_dec->z_c = (uint16_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "n_filler")) {
+ vector->mask |= TEST_BBDEV_VF_F;
+ ldpc_dec->n_filler = (uint16_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "expected_iter_count")) {
+ vector->mask |= TEST_BBDEV_VF_EXPECTED_ITER_COUNT;
+ ldpc_dec->iter_count = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "iter_max")) {
+ vector->mask |= TEST_BBDEV_VF_ITER_MAX;
+ ldpc_dec->iter_max = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "code_block_mode")) {
+ vector->mask |= TEST_BBDEV_VF_CODE_BLOCK_MODE;
+ ldpc_dec->code_block_mode = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "op_flags")) {
+ vector->mask |= TEST_BBDEV_VF_OP_FLAGS;
+ ret = parse_turbo_flags(token, &op_flags, vector->op_type);
+ if (!ret)
+ ldpc_dec->op_flags = op_flags;
+ } else if (!strcmp(key_token, "expected_status")) {
+ vector->mask |= TEST_BBDEV_VF_EXPECTED_STATUS;
+ ret = parse_expected_status(token, &status, vector->op_type);
+ if (!ret)
+ vector->expected_status = status;
+ } else {
+ printf("Not valid ldpc dec key: '%s'\n", key_token);
+ return -1;
+ }
+
+ if (ret != 0) {
+ printf("Failed with convert '%s\t%s'\n", key_token, token);
+ return -1;
+ }
+
+ return 0;
+}
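+
+/*
+ * And the decoder counterpart, again with illustrative values; the
+ * harq_input/harq_output entries are only meaningful when the matching
+ * HARQ combining flags are set in op_flags:
+ *
+ *   op_type = RTE_BBDEV_OP_LDPC_DEC
+ *   input0 = <LLR words>
+ *   output0 = <hard output words>
+ *   harq_output0 = <combined LLR words>
+ *   basegraph = 1
+ *   z_c = 52
+ *   n_cb = 3276
+ *   q_m = 2
+ *   n_filler = 40
+ *   e = 1380
+ *   rv_index = 0
+ *   code_block_mode = 1
+ *   iter_max = 8
+ *   expected_iter_count = 2
+ *   op_flags = RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE
+ *   expected_status = OK
+ */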
+
/* checks the type of key and assigns data */
static int
parse_entry(char *entry, struct test_bbdev_vector *vector)
} else if (vector->op_type == RTE_BBDEV_OP_TURBO_ENC) {
if (parse_encoder_params(key_token, token, vector) == -1)
return -1;
+ } else if (vector->op_type == RTE_BBDEV_OP_LDPC_ENC) {
+ if (parse_ldpc_encoder_params(key_token, token, vector) == -1)
+ return -1;
+ } else if (vector->op_type == RTE_BBDEV_OP_LDPC_DEC) {
+ if (parse_ldpc_decoder_params(key_token, token, vector) == -1)
+ return -1;
}
return 0;
return 0;
}
+
+static int
+check_ldpc_decoder_segments(struct test_bbdev_vector *vector)
+{
+ unsigned char i;
+ struct rte_bbdev_op_ldpc_dec *ldpc_dec = &vector->ldpc_dec;
+
+ if (vector->entries[DATA_INPUT].nb_segments == 0)
+ return -1;
+
+ for (i = 0; i < vector->entries[DATA_INPUT].nb_segments; i++)
+ if (vector->entries[DATA_INPUT].segments[i].addr == NULL)
+ return -1;
+
+ if (vector->entries[DATA_HARD_OUTPUT].nb_segments == 0)
+ return -1;
+
+ for (i = 0; i < vector->entries[DATA_HARD_OUTPUT].nb_segments; i++)
+ if (vector->entries[DATA_HARD_OUTPUT].segments[i].addr == NULL)
+ return -1;
+
+ if ((ldpc_dec->op_flags & RTE_BBDEV_LDPC_SOFT_OUT_ENABLE) &&
+ (vector->entries[DATA_SOFT_OUTPUT].nb_segments == 0))
+ return -1;
+
+ for (i = 0; i < vector->entries[DATA_SOFT_OUTPUT].nb_segments; i++)
+ if (vector->entries[DATA_SOFT_OUTPUT].segments[i].addr == NULL)
+ return -1;
+
+ if ((ldpc_dec->op_flags & RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE) &&
+ (vector->entries[DATA_HARQ_OUTPUT].nb_segments == 0))
+ return -1;
+
+ for (i = 0; i < vector->entries[DATA_HARQ_OUTPUT].nb_segments; i++)
+ if (vector->entries[DATA_HARQ_OUTPUT].segments[i].addr == NULL)
+ return -1;
+
+ return 0;
+}
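+
+/*
+ * In other words: input and hard-output entries are always mandatory,
+ * soft-output and harq-output entries become mandatory only when
+ * RTE_BBDEV_LDPC_SOFT_OUT_ENABLE or RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE
+ * is set, and every defined segment must carry a non-NULL address.
+ */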
+
static int
check_decoder_llr_spec(struct test_bbdev_vector *vector)
{
!(turbo_dec->op_flags &
RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN)) {
printf(
- "WARNING: input LLR sign formalism was not specified and will be set to negative LLR for '1' bit\n");
+ "INFO: input LLR sign formalism was not specified and will be set to negative LLR for '1' bit\n");
turbo_dec->op_flags |= RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN;
}
!(turbo_dec->op_flags &
RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT)) {
printf(
- "WARNING: soft output LLR sign formalism was not specified and will be set to negative LLR for '1' bit\n");
+ "INFO: soft output LLR sign formalism was not specified and will be set to negative LLR for '1' bit\n");
turbo_dec->op_flags |=
RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT;
}
return 0;
}
+
+static int
+check_decoder_op_flags(struct test_bbdev_vector *vector)
+{
+ struct rte_bbdev_op_turbo_dec *turbo_dec = &vector->turbo_dec;
+
+ if ((turbo_dec->op_flags & RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP) &&
+ !(turbo_dec->op_flags & RTE_BBDEV_TURBO_CRC_TYPE_24B)) {
+ printf(
+ "WARNING: RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP flag is missing RTE_BBDEV_TURBO_CRC_TYPE_24B\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/* checks decoder parameters */
static int
check_decoder(struct test_bbdev_vector *vector)
if (check_decoder_llr_spec(vector) < 0)
return -1;
+ if (check_decoder_op_flags(vector) < 0)
+ return -1;
+
/* Check which params were set */
if (!(mask & TEST_BBDEV_VF_CODE_BLOCK_MODE)) {
printf(
}
if (!(mask & TEST_BBDEV_VF_RV_INDEX))
printf(
- "WARNING: rv_index was not specified in vector file and will be set to 0\n");
+ "INFO: rv_index was not specified in vector file and will be set to 0\n");
if (!(mask & TEST_BBDEV_VF_ITER_MIN))
printf(
"WARNING: iter_min was not specified in vector file and will be set to 0\n");
} else if (!(turbo_dec->op_flags & RTE_BBDEV_TURBO_MAP_DEC) &&
mask & TEST_BBDEV_VF_NUM_MAPS) {
printf(
- "WARNING: RTE_BBDEV_TURBO_MAP_DEC was not set in vector file and num_maps will be set to 0\n");
+ "INFO: RTE_BBDEV_TURBO_MAP_DEC was not set in vector file and num_maps will be set to 0\n");
turbo_dec->num_maps = 0;
}
if (!(mask & TEST_BBDEV_VF_EXPECTED_STATUS))
return 0;
}
+
+/* checks LDPC decoder parameters */
+static int
+check_ldpc_decoder(struct test_bbdev_vector *vector)
+{
+ struct rte_bbdev_op_ldpc_dec *ldpc_dec = &vector->ldpc_dec;
+ const int mask = vector->mask;
+
+ if (check_ldpc_decoder_segments(vector) < 0)
+ return -1;
+
+ /*
+ * TODO: add LLR-spec and op-flags validation for LDPC decode
+ * (counterparts of check_decoder_llr_spec() and
+ * check_decoder_op_flags() for the turbo decoder).
+ */
+
+ /* Check which params were set */
+ if (!(mask & TEST_BBDEV_VF_CODE_BLOCK_MODE)) {
+ printf(
+ "WARNING: code_block_mode was not specified in vector file and will be set to 1 (0 - TB Mode, 1 - CB mode)\n");
+ ldpc_dec->code_block_mode = 1;
+ }
+ if (ldpc_dec->code_block_mode == 0) {
+ if (!(mask & TEST_BBDEV_VF_EA))
+ printf(
+ "WARNING: ea was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_EB))
+ printf(
+ "WARNING: eb was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_C)) {
+ printf(
+ "WARNING: c was not specified in vector file and will be set to 1\n");
+ ldpc_dec->tb_params.c = 1;
+ }
+ if (!(mask & TEST_BBDEV_VF_CAB))
+ printf(
+ "WARNING: cab was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_R))
+ printf(
+ "WARNING: r was not specified in vector file and will be set to 0\n");
+ } else {
+ if (!(mask & TEST_BBDEV_VF_E))
+ printf(
+ "WARNING: e was not specified in vector file and will be set to 0\n");
+ }
+ if (!(mask & TEST_BBDEV_VF_RV_INDEX))
+ printf(
+ "INFO: rv_index was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_ITER_MAX))
+ printf(
+ "WARNING: iter_max was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT))
+ printf(
+ "WARNING: expected_iter_count was not specified in vector file and iter_count will not be validated\n");
+ if (!(mask & TEST_BBDEV_VF_OP_FLAGS)) {
+ printf(
+ "WARNING: op_flags was not specified in vector file and capabilities will not be validated\n");
+ }
+ if (!(mask & TEST_BBDEV_VF_EXPECTED_STATUS))
+ printf(
+ "WARNING: expected_status was not specified in vector file and will be set to 0\n");
+ return 0;
+}
+
/* checks encoder parameters */
static int
check_encoder(struct test_bbdev_vector *vector)
}
if (!(mask & TEST_BBDEV_VF_RV_INDEX))
printf(
- "WARNING: rv_index was not specified in vector file and will be set to 0\n");
+ "INFO: rv_index was not specified in vector file and will be set to 0\n");
if (!(mask & TEST_BBDEV_VF_OP_FLAGS))
printf(
- "WARNING: op_flags was not specified in vector file and capabilities will not be validated\n");
+ "INFO: op_flags was not specified in vector file and capabilities will not be validated\n");
+ if (!(mask & TEST_BBDEV_VF_EXPECTED_STATUS))
+ printf(
+ "WARNING: expected_status was not specified in vector file and will be set to 0\n");
+
+ return 0;
+}
+
+/* checks LDPC encoder parameters */
+static int
+check_ldpc_encoder(struct test_bbdev_vector *vector)
+{
+ unsigned char i;
+ const int mask = vector->mask;
+
+ if (vector->entries[DATA_INPUT].nb_segments == 0)
+ return -1;
+
+ for (i = 0; i < vector->entries[DATA_INPUT].nb_segments; i++)
+ if (vector->entries[DATA_INPUT].segments[i].addr == NULL)
+ return -1;
+
+ if (vector->entries[DATA_HARD_OUTPUT].nb_segments == 0)
+ return -1;
+
+ for (i = 0; i < vector->entries[DATA_HARD_OUTPUT].nb_segments; i++)
+ if (vector->entries[DATA_HARD_OUTPUT].segments[i].addr == NULL)
+ return -1;
+
+ if (!(mask & TEST_BBDEV_VF_CODE_BLOCK_MODE)) {
+ printf(
+ "WARNING: code_block_mode was not specified in vector file and will be set to 1\n");
+ vector->turbo_enc.code_block_mode = 1;
+ }
+ if (vector->turbo_enc.code_block_mode == 0) {
+ } else {
+ if (!(mask & TEST_BBDEV_VF_E) && (vector->turbo_enc.op_flags &
+ RTE_BBDEV_TURBO_RATE_MATCH))
+ printf(
+ "WARNING: e was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_NCB))
+ printf(
+ "WARNING: ncb was not specified in vector file and will be set to 0\n");
+ }
+ if (!(mask & TEST_BBDEV_VF_BG))
+ printf(
+ "WARNING: BG was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_ZC))
+ printf(
+ "WARNING: Zc was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_RV_INDEX))
+ printf(
+ "INFO: rv_index was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_OP_FLAGS))
+ printf(
+ "INFO: op_flags was not specified in vector file and capabilities will not be validated\n");
if (!(mask & TEST_BBDEV_VF_EXPECTED_STATUS))
printf(
"WARNING: expected_status was not specified in vector file and will be set to 0\n");
} else if (vector->op_type == RTE_BBDEV_OP_TURBO_ENC) {
if (check_encoder(vector) == -1)
return -1;
+ } else if (vector->op_type == RTE_BBDEV_OP_LDPC_ENC) {
+ if (check_ldpc_encoder(vector) == -1)
+ return -1;
+ } else if (vector->op_type == RTE_BBDEV_OP_LDPC_DEC) {
+ if (check_ldpc_decoder(vector) == -1)
+ return -1;
} else if (vector->op_type != RTE_BBDEV_OP_NONE) {
printf("Vector was not filled\n");
return -1;