#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hexdump.h>
+#include <rte_interrupts.h>
+
+#ifdef RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC
+#include <fpga_lte_fec.h>
+#endif
#include "main.h"
#include "test_bbdev_vector.h"
#define MAX_QUEUES RTE_MAX_LCORE
#define TEST_REPETITIONS 1000
+#ifdef RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC
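+/* Default FPGA LTE FEC PF/VF configuration values. They are consumed by the
+ * fpga_lte_fec_conf setup in the FPGA PF configuration block further below.
+ */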
+#define FPGA_PF_DRIVER_NAME ("intel_fpga_lte_fec_pf")
+#define FPGA_VF_DRIVER_NAME ("intel_fpga_lte_fec_vf")
+#define VF_UL_QUEUE_VALUE 4
+#define VF_DL_QUEUE_VALUE 4
+#define UL_BANDWIDTH 3
+#define DL_BANDWIDTH 3
+#define UL_LOAD_BALANCE 128
+#define DL_LOAD_BALANCE 128
+#define FLR_TIMEOUT 610
+#endif
+
#define OPS_CACHE_SIZE 256U
#define OPS_POOL_SIZE_MIN 511U /* 0.5K per queue */
struct rte_mempool *in_mbuf_pool;
struct rte_mempool *hard_out_mbuf_pool;
struct rte_mempool *soft_out_mbuf_pool;
+ struct rte_mempool *harq_in_mbuf_pool;
+ struct rte_mempool *harq_out_mbuf_pool;
} active_devs[RTE_BBDEV_MAX_DEVS];
static uint8_t nb_active_devs;
struct rte_bbdev_op_data *inputs;
struct rte_bbdev_op_data *hard_outputs;
struct rte_bbdev_op_data *soft_outputs;
+ struct rte_bbdev_op_data *harq_inputs;
+ struct rte_bbdev_op_data *harq_outputs;
};
/* Operation parameters specific for given test case */
struct thread_params {
uint8_t dev_id;
uint16_t queue_id;
+ uint32_t lcore_id;
uint64_t start_time;
double ops_per_sec;
double mbps;
uint8_t iter_count;
rte_atomic16_t nb_dequeued;
rte_atomic16_t processing_status;
+ rte_atomic16_t burst_sz;
struct test_op_params *op_params;
+ struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
+ struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
};
#ifdef RTE_BBDEV_OFFLOAD_COST
} while (m != NULL);
}
+/* Read flag value 0/1 from bitmap */
+static inline bool
+check_bit(uint32_t bitmap, uint32_t bitmask)
+{
+ return bitmap & bitmask;
+}
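+/* Usage sketch (illustrative): test whether early termination is enabled
+ * on a reference LDPC decode op before a throughput run:
+ *
+ *	if (check_bit(ref_op->ldpc_dec.op_flags,
+ *			RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE))
+ *		...
+ */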
+
static inline void
set_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
{
check_dev_cap(const struct rte_bbdev_info *dev_info)
{
unsigned int i;
- unsigned int nb_inputs, nb_soft_outputs, nb_hard_outputs;
+ unsigned int nb_inputs, nb_soft_outputs, nb_hard_outputs,
+ nb_harq_inputs, nb_harq_outputs;
const struct rte_bbdev_op_cap *op_cap = dev_info->drv.capabilities;
nb_inputs = test_vector.entries[DATA_INPUT].nb_segments;
nb_soft_outputs = test_vector.entries[DATA_SOFT_OUTPUT].nb_segments;
nb_hard_outputs = test_vector.entries[DATA_HARD_OUTPUT].nb_segments;
+ nb_harq_inputs = test_vector.entries[DATA_HARQ_INPUT].nb_segments;
+ nb_harq_outputs = test_vector.entries[DATA_HARQ_OUTPUT].nb_segments;
for (i = 0; op_cap->type != RTE_BBDEV_OP_NONE; ++i, ++op_cap) {
if (op_cap->type != test_vector.op_type)
!(cap->capability_flags &
RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
printf(
- "WARNING: Device \"%s\" does not support soft output - soft output flags will be ignored.\n",
+ "INFO: Device \"%s\" does not support soft output - soft output flags will be ignored.\n",
dev_info->dev_name);
clear_soft_out_cap(
&test_vector.turbo_dec.op_flags);
if (nb_hard_outputs > cap->num_buffers_dst) {
printf(
"Too many hard outputs defined: %u, max: %u\n",
- nb_hard_outputs, cap->num_buffers_src);
+ nb_hard_outputs, cap->num_buffers_dst);
+ return TEST_FAILED;
+ }
+ if (intr_enabled && !(cap->capability_flags &
+ RTE_BBDEV_TURBO_ENC_INTERRUPTS)) {
+ printf(
+ "Dequeue interrupts are not supported!\n");
+ return TEST_FAILED;
+ }
+
+ return TEST_SUCCESS;
+ } else if (op_cap->type == RTE_BBDEV_OP_LDPC_ENC) {
+ const struct rte_bbdev_op_cap_ldpc_enc *cap =
+ &op_cap->cap.ldpc_enc;
+
+ if (!flags_match(test_vector.ldpc_enc.op_flags,
+ cap->capability_flags)) {
+ printf("Flag Mismatch\n");
+ return TEST_FAILED;
+ }
+ if (nb_inputs > cap->num_buffers_src) {
+ printf("Too many inputs defined: %u, max: %u\n",
+ nb_inputs, cap->num_buffers_src);
+ return TEST_FAILED;
+ }
+ if (nb_hard_outputs > cap->num_buffers_dst) {
+ printf(
+ "Too many hard outputs defined: %u, max: %u\n",
+ nb_hard_outputs, cap->num_buffers_dst);
return TEST_FAILED;
}
if (intr_enabled && !(cap->capability_flags &
return TEST_FAILED;
}
+ return TEST_SUCCESS;
+ } else if (op_cap->type == RTE_BBDEV_OP_LDPC_DEC) {
+ const struct rte_bbdev_op_cap_ldpc_dec *cap =
+ &op_cap->cap.ldpc_dec;
+
+ if (!flags_match(test_vector.ldpc_dec.op_flags,
+ cap->capability_flags)) {
+ printf("Flag Mismatch\n");
+ return TEST_FAILED;
+ }
+ if (nb_inputs > cap->num_buffers_src) {
+ printf("Too many inputs defined: %u, max: %u\n",
+ nb_inputs, cap->num_buffers_src);
+ return TEST_FAILED;
+ }
+ if (nb_hard_outputs > cap->num_buffers_hard_out) {
+ printf(
+ "Too many hard outputs defined: %u, max: %u\n",
+ nb_hard_outputs,
+ cap->num_buffers_hard_out);
+ return TEST_FAILED;
+ }
+ if (nb_harq_inputs > cap->num_buffers_hard_out) {
+ printf(
+ "Too many HARQ inputs defined: %u, max: %u\n",
+ nb_harq_inputs,
+ cap->num_buffers_hard_out);
+ return TEST_FAILED;
+ }
+ if (nb_harq_outputs > cap->num_buffers_hard_out) {
+ printf(
+ "Too many HARQ outputs defined: %u, max: %u\n",
+ nb_harq_outputs,
+ cap->num_buffers_hard_out);
+ return TEST_FAILED;
+ }
+ if (intr_enabled && !(cap->capability_flags &
+ RTE_BBDEV_TURBO_DEC_INTERRUPTS)) {
+ printf(
+ "Dequeue interrupts are not supported!\n");
+ return TEST_FAILED;
+ }
+
return TEST_SUCCESS;
}
}
&test_vector.entries[DATA_HARD_OUTPUT];
struct op_data_entries *soft_out =
&test_vector.entries[DATA_SOFT_OUTPUT];
+ struct op_data_entries *harq_in =
+ &test_vector.entries[DATA_HARQ_INPUT];
+ struct op_data_entries *harq_out =
+ &test_vector.entries[DATA_HARQ_OUTPUT];
/* allocate ops mempool */
ops_pool_size = optimal_mempool_size(RTE_MAX(
socket_id);
ad->hard_out_mbuf_pool = mp;
- if (soft_out->nb_segments == 0)
- return TEST_SUCCESS;
/* Soft outputs */
- mbuf_pool_size = optimal_mempool_size(ops_pool_size *
- soft_out->nb_segments);
- mp = create_mbuf_pool(soft_out, ad->dev_id, socket_id, mbuf_pool_size,
- "soft_out");
- TEST_ASSERT_NOT_NULL(mp,
- "ERROR Failed to create %uB soft output pktmbuf pool for dev %u on socket %u.",
- mbuf_pool_size,
- ad->dev_id,
- socket_id);
- ad->soft_out_mbuf_pool = mp;
+ if (soft_out->nb_segments > 0) {
+ mbuf_pool_size = optimal_mempool_size(ops_pool_size *
+ soft_out->nb_segments);
+ mp = create_mbuf_pool(soft_out, ad->dev_id, socket_id,
+ mbuf_pool_size,
+ "soft_out");
+ TEST_ASSERT_NOT_NULL(mp,
+ "ERROR Failed to create %uB soft output pktmbuf pool for dev %u on socket %u.",
+ mbuf_pool_size,
+ ad->dev_id,
+ socket_id);
+ ad->soft_out_mbuf_pool = mp;
+ }
- return 0;
+ /* HARQ inputs */
+ if (harq_in->nb_segments > 0) {
+ mbuf_pool_size = optimal_mempool_size(ops_pool_size *
+ harq_in->nb_segments);
+ mp = create_mbuf_pool(harq_in, ad->dev_id, socket_id,
+ mbuf_pool_size,
+ "harq_in");
+ TEST_ASSERT_NOT_NULL(mp,
+ "ERROR Failed to create %uB harq input pktmbuf pool for dev %u on socket %u.",
+ mbuf_pool_size,
+ ad->dev_id,
+ socket_id);
+ ad->harq_in_mbuf_pool = mp;
+ }
+
+ /* HARQ outputs */
+ if (harq_out->nb_segments > 0) {
+ mbuf_pool_size = optimal_mempool_size(ops_pool_size *
+ harq_out->nb_segments);
+ mp = create_mbuf_pool(harq_out, ad->dev_id, socket_id,
+ mbuf_pool_size,
+ "harq_out");
+ TEST_ASSERT_NOT_NULL(mp,
+ "ERROR Failed to create %uB harq output pktmbuf pool for dev %u on socket %u.",
+ mbuf_pool_size,
+ ad->dev_id,
+ socket_id);
+ ad->harq_out_mbuf_pool = mp;
+ }
+
+ return TEST_SUCCESS;
}
static int
unsigned int nb_queues;
enum rte_bbdev_op_type op_type = vector->op_type;
+/* Configure FPGA LTE FEC with PF & VF values
+ * if '-i' flag is set and using FPGA device
+ */
+#ifdef RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC
+ if ((get_init_device() == true) &&
+ (!strcmp(info->drv.driver_name, FPGA_PF_DRIVER_NAME))) {
+ struct fpga_lte_fec_conf conf;
+ unsigned int i;
+
+ printf("Configure FPGA FEC Driver %s with default values\n",
+ info->drv.driver_name);
+
+ /* clear default configuration before initialization */
+ memset(&conf, 0, sizeof(struct fpga_lte_fec_conf));
+
+ /* Set PF mode :
+ * true if PF is used for data plane
+ * false for VFs
+ */
+ conf.pf_mode_en = true;
+
+ for (i = 0; i < FPGA_LTE_FEC_NUM_VFS; ++i) {
+ /* Number of UL queues per VF (fpga supports 8 VFs) */
+ conf.vf_ul_queues_number[i] = VF_UL_QUEUE_VALUE;
+ /* Number of DL queues per VF (fpga supports 8 VFs) */
+ conf.vf_dl_queues_number[i] = VF_DL_QUEUE_VALUE;
+ }
+
+ /* UL bandwidth. Needed for schedule algorithm */
+ conf.ul_bandwidth = UL_BANDWIDTH;
+ /* DL bandwidth */
+ conf.dl_bandwidth = DL_BANDWIDTH;
+
+ /* UL & DL load balance factor set to 128 */
+ conf.ul_load_balance = UL_LOAD_BALANCE;
+ conf.dl_load_balance = DL_LOAD_BALANCE;
+
+ /* FLR timeout value */
+ conf.flr_time_out = FLR_TIMEOUT;
+
+ /* setup FPGA PF with configuration information */
+ ret = fpga_lte_fec_configure(info->dev_name, &conf);
+ TEST_ASSERT_SUCCESS(ret,
+ "Failed to configure 4G FPGA PF for bbdev %s",
+ info->dev_name);
+ }
+#endif
nb_queues = RTE_MIN(rte_lcore_count(), info->drv.max_num_queues);
+ nb_queues = RTE_MIN(nb_queues, (unsigned int) MAX_QUEUES);
+
/* setup device */
ret = rte_bbdev_setup_queues(dev_id, nb_queues, info->socket_id);
if (ret < 0) {
bufs[i].offset = 0;
bufs[i].length = 0;
- if (op_type == DATA_INPUT) {
+ if ((op_type == DATA_INPUT) || (op_type == DATA_HARQ_INPUT)) {
data = rte_pktmbuf_append(m_head, seg->length);
TEST_ASSERT_NOT_NULL(data,
"Couldn't append %u bytes to mbuf from %d data type mbuf pool",
"Couldn't chain mbufs from %d data type mbuf pool",
op_type);
}
-
} else {
/* allocate chained-mbuf for output buffer */
static void
limit_input_llr_val_range(struct rte_bbdev_op_data *input_ops,
- uint16_t n, int8_t max_llr_modulus)
+ const uint16_t n, const int8_t max_llr_modulus)
{
uint16_t i, byte_idx;
}
}
+static void
+ldpc_input_llr_scaling(struct rte_bbdev_op_data *input_ops,
+ const uint16_t n, const int8_t llr_size,
+ const int8_t llr_decimals)
+{
+ if (input_ops == NULL)
+ return;
+
+ uint16_t i, byte_idx;
+
+ int16_t llr_max, llr_min, llr_tmp;
+ llr_max = (1 << (llr_size - 1)) - 1;
+ llr_min = -llr_max;
+ for (i = 0; i < n; ++i) {
+ struct rte_mbuf *m = input_ops[i].data;
+ while (m != NULL) {
+ int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
+ input_ops[i].offset);
+ for (byte_idx = 0; byte_idx < rte_pktmbuf_data_len(m);
+ ++byte_idx) {
+
+ llr_tmp = llr[byte_idx];
+ if (llr_decimals == 2)
+ llr_tmp *= 2;
+ else if (llr_decimals == 0)
+ llr_tmp /= 2;
+ llr_tmp = RTE_MIN(llr_max,
+ RTE_MAX(llr_min, llr_tmp));
+ llr[byte_idx] = (int8_t) llr_tmp;
+ }
+
+ m = m->next;
+ }
+ }
+}
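+/* Worked example (illustrative): with llr_size = 8 the saturation range is
+ * [-127, 127]. An input LLR of 70 with llr_decimals = 2 is doubled to 140
+ * and clamped to 127; with llr_decimals = 0 it is halved to 35 and left
+ * unchanged by the clamp.
+ */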
+
+
+
static int
fill_queue_buffers(struct test_op_params *op_params,
struct rte_mempool *in_mp, struct rte_mempool *hard_out_mp,
- struct rte_mempool *soft_out_mp, uint16_t queue_id,
+ struct rte_mempool *soft_out_mp,
+ struct rte_mempool *harq_in_mp, struct rte_mempool *harq_out_mp,
+ uint16_t queue_id,
const struct rte_bbdev_op_cap *capabilities,
uint16_t min_alignment, const int socket_id)
{
in_mp,
soft_out_mp,
hard_out_mp,
+ harq_in_mp,
+ harq_out_mp,
};
struct rte_bbdev_op_data **queue_ops[DATA_NUM_TYPES] = {
&op_params->q_bufs[socket_id][queue_id].inputs,
&op_params->q_bufs[socket_id][queue_id].soft_outputs,
&op_params->q_bufs[socket_id][queue_id].hard_outputs,
+ &op_params->q_bufs[socket_id][queue_id].harq_inputs,
+ &op_params->q_bufs[socket_id][queue_id].harq_outputs,
};
for (type = DATA_INPUT; type < DATA_NUM_TYPES; ++type) {
limit_input_llr_val_range(*queue_ops[DATA_INPUT], n,
capabilities->cap.turbo_dec.max_llr_modulus);
+ if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
+ ldpc_input_llr_scaling(*queue_ops[DATA_INPUT], n,
+ capabilities->cap.ldpc_dec.llr_size,
+ capabilities->cap.ldpc_dec.llr_decimals);
+ ldpc_input_llr_scaling(*queue_ops[DATA_HARQ_INPUT], n,
+ capabilities->cap.ldpc_dec.llr_size,
+ capabilities->cap.ldpc_dec.llr_decimals);
+ }
+
return 0;
}
rte_mempool_free(ad->in_mbuf_pool);
rte_mempool_free(ad->hard_out_mbuf_pool);
rte_mempool_free(ad->soft_out_mbuf_pool);
+ rte_mempool_free(ad->harq_in_mbuf_pool);
+ rte_mempool_free(ad->harq_out_mbuf_pool);
for (i = 0; i < rte_lcore_count(); ++i) {
for (j = 0; j < RTE_MAX_NUMA_NODES; ++j) {
rte_free(op_params->q_bufs[j][i].inputs);
rte_free(op_params->q_bufs[j][i].hard_outputs);
rte_free(op_params->q_bufs[j][i].soft_outputs);
+ rte_free(op_params->q_bufs[j][i].harq_inputs);
+ rte_free(op_params->q_bufs[j][i].harq_outputs);
}
}
}
}
}
+static void
+copy_reference_ldpc_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
+ unsigned int start_idx,
+ struct rte_bbdev_op_data *inputs,
+ struct rte_bbdev_op_data *hard_outputs,
+ struct rte_bbdev_op_data *soft_outputs,
+ struct rte_bbdev_op_data *harq_inputs,
+ struct rte_bbdev_op_data *harq_outputs,
+ struct rte_bbdev_dec_op *ref_op)
+{
+ unsigned int i;
+ struct rte_bbdev_op_ldpc_dec *ldpc_dec = &ref_op->ldpc_dec;
+
+ for (i = 0; i < n; ++i) {
+ if (ldpc_dec->code_block_mode == 0) {
+ ops[i]->ldpc_dec.tb_params.ea =
+ ldpc_dec->tb_params.ea;
+ ops[i]->ldpc_dec.tb_params.eb =
+ ldpc_dec->tb_params.eb;
+ ops[i]->ldpc_dec.tb_params.c =
+ ldpc_dec->tb_params.c;
+ ops[i]->ldpc_dec.tb_params.cab =
+ ldpc_dec->tb_params.cab;
+ ops[i]->ldpc_dec.tb_params.r =
+ ldpc_dec->tb_params.r;
+ } else {
+ ops[i]->ldpc_dec.cb_params.e = ldpc_dec->cb_params.e;
+ }
+
+ ops[i]->ldpc_dec.basegraph = ldpc_dec->basegraph;
+ ops[i]->ldpc_dec.z_c = ldpc_dec->z_c;
+ ops[i]->ldpc_dec.q_m = ldpc_dec->q_m;
+ ops[i]->ldpc_dec.n_filler = ldpc_dec->n_filler;
+ ops[i]->ldpc_dec.n_cb = ldpc_dec->n_cb;
+ ops[i]->ldpc_dec.iter_max = ldpc_dec->iter_max;
+ ops[i]->ldpc_dec.rv_index = ldpc_dec->rv_index;
+ ops[i]->ldpc_dec.op_flags = ldpc_dec->op_flags;
+ ops[i]->ldpc_dec.code_block_mode = ldpc_dec->code_block_mode;
+
+ ops[i]->ldpc_dec.hard_output = hard_outputs[start_idx + i];
+ ops[i]->ldpc_dec.input = inputs[start_idx + i];
+ if (soft_outputs != NULL)
+ ops[i]->ldpc_dec.soft_output =
+ soft_outputs[start_idx + i];
+ if (harq_inputs != NULL)
+ ops[i]->ldpc_dec.harq_combined_input =
+ harq_inputs[start_idx + i];
+ if (harq_outputs != NULL)
+ ops[i]->ldpc_dec.harq_combined_output =
+ harq_outputs[start_idx + i];
+ }
+}
+
+
+static void
+copy_reference_ldpc_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
+ unsigned int start_idx,
+ struct rte_bbdev_op_data *inputs,
+ struct rte_bbdev_op_data *outputs,
+ struct rte_bbdev_enc_op *ref_op)
+{
+ unsigned int i;
+ struct rte_bbdev_op_ldpc_enc *ldpc_enc = &ref_op->ldpc_enc;
+ for (i = 0; i < n; ++i) {
+ if (ldpc_enc->code_block_mode == 0) {
+ ops[i]->ldpc_enc.tb_params.ea = ldpc_enc->tb_params.ea;
+ ops[i]->ldpc_enc.tb_params.eb = ldpc_enc->tb_params.eb;
+ ops[i]->ldpc_enc.tb_params.cab =
+ ldpc_enc->tb_params.cab;
+ ops[i]->ldpc_enc.tb_params.c = ldpc_enc->tb_params.c;
+ ops[i]->ldpc_enc.tb_params.r = ldpc_enc->tb_params.r;
+ } else {
+ ops[i]->ldpc_enc.cb_params.e = ldpc_enc->cb_params.e;
+ }
+ ops[i]->ldpc_enc.basegraph = ldpc_enc->basegraph;
+ ops[i]->ldpc_enc.z_c = ldpc_enc->z_c;
+ ops[i]->ldpc_enc.q_m = ldpc_enc->q_m;
+ ops[i]->ldpc_enc.n_filler = ldpc_enc->n_filler;
+ ops[i]->ldpc_enc.n_cb = ldpc_enc->n_cb;
+ ops[i]->ldpc_enc.rv_index = ldpc_enc->rv_index;
+ ops[i]->ldpc_enc.op_flags = ldpc_enc->op_flags;
+ ops[i]->ldpc_enc.code_block_mode = ldpc_enc->code_block_mode;
+ ops[i]->ldpc_enc.output = outputs[start_idx + i];
+ ops[i]->ldpc_enc.input = inputs[start_idx + i];
+ }
+}
+
static int
check_dec_status_and_ordering(struct rte_bbdev_dec_op *op,
unsigned int order_idx, const int expected_status)
return TEST_SUCCESS;
}
+
+static int
+validate_ldpc_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
+ struct rte_bbdev_dec_op *ref_op, const int vector_mask)
+{
+ unsigned int i;
+ int ret;
+ struct op_data_entries *hard_data_orig =
+ &test_vector.entries[DATA_HARD_OUTPUT];
+ struct op_data_entries *soft_data_orig =
+ &test_vector.entries[DATA_SOFT_OUTPUT];
+ struct op_data_entries *harq_data_orig =
+ &test_vector.entries[DATA_HARQ_OUTPUT];
+ struct rte_bbdev_op_ldpc_dec *ops_td;
+ struct rte_bbdev_op_data *hard_output;
+ struct rte_bbdev_op_data *harq_output;
+ struct rte_bbdev_op_data *soft_output;
+ struct rte_bbdev_op_ldpc_dec *ref_td = &ref_op->ldpc_dec;
+
+ for (i = 0; i < n; ++i) {
+ ops_td = &ops[i]->ldpc_dec;
+ hard_output = &ops_td->hard_output;
+ harq_output = &ops_td->harq_combined_output;
+ soft_output = &ops_td->soft_output;
+
+ ret = check_dec_status_and_ordering(ops[i], i, ref_op->status);
+ TEST_ASSERT_SUCCESS(ret,
+ "Checking status and ordering for decoder failed");
+ if (vector_mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT)
+ TEST_ASSERT(ops_td->iter_count <= ref_td->iter_count,
+ "Returned iter_count (%d) > expected iter_count (%d)",
+ ops_td->iter_count, ref_td->iter_count);
+ /* We can ignore data when the decoding failed to converge */
+ if ((ops[i]->status & (1 << RTE_BBDEV_SYNDROME_ERROR)) == 0)
+ TEST_ASSERT_SUCCESS(validate_op_chain(hard_output,
+ hard_data_orig),
+ "Hard output buffers (CB=%u) are not equal",
+ i);
+
+ if (ref_op->ldpc_dec.op_flags & RTE_BBDEV_LDPC_SOFT_OUT_ENABLE)
+ TEST_ASSERT_SUCCESS(validate_op_chain(soft_output,
+ soft_data_orig),
+ "Soft output buffers (CB=%u) are not equal",
+ i);
+ if (ref_op->ldpc_dec.op_flags &
+ RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE) {
+ ldpc_input_llr_scaling(harq_output, 1, 8, 0);
+ TEST_ASSERT_SUCCESS(validate_op_chain(harq_output,
+ harq_data_orig),
+ "HARQ output buffers (CB=%u) are not equal",
+ i);
+ }
+ }
+
+ return TEST_SUCCESS;
+}
+
+
static int
validate_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
struct rte_bbdev_enc_op *ref_op)
return TEST_SUCCESS;
}
+static int
+validate_ldpc_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
+ struct rte_bbdev_enc_op *ref_op)
+{
+ unsigned int i;
+ int ret;
+ struct op_data_entries *hard_data_orig =
+ &test_vector.entries[DATA_HARD_OUTPUT];
+
+ for (i = 0; i < n; ++i) {
+ ret = check_enc_status_and_ordering(ops[i], i, ref_op->status);
+ TEST_ASSERT_SUCCESS(ret,
+ "Checking status and ordering for encoder failed");
+ TEST_ASSERT_SUCCESS(validate_op_chain(
+ &ops[i]->ldpc_enc.output,
+ hard_data_orig),
+ "Output buffers (CB=%u) are not equal",
+ i);
+ }
+
+ return TEST_SUCCESS;
+}
+
static void
create_reference_dec_op(struct rte_bbdev_dec_op *op)
{
entry->segments[i].length;
}
+static void
+create_reference_ldpc_dec_op(struct rte_bbdev_dec_op *op)
+{
+ unsigned int i;
+ struct op_data_entries *entry;
+
+ op->ldpc_dec = test_vector.ldpc_dec;
+ entry = &test_vector.entries[DATA_INPUT];
+ for (i = 0; i < entry->nb_segments; ++i)
+ op->ldpc_dec.input.length +=
+ entry->segments[i].length;
+ if (test_vector.ldpc_dec.op_flags &
+ RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE) {
+ entry = &test_vector.entries[DATA_HARQ_INPUT];
+ for (i = 0; i < entry->nb_segments; ++i)
+ op->ldpc_dec.harq_combined_input.length +=
+ entry->segments[i].length;
+ }
+}
+
+
static void
create_reference_enc_op(struct rte_bbdev_enc_op *op)
{
entry->segments[i].length;
}
+static void
+create_reference_ldpc_enc_op(struct rte_bbdev_enc_op *op)
+{
+ unsigned int i;
+ struct op_data_entries *entry;
+
+ op->ldpc_enc = test_vector.ldpc_enc;
+ entry = &test_vector.entries[DATA_INPUT];
+ for (i = 0; i < entry->nb_segments; ++i)
+ op->ldpc_enc.input.length +=
+ entry->segments[i].length;
+}
+
static uint32_t
calc_dec_TB_size(struct rte_bbdev_dec_op *op)
{
return tb_size;
}
+static uint32_t
+calc_ldpc_dec_TB_size(struct rte_bbdev_dec_op *op)
+{
+ uint8_t i;
+ uint32_t c, r, tb_size = 0;
+ uint16_t sys_cols = (op->ldpc_dec.basegraph == 1) ? 22 : 10;
+
+ if (op->ldpc_dec.code_block_mode) {
+ tb_size = sys_cols * op->ldpc_dec.z_c - op->ldpc_dec.n_filler;
+ } else {
+ c = op->ldpc_dec.tb_params.c;
+ r = op->ldpc_dec.tb_params.r;
+ for (i = 0; i < c - r; i++)
+ tb_size += sys_cols * op->ldpc_dec.z_c
+ - op->ldpc_dec.n_filler;
+ }
+ return tb_size;
+}
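+/* Worked example (illustrative): for basegraph 1 (22 systematic columns),
+ * z_c = 384 and n_filler = 0, one code block carries 22 * 384 = 8448
+ * information bits; in TB mode this term is summed over the c - r scheduled
+ * code blocks. The encoder variant below applies the same formula to the
+ * ldpc_enc fields.
+ */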
+
static uint32_t
calc_enc_TB_size(struct rte_bbdev_enc_op *op)
{
return tb_size;
}
+static uint32_t
+calc_ldpc_enc_TB_size(struct rte_bbdev_enc_op *op)
+{
+ uint8_t i;
+ uint32_t c, r, tb_size = 0;
+ uint16_t sys_cols = (op->ldpc_enc.basegraph == 1) ? 22 : 10;
+
+ if (op->ldpc_enc.code_block_mode) {
+ tb_size = sys_cols * op->ldpc_enc.z_c - op->ldpc_enc.n_filler;
+ } else {
+ c = op->ldpc_enc.tb_params.c;
+ r = op->ldpc_enc.tb_params.r;
+ for (i = 0; i < c - r; i++)
+ tb_size += sys_cols * op->ldpc_enc.z_c
+ - op->ldpc_enc.n_filler;
+ }
+ return tb_size;
+}
+
+
static int
init_test_op_params(struct test_op_params *op_params,
enum rte_bbdev_op_type op_type, const int expected_status,
uint16_t burst_sz, uint16_t num_to_process, uint16_t num_lcores)
{
int ret = 0;
- if (op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ op_type == RTE_BBDEV_OP_LDPC_DEC)
ret = rte_bbdev_dec_op_alloc_bulk(ops_mp,
&op_params->ref_dec_op, 1);
else
op_params->num_to_process = num_to_process;
op_params->num_lcores = num_lcores;
op_params->vector_mask = vector_mask;
- if (op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ op_type == RTE_BBDEV_OP_LDPC_DEC)
op_params->ref_dec_op->status = expected_status;
- else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
+ else if (op_type == RTE_BBDEV_OP_TURBO_ENC
+ || op_type == RTE_BBDEV_OP_LDPC_ENC)
op_params->ref_enc_op->status = expected_status;
-
return 0;
}
goto fail;
}
- if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
- /* Find Decoder capabilities */
- const struct rte_bbdev_op_cap *cap = info.drv.capabilities;
- while (cap->type != RTE_BBDEV_OP_NONE) {
- if (cap->type == RTE_BBDEV_OP_TURBO_DEC) {
- capabilities = cap;
- break;
- }
- }
- TEST_ASSERT_NOT_NULL(capabilities,
- "Couldn't find Decoder capabilities");
+ /* Find capabilities */
+ const struct rte_bbdev_op_cap *cap = info.drv.capabilities;
+ for (i = 0; i < RTE_BBDEV_OP_TYPE_COUNT; i++) {
+ if (cap->type == test_vector.op_type) {
+ capabilities = cap;
+ break;
+ }
+ cap++;
+ }
+ TEST_ASSERT_NOT_NULL(capabilities,
+ "Couldn't find capabilities");
+
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
create_reference_dec_op(op_params->ref_dec_op);
} else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
create_reference_enc_op(op_params->ref_enc_op);
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
+ create_reference_ldpc_enc_op(op_params->ref_enc_op);
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
+ create_reference_ldpc_dec_op(op_params->ref_dec_op);
for (i = 0; i < ad->nb_queues; ++i) {
f_ret = fill_queue_buffers(op_params,
ad->in_mbuf_pool,
ad->hard_out_mbuf_pool,
ad->soft_out_mbuf_pool,
+ ad->harq_in_mbuf_pool,
+ ad->harq_out_mbuf_pool,
ad->queue_ids[i],
capabilities,
info.drv.min_alignment,
uint16_t i;
uint64_t total_time;
uint16_t deq, burst_sz, num_ops;
- uint16_t queue_id = INVALID_QUEUE_ID;
- struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
- struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
+ uint16_t queue_id = *(uint16_t *) ret_param;
struct rte_bbdev_info info;
-
double tb_len_bits;
-
struct thread_params *tp = cb_arg;
- RTE_SET_USED(ret_param);
- queue_id = tp->queue_id;
/* Find matching thread params using queue_id */
for (i = 0; i < MAX_QUEUES; ++i, ++tp)
return;
}
- burst_sz = tp->op_params->burst_sz;
+ burst_sz = rte_atomic16_read(&tp->burst_sz);
num_ops = tp->op_params->num_to_process;
- if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
- deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id, dec_ops,
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
+ deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
+ &tp->dec_ops[
+ rte_atomic16_read(&tp->nb_dequeued)],
burst_sz);
- rte_bbdev_dec_op_free_bulk(dec_ops, deq);
- } else {
- deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id, enc_ops,
+ else
+ deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
+ &tp->enc_ops[
+ rte_atomic16_read(&tp->nb_dequeued)],
burst_sz);
- rte_bbdev_enc_op_free_bulk(enc_ops, deq);
- }
if (deq < burst_sz) {
printf(
if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
- ret = validate_dec_op(dec_ops, num_ops, ref_op,
+ ret = validate_dec_op(tp->dec_ops, num_ops, ref_op,
tp->op_params->vector_mask);
- rte_bbdev_dec_op_free_bulk(dec_ops, deq);
+ /* get the max of iter_count for all dequeued ops */
+ for (i = 0; i < num_ops; ++i)
+ tp->iter_count = RTE_MAX(
+ tp->dec_ops[i]->turbo_dec.iter_count,
+ tp->iter_count);
+ rte_bbdev_dec_op_free_bulk(tp->dec_ops, deq);
} else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC) {
struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
- ret = validate_enc_op(enc_ops, num_ops, ref_op);
- rte_bbdev_enc_op_free_bulk(enc_ops, deq);
+ ret = validate_enc_op(tp->enc_ops, num_ops, ref_op);
+ rte_bbdev_enc_op_free_bulk(tp->enc_ops, deq);
+ } else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC) {
+ struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
+ ret = validate_ldpc_enc_op(tp->enc_ops, num_ops, ref_op);
+ rte_bbdev_enc_op_free_bulk(tp->enc_ops, deq);
+ } else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
+ struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
+ ret = validate_ldpc_dec_op(tp->dec_ops, num_ops, ref_op,
+ tp->op_params->vector_mask);
+ rte_bbdev_dec_op_free_bulk(tp->dec_ops, deq);
}
if (ret) {
case RTE_BBDEV_OP_TURBO_ENC:
tb_len_bits = calc_enc_TB_size(tp->op_params->ref_enc_op);
break;
+ case RTE_BBDEV_OP_LDPC_DEC:
+ tb_len_bits = calc_ldpc_dec_TB_size(tp->op_params->ref_dec_op);
+ break;
+ case RTE_BBDEV_OP_LDPC_ENC:
+ tb_len_bits = calc_ldpc_enc_TB_size(tp->op_params->ref_enc_op);
+ break;
case RTE_BBDEV_OP_NONE:
tb_len_bits = 0.0;
break;
return;
}
- tp->ops_per_sec = ((double)num_ops) /
+ tp->ops_per_sec += ((double)num_ops) /
((double)total_time / (double)rte_get_tsc_hz());
- tp->mbps = (((double)(num_ops * tb_len_bits)) / 1000000.0) /
+ tp->mbps += (((double)(num_ops * tb_len_bits)) / 1000000.0) /
((double)total_time / (double)rte_get_tsc_hz());
rte_atomic16_add(&tp->nb_dequeued, deq);
struct rte_bbdev_dec_op *ops[num_to_process];
struct test_buffers *bufs = NULL;
struct rte_bbdev_info info;
- int ret;
- uint16_t num_to_enq;
+ int ret, i, j;
+ uint16_t num_to_enq, enq;
TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
"BURST_SIZE should be <= %u", MAX_BURST);
bufs->hard_outputs, bufs->soft_outputs,
tp->op_params->ref_dec_op);
- tp->start_time = rte_rdtsc_precise();
- for (enqueued = 0; enqueued < num_to_process;) {
+ /* Set counter to validate the ordering */
+ for (j = 0; j < num_to_process; ++j)
+ ops[j]->opaque_data = (void *)(uintptr_t)j;
+
+ for (j = 0; j < TEST_REPETITIONS; ++j) {
+ for (i = 0; i < num_to_process; ++i)
+ rte_pktmbuf_reset(ops[i]->turbo_dec.hard_output.data);
- num_to_enq = burst_sz;
+ tp->start_time = rte_rdtsc_precise();
+ for (enqueued = 0; enqueued < num_to_process;) {
+ num_to_enq = burst_sz;
- if (unlikely(num_to_process - enqueued < num_to_enq))
- num_to_enq = num_to_process - enqueued;
+ if (unlikely(num_to_process - enqueued < num_to_enq))
+ num_to_enq = num_to_process - enqueued;
+
+ enq = 0;
+ do {
+ enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
+ queue_id, &ops[enqueued],
+ num_to_enq);
+ } while (unlikely(num_to_enq != enq));
+ enqueued += enq;
+
+ /* Write to thread burst_sz current number of enqueued
+ * descriptors. It ensures that proper number of
+ * descriptors will be dequeued in callback
+ * function - needed for last batch in case where
+ * the number of operations is not a multiple of
+ * burst size.
+ */
+ rte_atomic16_set(&tp->burst_sz, num_to_enq);
- enqueued += rte_bbdev_enqueue_dec_ops(tp->dev_id, queue_id,
- &ops[enqueued], num_to_enq);
+ /* Wait until processing of previous batch is
+ * completed
+ */
+ while (rte_atomic16_read(&tp->nb_dequeued) !=
+ (int16_t) enqueued)
+ rte_pause();
+ }
+ if (j != TEST_REPETITIONS - 1)
+ rte_atomic16_clear(&tp->nb_dequeued);
}
return TEST_SUCCESS;
struct rte_bbdev_enc_op *ops[num_to_process];
struct test_buffers *bufs = NULL;
struct rte_bbdev_info info;
- int ret;
- uint16_t num_to_enq;
+ int ret, i, j;
+ uint16_t num_to_enq, enq;
TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
"BURST_SIZE should be <= %u", MAX_BURST);
copy_reference_enc_op(ops, num_to_process, 0, bufs->inputs,
bufs->hard_outputs, tp->op_params->ref_enc_op);
- tp->start_time = rte_rdtsc_precise();
- for (enqueued = 0; enqueued < num_to_process;) {
+ /* Set counter to validate the ordering */
+ for (j = 0; j < num_to_process; ++j)
+ ops[j]->opaque_data = (void *)(uintptr_t)j;
+
+ for (j = 0; j < TEST_REPETITIONS; ++j) {
+ for (i = 0; i < num_to_process; ++i)
+ rte_pktmbuf_reset(ops[i]->turbo_enc.output.data);
- num_to_enq = burst_sz;
+ tp->start_time = rte_rdtsc_precise();
+ for (enqueued = 0; enqueued < num_to_process;) {
+ num_to_enq = burst_sz;
- if (unlikely(num_to_process - enqueued < num_to_enq))
- num_to_enq = num_to_process - enqueued;
+ if (unlikely(num_to_process - enqueued < num_to_enq))
+ num_to_enq = num_to_process - enqueued;
+
+ enq = 0;
+ do {
+ enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
+ queue_id, &ops[enqueued],
+ num_to_enq);
+ } while (unlikely(enq != num_to_enq));
+ enqueued += enq;
+
+ /* Write to thread burst_sz current number of enqueued
+ * descriptors. It ensures that proper number of
+ * descriptors will be dequeued in callback
+ * function - needed for last batch in case where
+ * the number of operations is not a multiple of
+ * burst size.
+ */
+ rte_atomic16_set(&tp->burst_sz, num_to_enq);
- enqueued += rte_bbdev_enqueue_enc_ops(tp->dev_id, queue_id,
- &ops[enqueued], num_to_enq);
+ /* Wait until processing of previous batch is
+ * completed
+ */
+ while (rte_atomic16_read(&tp->nb_dequeued) !=
+ (int16_t) enqueued)
+ rte_pause();
+ }
+ if (j != TEST_REPETITIONS - 1)
+ rte_atomic16_clear(&tp->nb_dequeued);
}
return TEST_SUCCESS;
return TEST_SUCCESS;
}
+static int
+throughput_pmd_lcore_ldpc_dec(void *arg)
+{
+ struct thread_params *tp = arg;
+ uint16_t enq, deq;
+ uint64_t total_time = 0, start_time;
+ const uint16_t queue_id = tp->queue_id;
+ const uint16_t burst_sz = tp->op_params->burst_sz;
+ const uint16_t num_ops = tp->op_params->num_to_process;
+ struct rte_bbdev_dec_op *ops_enq[num_ops];
+ struct rte_bbdev_dec_op *ops_deq[num_ops];
+ struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
+ struct test_buffers *bufs = NULL;
+ int i, j, ret;
+ struct rte_bbdev_info info;
+ uint16_t num_to_enq;
+
+ TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
+ "BURST_SIZE should be <= %u", MAX_BURST);
+
+ rte_bbdev_info_get(tp->dev_id, &info);
+
+ TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
+ "NUM_OPS cannot exceed %u for this device",
+ info.drv.queue_size_lim);
+
+ bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
+
+ while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
+ rte_pause();
+
+ ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
+ TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
+
+ /* For throughput tests we need to disable early termination */
+ if (check_bit(ref_op->ldpc_dec.op_flags,
+ RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE))
+ ref_op->ldpc_dec.op_flags &=
+ ~RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
+ ref_op->ldpc_dec.iter_max = 6;
+ ref_op->ldpc_dec.iter_count = ref_op->ldpc_dec.iter_max;
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_dec_op(ops_enq, num_ops, 0, bufs->inputs,
+ bufs->hard_outputs, bufs->soft_outputs,
+ bufs->harq_inputs, bufs->harq_outputs, ref_op);
+
+ /* Set counter to validate the ordering */
+ for (j = 0; j < num_ops; ++j)
+ ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+ for (i = 0; i < TEST_REPETITIONS; ++i) {
+ for (j = 0; j < num_ops; ++j) {
+ mbuf_reset(ops_enq[j]->ldpc_dec.hard_output.data);
+ if (check_bit(ref_op->ldpc_dec.op_flags,
+ RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE))
+ mbuf_reset(
+ ops_enq[j]->ldpc_dec.harq_combined_output.data);
+ }
+
+ start_time = rte_rdtsc_precise();
+
+ for (enq = 0, deq = 0; enq < num_ops;) {
+ num_to_enq = burst_sz;
+
+ if (unlikely(num_ops - enq < num_to_enq))
+ num_to_enq = num_ops - enq;
+
+ enq += rte_bbdev_enqueue_ldpc_dec_ops(tp->dev_id,
+ queue_id, &ops_enq[enq], num_to_enq);
+
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
+ queue_id, &ops_deq[deq], enq - deq);
+ }
+
+ /* dequeue the remaining */
+ while (deq < enq) {
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
+ queue_id, &ops_deq[deq], enq - deq);
+ }
+
+ total_time += rte_rdtsc_precise() - start_time;
+ }
+
+ tp->iter_count = 0;
+ /* get the max of iter_count for all dequeued ops */
+ for (i = 0; i < num_ops; ++i) {
+ tp->iter_count = RTE_MAX(ops_enq[i]->ldpc_dec.iter_count,
+ tp->iter_count);
+ }
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
+ ret = validate_ldpc_dec_op(ops_deq, num_ops, ref_op,
+ tp->op_params->vector_mask);
+ TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+ }
+
+ rte_bbdev_dec_op_free_bulk(ops_enq, num_ops);
+
+ double tb_len_bits = calc_ldpc_dec_TB_size(ref_op);
+
+ tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
+ ((double)total_time / (double)rte_get_tsc_hz());
+ tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits)) /
+ 1000000.0) / ((double)total_time /
+ (double)rte_get_tsc_hz());
+
+ return TEST_SUCCESS;
+}
+
static int
throughput_pmd_lcore_enc(void *arg)
{
TEST_ASSERT_SUCCESS(ret, "Validation failed!");
}
+ rte_bbdev_enc_op_free_bulk(ops_enq, num_ops);
+
double tb_len_bits = calc_enc_TB_size(ref_op);
tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
return TEST_SUCCESS;
}
+static int
+throughput_pmd_lcore_ldpc_enc(void *arg)
+{
+ struct thread_params *tp = arg;
+ uint16_t enq, deq;
+ uint64_t total_time = 0, start_time;
+ const uint16_t queue_id = tp->queue_id;
+ const uint16_t burst_sz = tp->op_params->burst_sz;
+ const uint16_t num_ops = tp->op_params->num_to_process;
+ struct rte_bbdev_enc_op *ops_enq[num_ops];
+ struct rte_bbdev_enc_op *ops_deq[num_ops];
+ struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
+ struct test_buffers *bufs = NULL;
+ int i, j, ret;
+ struct rte_bbdev_info info;
+ uint16_t num_to_enq;
+
+ TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
+ "BURST_SIZE should be <= %u", MAX_BURST);
+
+ rte_bbdev_info_get(tp->dev_id, &info);
+
+ TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
+ "NUM_OPS cannot exceed %u for this device",
+ info.drv.queue_size_lim);
+
+ bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
+
+ while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
+ rte_pause();
+
+ ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
+ num_ops);
+ TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
+ num_ops);
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_enc_op(ops_enq, num_ops, 0, bufs->inputs,
+ bufs->hard_outputs, ref_op);
+
+ /* Set counter to validate the ordering */
+ for (j = 0; j < num_ops; ++j)
+ ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+ for (i = 0; i < TEST_REPETITIONS; ++i) {
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ for (j = 0; j < num_ops; ++j)
+ mbuf_reset(ops_enq[j]->ldpc_enc.output.data);
+
+ start_time = rte_rdtsc_precise();
+
+ for (enq = 0, deq = 0; enq < num_ops;) {
+ num_to_enq = burst_sz;
+
+ if (unlikely(num_ops - enq < num_to_enq))
+ num_to_enq = num_ops - enq;
+
+ enq += rte_bbdev_enqueue_ldpc_enc_ops(tp->dev_id,
+ queue_id, &ops_enq[enq], num_to_enq);
+
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(tp->dev_id,
+ queue_id, &ops_deq[deq], enq - deq);
+ }
+
+ /* dequeue the remaining */
+ while (deq < enq) {
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(tp->dev_id,
+ queue_id, &ops_deq[deq], enq - deq);
+ }
+
+ total_time += rte_rdtsc_precise() - start_time;
+ }
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
+ ret = validate_ldpc_enc_op(ops_deq, num_ops, ref_op);
+ TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+ }
+
+ rte_bbdev_enc_op_free_bulk(ops_enq, num_ops);
+
+ double tb_len_bits = calc_ldpc_enc_TB_size(ref_op);
+
+ tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
+ ((double)total_time / (double)rte_get_tsc_hz());
+ tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits))
+ / 1000000.0) / ((double)total_time /
+ (double)rte_get_tsc_hz());
+
+ return TEST_SUCCESS;
+}
+
static void
print_enc_throughput(struct thread_params *t_params, unsigned int used_cores)
{
- unsigned int lcore_id, iter = 0;
+ unsigned int iter = 0;
double total_mops = 0, total_mbps = 0;
- RTE_LCORE_FOREACH(lcore_id) {
- if (iter++ >= used_cores)
- break;
+ for (iter = 0; iter < used_cores; iter++) {
printf(
- "Throughput for core (%u): %.8lg Ops/s, %.8lg Mbps\n",
- lcore_id, t_params[lcore_id].ops_per_sec,
- t_params[lcore_id].mbps);
- total_mops += t_params[lcore_id].ops_per_sec;
- total_mbps += t_params[lcore_id].mbps;
+ "Throughput for core (%u): %.8lg Ops/s, %.8lg Mbps\n",
+ t_params[iter].lcore_id, t_params[iter].ops_per_sec,
+ t_params[iter].mbps);
+ total_mops += t_params[iter].ops_per_sec;
+ total_mbps += t_params[iter].mbps;
}
printf(
"\nTotal throughput for %u cores: %.8lg MOPS, %.8lg Mbps\n",
static void
print_dec_throughput(struct thread_params *t_params, unsigned int used_cores)
{
- unsigned int lcore_id, iter = 0;
+ unsigned int iter = 0;
double total_mops = 0, total_mbps = 0;
uint8_t iter_count = 0;
- RTE_LCORE_FOREACH(lcore_id) {
- if (iter++ >= used_cores)
- break;
+ for (iter = 0; iter < used_cores; iter++) {
printf(
- "Throughput for core (%u): %.8lg Ops/s, %.8lg Mbps @ max %u iterations\n",
- lcore_id, t_params[lcore_id].ops_per_sec,
- t_params[lcore_id].mbps,
- t_params[lcore_id].iter_count);
- total_mops += t_params[lcore_id].ops_per_sec;
- total_mbps += t_params[lcore_id].mbps;
- iter_count = RTE_MAX(iter_count, t_params[lcore_id].iter_count);
+ "Throughput for core (%u): %.8lg Ops/s, %.8lg Mbps @ max %u iterations\n",
+ t_params[iter].lcore_id, t_params[iter].ops_per_sec,
+ t_params[iter].mbps, t_params[iter].iter_count);
+ total_mops += t_params[iter].ops_per_sec;
+ total_mbps += t_params[iter].mbps;
+ iter_count = RTE_MAX(iter_count, t_params[iter].iter_count);
}
printf(
"\nTotal throughput for %u cores: %.8lg MOPS, %.8lg Mbps @ max %u iterations\n",
{
int ret;
unsigned int lcore_id, used_cores = 0;
- struct thread_params t_params[MAX_QUEUES];
+ struct thread_params *t_params, *tp;
struct rte_bbdev_info info;
lcore_function_t *throughput_function;
- struct thread_params *tp;
uint16_t num_lcores;
const char *op_type_str;
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u",
test_vector.op_type);
- printf(
- "Throughput test: dev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, int mode: %s, GHz: %lg\n",
+ printf("+ ------------------------------------------------------- +\n");
+ printf("== test: throughput\ndev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, itr mode: %s, GHz: %lg\n",
info.dev_name, ad->nb_queues, op_params->burst_sz,
op_params->num_to_process, op_params->num_lcores,
op_type_str,
? ad->nb_queues
: op_params->num_lcores;
+ /* Allocate memory for thread parameters structure */
+ t_params = rte_zmalloc(NULL, num_lcores * sizeof(struct thread_params),
+ RTE_CACHE_LINE_SIZE);
+ TEST_ASSERT_NOT_NULL(t_params, "Failed to alloc %zuB for t_params",
+ RTE_ALIGN(sizeof(struct thread_params) * num_lcores,
+ RTE_CACHE_LINE_SIZE));
+
if (intr_enabled) {
if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
throughput_function = throughput_intr_lcore_dec;
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
+ throughput_function = throughput_intr_lcore_dec;
+ else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
+ throughput_function = throughput_intr_lcore_enc;
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
+ throughput_function = throughput_intr_lcore_enc;
else
throughput_function = throughput_intr_lcore_enc;
/* Dequeue interrupt callback registration */
ret = rte_bbdev_callback_register(ad->dev_id,
RTE_BBDEV_EVENT_DEQUEUE, dequeue_event_callback,
- &t_params);
- if (ret < 0)
+ t_params);
+ if (ret < 0) {
+ rte_free(t_params);
return ret;
+ }
} else {
if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
throughput_function = throughput_pmd_lcore_dec;
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
+ throughput_function = throughput_pmd_lcore_ldpc_dec;
+ else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
+ throughput_function = throughput_pmd_lcore_enc;
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
+ throughput_function = throughput_pmd_lcore_ldpc_enc;
else
throughput_function = throughput_pmd_lcore_enc;
}
rte_atomic16_set(&op_params->sync, SYNC_WAIT);
- t_params[rte_lcore_id()].dev_id = ad->dev_id;
- t_params[rte_lcore_id()].op_params = op_params;
- t_params[rte_lcore_id()].queue_id =
- ad->queue_ids[used_cores++];
+ /* Master core is set at first entry */
+ t_params[0].dev_id = ad->dev_id;
+ t_params[0].lcore_id = rte_lcore_id();
+ t_params[0].op_params = op_params;
+ t_params[0].queue_id = ad->queue_ids[used_cores++];
+ t_params[0].iter_count = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (used_cores >= num_lcores)
break;
- t_params[lcore_id].dev_id = ad->dev_id;
- t_params[lcore_id].op_params = op_params;
- t_params[lcore_id].queue_id = ad->queue_ids[used_cores++];
+ t_params[used_cores].dev_id = ad->dev_id;
+ t_params[used_cores].lcore_id = lcore_id;
+ t_params[used_cores].op_params = op_params;
+ t_params[used_cores].queue_id = ad->queue_ids[used_cores];
+ t_params[used_cores].iter_count = 0;
- rte_eal_remote_launch(throughput_function, &t_params[lcore_id],
- lcore_id);
+ rte_eal_remote_launch(throughput_function,
+ &t_params[used_cores++], lcore_id);
}
rte_atomic16_set(&op_params->sync, SYNC_START);
- ret = throughput_function(&t_params[rte_lcore_id()]);
+ ret = throughput_function(&t_params[0]);
/* Master core is always used */
- used_cores = 1;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (used_cores++ >= num_lcores)
- break;
-
- ret |= rte_eal_wait_lcore(lcore_id);
- }
+ for (used_cores = 1; used_cores < num_lcores; used_cores++)
+ ret |= rte_eal_wait_lcore(t_params[used_cores].lcore_id);
/* Return if test failed */
- if (ret)
+ if (ret) {
+ rte_free(t_params);
return ret;
+ }
/* Print throughput if interrupts are disabled and test passed */
if (!intr_enabled) {
- if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
print_dec_throughput(t_params, num_lcores);
else
print_enc_throughput(t_params, num_lcores);
+ rte_free(t_params);
return ret;
}
* error using processing_status variable.
* Wait for master lcore operations.
*/
- tp = &t_params[rte_lcore_id()];
+ tp = &t_params[0];
while ((rte_atomic16_read(&tp->nb_dequeued) <
op_params->num_to_process) &&
(rte_atomic16_read(&tp->processing_status) !=
TEST_FAILED))
rte_pause();
- ret |= rte_atomic16_read(&tp->processing_status);
+ tp->ops_per_sec /= TEST_REPETITIONS;
+ tp->mbps /= TEST_REPETITIONS;
+ ret |= (int)rte_atomic16_read(&tp->processing_status);
/* Wait for slave lcores operations */
- used_cores = 1;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- tp = &t_params[lcore_id];
- if (used_cores++ >= num_lcores)
- break;
+ for (used_cores = 1; used_cores < num_lcores; used_cores++) {
+ tp = &t_params[used_cores];
while ((rte_atomic16_read(&tp->nb_dequeued) <
op_params->num_to_process) &&
TEST_FAILED))
rte_pause();
- ret |= rte_atomic16_read(&tp->processing_status);
+ tp->ops_per_sec /= TEST_REPETITIONS;
+ tp->mbps /= TEST_REPETITIONS;
+ ret |= (int)rte_atomic16_read(&tp->processing_status);
}
/* Print throughput if test passed */
if (!ret) {
- if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
print_dec_throughput(t_params, num_lcores);
- else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
+ else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC ||
+ test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
print_enc_throughput(t_params, num_lcores);
}
+
+ rte_free(t_params);
return ret;
}
}
static int
-latency_test_enc(struct rte_mempool *mempool,
- struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
- uint16_t dev_id, uint16_t queue_id,
+latency_test_ldpc_dec(struct rte_mempool *mempool,
+ struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op,
+ int vector_mask, uint16_t dev_id, uint16_t queue_id,
const uint16_t num_to_process, uint16_t burst_sz,
uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
{
int ret = TEST_SUCCESS;
uint16_t i, j, dequeued;
- struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+ struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
uint64_t start_time = 0, last_time = 0;
for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
if (unlikely(num_to_process - dequeued < burst_sz))
burst_sz = num_to_process - dequeued;
- ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
TEST_ASSERT_SUCCESS(ret,
- "rte_bbdev_enc_op_alloc_bulk() failed");
+ "rte_bbdev_dec_op_alloc_bulk() failed");
if (test_vector.op_type != RTE_BBDEV_OP_NONE)
- copy_reference_enc_op(ops_enq, burst_sz, dequeued,
+ copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued,
bufs->inputs,
bufs->hard_outputs,
+ bufs->soft_outputs,
+ bufs->harq_inputs,
+ bufs->harq_outputs,
ref_op);
/* Set counter to validate the ordering */
start_time = rte_rdtsc_precise();
- enq = rte_bbdev_enqueue_enc_ops(dev_id, queue_id, &ops_enq[enq],
- burst_sz);
+ enq = rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
+ &ops_enq[enq], burst_sz);
TEST_ASSERT(enq == burst_sz,
"Error enqueueing burst, expected %u, got %u",
burst_sz, enq);
/* Dequeue */
do {
- deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
&ops_deq[deq], burst_sz - deq);
if (likely(first_time && (deq > 0))) {
- last_time += rte_rdtsc_precise() - start_time;
+ last_time = rte_rdtsc_precise() - start_time;
first_time = false;
}
} while (unlikely(burst_sz != deq));
*total_time += last_time;
if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
- ret = validate_enc_op(ops_deq, burst_sz, ref_op);
+ ret = validate_ldpc_dec_op(ops_deq, burst_sz, ref_op,
+ vector_mask);
TEST_ASSERT_SUCCESS(ret, "Validation failed!");
}
- rte_bbdev_enc_op_free_bulk(ops_enq, deq);
+ rte_bbdev_dec_op_free_bulk(ops_enq, deq);
dequeued += deq;
}
}
static int
-latency_test(struct active_device *ad,
- struct test_op_params *op_params)
-{
+latency_test_enc(struct rte_mempool *mempool,
+ struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
+ uint16_t dev_id, uint16_t queue_id,
+ const uint16_t num_to_process, uint16_t burst_sz,
+ uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
+{
+ int ret = TEST_SUCCESS;
+ uint16_t i, j, dequeued;
+ struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+ uint64_t start_time = 0, last_time = 0;
+
+ for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
+ uint16_t enq = 0, deq = 0;
+ bool first_time = true;
+ last_time = 0;
+
+ if (unlikely(num_to_process - dequeued < burst_sz))
+ burst_sz = num_to_process - dequeued;
+
+ ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ TEST_ASSERT_SUCCESS(ret,
+ "rte_bbdev_enc_op_alloc_bulk() failed");
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_enc_op(ops_enq, burst_sz, dequeued,
+ bufs->inputs,
+ bufs->hard_outputs,
+ ref_op);
+
+ /* Set counter to validate the ordering */
+ for (j = 0; j < burst_sz; ++j)
+ ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+ start_time = rte_rdtsc_precise();
+
+ enq = rte_bbdev_enqueue_enc_ops(dev_id, queue_id, &ops_enq[enq],
+ burst_sz);
+ TEST_ASSERT(enq == burst_sz,
+ "Error enqueueing burst, expected %u, got %u",
+ burst_sz, enq);
+
+ /* Dequeue */
+ do {
+ deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
+ &ops_deq[deq], burst_sz - deq);
+ if (likely(first_time && (deq > 0))) {
+ last_time += rte_rdtsc_precise() - start_time;
+ first_time = false;
+ }
+ } while (unlikely(burst_sz != deq));
+
+ *max_time = RTE_MAX(*max_time, last_time);
+ *min_time = RTE_MIN(*min_time, last_time);
+ *total_time += last_time;
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
+ ret = validate_enc_op(ops_deq, burst_sz, ref_op);
+ TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+ }
+
+ rte_bbdev_enc_op_free_bulk(ops_enq, deq);
+ dequeued += deq;
+ }
+
+ return i;
+}
+
+static int
+latency_test_ldpc_enc(struct rte_mempool *mempool,
+ struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
+ uint16_t dev_id, uint16_t queue_id,
+ const uint16_t num_to_process, uint16_t burst_sz,
+ uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
+{
+ int ret = TEST_SUCCESS;
+ uint16_t i, j, dequeued;
+ struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+ uint64_t start_time = 0, last_time = 0;
+
+ for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
+ uint16_t enq = 0, deq = 0;
+ bool first_time = true;
+ last_time = 0;
+
+ if (unlikely(num_to_process - dequeued < burst_sz))
+ burst_sz = num_to_process - dequeued;
+
+ ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
+
+ TEST_ASSERT_SUCCESS(ret,
+ "rte_bbdev_enc_op_alloc_bulk() failed");
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_enc_op(ops_enq, burst_sz, dequeued,
+ bufs->inputs,
+ bufs->hard_outputs,
+ ref_op);
+
+ /* Set counter to validate the ordering */
+ for (j = 0; j < burst_sz; ++j)
+ ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+ start_time = rte_rdtsc_precise();
+
+ enq = rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_enq[enq], burst_sz);
+ TEST_ASSERT(enq == burst_sz,
+ "Error enqueueing burst, expected %u, got %u",
+ burst_sz, enq);
+
+ /* Dequeue */
+ do {
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_deq[deq], burst_sz - deq);
+ if (likely(first_time && (deq > 0))) {
+ last_time += rte_rdtsc_precise() - start_time;
+ first_time = false;
+ }
+ } while (unlikely(burst_sz != deq));
+
+ *max_time = RTE_MAX(*max_time, last_time);
+ *min_time = RTE_MIN(*min_time, last_time);
+ *total_time += last_time;
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
+ ret = validate_ldpc_enc_op(ops_deq, burst_sz, ref_op);
+ TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+ }
+
+ rte_bbdev_enc_op_free_bulk(ops_enq, deq);
+ dequeued += deq;
+ }
+
+ return i;
+}
+
+static int
+latency_test(struct active_device *ad,
+ struct test_op_params *op_params)
+{
int iter;
uint16_t burst_sz = op_params->burst_sz;
const uint16_t num_to_process = op_params->num_to_process;
op_type_str = rte_bbdev_op_type_str(op_type);
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
- printf(
- "\nValidation/Latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
+ printf("+ ------------------------------------------------------- +\n");
+ printf("== test: validation/latency\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
info.dev_name, burst_sz, num_to_process, op_type_str);
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
op_params->ref_dec_op, op_params->vector_mask,
ad->dev_id, queue_id, num_to_process,
burst_sz, &total_time, &min_time, &max_time);
- else
+ else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
iter = latency_test_enc(op_params->mp, bufs,
op_params->ref_enc_op, ad->dev_id, queue_id,
num_to_process, burst_sz, &total_time,
&min_time, &max_time);
+ else if (op_type == RTE_BBDEV_OP_LDPC_ENC)
+ iter = latency_test_ldpc_enc(op_params->mp, bufs,
+ op_params->ref_enc_op, ad->dev_id, queue_id,
+ num_to_process, burst_sz, &total_time,
+ &min_time, &max_time);
+ else if (op_type == RTE_BBDEV_OP_LDPC_DEC)
+ iter = latency_test_ldpc_dec(op_params->mp, bufs,
+ op_params->ref_dec_op, op_params->vector_mask,
+ ad->dev_id, queue_id, num_to_process,
+ burst_sz, &total_time, &min_time, &max_time);
+ else
+ iter = latency_test_enc(op_params->mp, bufs,
+ op_params->ref_enc_op,
+ ad->dev_id, queue_id,
+ num_to_process, burst_sz, &total_time,
+ &min_time, &max_time);
if (iter <= 0)
return TEST_FAILED;
printf("Operation latency:\n"
- "\tavg latency: %lg cycles, %lg us\n"
- "\tmin latency: %lg cycles, %lg us\n"
- "\tmax latency: %lg cycles, %lg us\n",
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n",
(double)total_time / (double)iter,
(double)(total_time * 1000000) / (double)iter /
(double)rte_get_tsc_hz(), (double)min_time,
stats.acc_offload_cycles);
time_st->enq_acc_total_time += stats.acc_offload_cycles;
- /* ensure enqueue has been completed */
+ /* give time for device to process ops */
rte_delay_us(200);
/* Start time meas for dequeue function offload latency */
return i;
}
+static int
+offload_latency_test_ldpc_dec(struct rte_mempool *mempool,
+ struct test_buffers *bufs,
+ struct rte_bbdev_dec_op *ref_op, uint16_t dev_id,
+ uint16_t queue_id, const uint16_t num_to_process,
+ uint16_t burst_sz, struct test_time_stats *time_st)
+{
+ int i, dequeued, ret;
+ struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+ uint64_t enq_start_time, deq_start_time;
+ uint64_t enq_sw_last_time, deq_last_time;
+ struct rte_bbdev_stats stats;
+
+ for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
+ uint16_t enq = 0, deq = 0;
+
+ if (unlikely(num_to_process - dequeued < burst_sz))
+ burst_sz = num_to_process - dequeued;
+
+ ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ TEST_ASSERT_SUCCESS(ret, "rte_bbdev_dec_op_alloc_bulk() failed");
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued,
+ bufs->inputs,
+ bufs->hard_outputs,
+ bufs->soft_outputs,
+ bufs->harq_inputs,
+ bufs->harq_outputs,
+ ref_op);
+
+ /* Start time meas for enqueue function offload latency */
+ enq_start_time = rte_rdtsc_precise();
+ do {
+ enq += rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
+ &ops_enq[enq], burst_sz - enq);
+ } while (unlikely(burst_sz != enq));
+
+ ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
+ TEST_ASSERT_SUCCESS(ret,
+ "Failed to get stats for queue (%u) of device (%u)",
+ queue_id, dev_id);
+
+ enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
+ stats.acc_offload_cycles;
+ time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
+ enq_sw_last_time);
+ time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
+ enq_sw_last_time);
+ time_st->enq_sw_total_time += enq_sw_last_time;
+
+ time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_total_time += stats.acc_offload_cycles;
+
+ /* give time for device to process ops */
+ rte_delay_us(200);
+
+ /* Start time meas for dequeue function offload latency */
+ deq_start_time = rte_rdtsc_precise();
+ /* Dequeue one operation */
+ do {
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
+ &ops_deq[deq], 1);
+ } while (unlikely(deq != 1));
+
+ deq_last_time = rte_rdtsc_precise() - deq_start_time;
+ time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
+ deq_last_time);
+ time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
+ deq_last_time);
+ time_st->deq_total_time += deq_last_time;
+
+ /* Dequeue remaining operations if needed */
+ while (burst_sz != deq)
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
+ &ops_deq[deq], burst_sz - deq);
+
+ rte_bbdev_dec_op_free_bulk(ops_enq, deq);
+ dequeued += deq;
+ }
+
+ return i;
+}
+
static int
offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs,
struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
if (unlikely(num_to_process - dequeued < burst_sz))
burst_sz = num_to_process - dequeued;
- rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ TEST_ASSERT_SUCCESS(ret, "rte_bbdev_op_alloc_bulk() failed");
if (test_vector.op_type != RTE_BBDEV_OP_NONE)
copy_reference_enc_op(ops_enq, burst_sz, dequeued,
bufs->inputs,
stats.acc_offload_cycles);
time_st->enq_acc_total_time += stats.acc_offload_cycles;
- /* ensure enqueue has been completed */
+ /* give time for device to process ops */
rte_delay_us(200);
/* Start time meas for dequeue function offload latency */
return i;
}
+
+static int
+offload_latency_test_ldpc_enc(struct rte_mempool *mempool,
+ struct test_buffers *bufs,
+ struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
+ uint16_t queue_id, const uint16_t num_to_process,
+ uint16_t burst_sz, struct test_time_stats *time_st)
+{
+ int i, dequeued, ret;
+ struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+ uint64_t enq_start_time, deq_start_time;
+ uint64_t enq_sw_last_time, deq_last_time;
+ struct rte_bbdev_stats stats;
+
+ for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
+ uint16_t enq = 0, deq = 0;
+
+ if (unlikely(num_to_process - dequeued < burst_sz))
+ burst_sz = num_to_process - dequeued;
+
+ ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ TEST_ASSERT_SUCCESS(ret, "rte_bbdev_op_alloc_bulk() failed");
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_enc_op(ops_enq, burst_sz, dequeued,
+ bufs->inputs,
+ bufs->hard_outputs,
+ ref_op);
+
+ /* Start time meas for enqueue function offload latency */
+ enq_start_time = rte_rdtsc_precise();
+ do {
+ enq += rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_enq[enq], burst_sz - enq);
+ } while (unlikely(burst_sz != enq));
+
+ ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
+ TEST_ASSERT_SUCCESS(ret,
+ "Failed to get stats for queue (%u) of device (%u)",
+ queue_id, dev_id);
+
+ enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
+ stats.acc_offload_cycles;
+ time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
+ enq_sw_last_time);
+ time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
+ enq_sw_last_time);
+ time_st->enq_sw_total_time += enq_sw_last_time;
+
+ time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_total_time += stats.acc_offload_cycles;
+
+ /* give time for device to process ops */
+ rte_delay_us(200);
+
+ /* Start time meas for dequeue function offload latency */
+ deq_start_time = rte_rdtsc_precise();
+ /* Dequeue one operation */
+ do {
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_deq[deq], 1);
+ } while (unlikely(deq != 1));
+
+ deq_last_time = rte_rdtsc_precise() - deq_start_time;
+ time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
+ deq_last_time);
+ time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
+ deq_last_time);
+ time_st->deq_total_time += deq_last_time;
+
+ while (burst_sz != deq)
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_deq[deq], burst_sz - deq);
+
+ rte_bbdev_enc_op_free_bulk(ops_enq, deq);
+ dequeued += deq;
+ }
+
+ return i;
+}
#endif
static int
op_type_str = rte_bbdev_op_type_str(op_type);
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
- printf(
- "\nOffload latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
+ printf("+ ------------------------------------------------------- +\n");
+ printf("== test: offload latency test\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
info.dev_name, burst_sz, num_to_process, op_type_str);
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
iter = offload_latency_test_dec(op_params->mp, bufs,
op_params->ref_dec_op, ad->dev_id, queue_id,
num_to_process, burst_sz, &time_st);
+ else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
+ iter = offload_latency_test_enc(op_params->mp, bufs,
+ op_params->ref_enc_op, ad->dev_id, queue_id,
+ num_to_process, burst_sz, &time_st);
+ else if (op_type == RTE_BBDEV_OP_LDPC_ENC)
+ iter = offload_latency_test_ldpc_enc(op_params->mp, bufs,
+ op_params->ref_enc_op, ad->dev_id, queue_id,
+ num_to_process, burst_sz, &time_st);
+ else if (op_type == RTE_BBDEV_OP_LDPC_DEC)
+ iter = offload_latency_test_ldpc_dec(op_params->mp, bufs,
+ op_params->ref_dec_op, ad->dev_id, queue_id,
+ num_to_process, burst_sz, &time_st);
else
iter = offload_latency_test_enc(op_params->mp, bufs,
op_params->ref_enc_op, ad->dev_id, queue_id,
if (iter <= 0)
return TEST_FAILED;
- printf("Enqueue offload cost latency:\n"
- "\tDriver offload avg %lg cycles, %lg us\n"
- "\tDriver offload min %lg cycles, %lg us\n"
- "\tDriver offload max %lg cycles, %lg us\n"
- "\tAccelerator offload avg %lg cycles, %lg us\n"
- "\tAccelerator offload min %lg cycles, %lg us\n"
- "\tAccelerator offload max %lg cycles, %lg us\n",
+ printf("Enqueue driver offload cost latency:\n"
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n"
+ "Enqueue accelerator offload cost latency:\n"
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n",
(double)time_st.enq_sw_total_time / (double)iter,
(double)(time_st.enq_sw_total_time * 1000000) /
(double)iter / (double)rte_get_tsc_hz(),
rte_get_tsc_hz());
printf("Dequeue offload cost latency - one op:\n"
- "\tavg %lg cycles, %lg us\n"
- "\tmin %lg cycles, %lg us\n"
- "\tmax %lg cycles, %lg us\n",
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n",
(double)time_st.deq_total_time / (double)iter,
(double)(time_st.deq_total_time * 1000000) /
(double)iter / (double)rte_get_tsc_hz(),
op_type_str = rte_bbdev_op_type_str(op_type);
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
- printf(
- "\nOffload latency empty dequeue test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
+ printf("+ ------------------------------------------------------- +\n");
+ printf("== test: offload latency empty dequeue\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
info.dev_name, burst_sz, num_to_process, op_type_str);
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
if (iter <= 0)
return TEST_FAILED;
- printf("Empty dequeue offload\n"
- "\tavg. latency: %lg cycles, %lg us\n"
- "\tmin. latency: %lg cycles, %lg us\n"
- "\tmax. latency: %lg cycles, %lg us\n",
+ printf("Empty dequeue offload:\n"
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n",
(double)deq_total_time / (double)iter,
(double)(deq_total_time * 1000000) / (double)iter /
(double)rte_get_tsc_hz(), (double)deq_min_time,