uint16_t num_to_process;
uint16_t num_lcores;
int vector_mask;
- rte_atomic16_t sync;
+ uint16_t sync;
struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
};
uint8_t iter_count;
double iter_average;
double bler;
- rte_atomic16_t nb_dequeued;
- rte_atomic16_t processing_status;
- rte_atomic16_t burst_sz;
+ uint16_t nb_dequeued;
+ int16_t processing_status;
+ uint16_t burst_sz;
struct test_op_params *op_params;
struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
*op_flags &= ~RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT;
}
+/* This API is to convert all the test vector op data entries
+ * to big endian format. It is used when the device supports
+ * the input in the big endian format.
+ */
+/* NOTE(review): mutates the global test_vector in place; running this
+ * twice would swap the bytes back to the original order — ensure the
+ * caller invokes it at most once per test run.
+ */
+static inline void
+convert_op_data_to_be(void)
+{
+ struct op_data_entries *op;
+ enum op_data_type type;
+ uint8_t nb_segs, *rem_data, temp;
+ uint32_t *data, len;
+ int complete, rem, i, j;
+
+ /* Walk every op data type (input, outputs, HARQ, ...) in the vector */
+ for (type = DATA_INPUT; type < DATA_NUM_TYPES; ++type) {
+ nb_segs = test_vector.entries[type].nb_segments;
+ op = &test_vector.entries[type];
+
+ /* Invert byte endianness for all the segments */
+ for (i = 0; i < nb_segs; ++i) {
+ len = op->segments[i].length;
+ /* NOTE(review): assumes segment addr is 32-bit aligned so the
+ * uint32_t accesses below are valid — confirm allocator alignment.
+ */
+ data = op->segments[i].addr;
+
+ /* Swap complete u32 bytes */
+ complete = len / 4;
+ for (j = 0; j < complete; j++)
+ data[j] = rte_bswap32(data[j]);
+
+ /* Swap any remaining bytes */
+ /* Reverse the trailing 1-3 bytes in place; after the loop above,
+ * j == complete, so &data[j] points at the first leftover byte.
+ */
+ rem = len % 4;
+ rem_data = (uint8_t *)&data[j];
+ for (j = 0; j < rem/2; j++) {
+ temp = rem_data[j];
+ rem_data[j] = rem_data[rem - j - 1];
+ rem_data[rem - j - 1] = temp;
+ }
+ }
+ }
+}
+
static int
check_dev_cap(const struct rte_bbdev_info *dev_info)
{
unsigned int nb_inputs, nb_soft_outputs, nb_hard_outputs,
nb_harq_inputs, nb_harq_outputs;
const struct rte_bbdev_op_cap *op_cap = dev_info->drv.capabilities;
+ uint8_t dev_data_endianness = dev_info->drv.data_endianness;
nb_inputs = test_vector.entries[DATA_INPUT].nb_segments;
nb_soft_outputs = test_vector.entries[DATA_SOFT_OUTPUT].nb_segments;
if (op_cap->type != test_vector.op_type)
continue;
+ if (dev_data_endianness == RTE_BIG_ENDIAN)
+ convert_op_data_to_be();
+
if (op_cap->type == RTE_BBDEV_OP_TURBO_DEC) {
const struct rte_bbdev_op_cap_turbo_dec *cap =
&op_cap->cap.turbo_dec;
if (nb_harq_inputs > cap->num_buffers_hard_out) {
printf(
"Too many HARQ inputs defined: %u, max: %u\n",
- nb_hard_outputs,
+ nb_harq_inputs,
cap->num_buffers_hard_out);
return TEST_FAILED;
}
if (nb_harq_outputs > cap->num_buffers_hard_out) {
printf(
"Too many HARQ outputs defined: %u, max: %u\n",
- nb_hard_outputs,
+ nb_harq_outputs,
cap->num_buffers_hard_out);
return TEST_FAILED;
}
if ((op_type == DATA_INPUT) && large_input) {
/* Allocate a fake overused mbuf */
data = rte_malloc(NULL, seg->length, 0);
+ TEST_ASSERT_NOT_NULL(data,
+ "rte malloc failed with %u bytes",
+ seg->length);
memcpy(data, seg->addr, seg->length);
m_head->buf_addr = data;
m_head->buf_iova = rte_malloc_virt2iova(data);
struct rte_bbdev_op_turbo_dec *turbo_dec = &ref_op->turbo_dec;
for (i = 0; i < n; ++i) {
- if (turbo_dec->code_block_mode == 0) {
+ if (turbo_dec->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
ops[i]->turbo_dec.tb_params.ea =
turbo_dec->tb_params.ea;
ops[i]->turbo_dec.tb_params.eb =
unsigned int i;
struct rte_bbdev_op_turbo_enc *turbo_enc = &ref_op->turbo_enc;
for (i = 0; i < n; ++i) {
- if (turbo_enc->code_block_mode == 0) {
+ if (turbo_enc->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
ops[i]->turbo_enc.tb_params.ea =
turbo_enc->tb_params.ea;
ops[i]->turbo_enc.tb_params.eb =
struct rte_bbdev_op_ldpc_dec *ldpc_dec = &ref_op->ldpc_dec;
for (i = 0; i < n; ++i) {
- if (ldpc_dec->code_block_mode == 0) {
+ if (ldpc_dec->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
ops[i]->ldpc_dec.tb_params.ea =
ldpc_dec->tb_params.ea;
ops[i]->ldpc_dec.tb_params.eb =
unsigned int i;
struct rte_bbdev_op_ldpc_enc *ldpc_enc = &ref_op->ldpc_enc;
for (i = 0; i < n; ++i) {
- if (ldpc_enc->code_block_mode == 0) {
+ if (ldpc_enc->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
ops[i]->ldpc_enc.tb_params.ea = ldpc_enc->tb_params.ea;
ops[i]->ldpc_enc.tb_params.eb = ldpc_enc->tb_params.eb;
ops[i]->ldpc_enc.tb_params.cab =
uint8_t i;
uint32_t c, r, tb_size = 0;
- if (op->turbo_dec.code_block_mode) {
+ if (op->turbo_dec.code_block_mode == RTE_BBDEV_CODE_BLOCK) {
tb_size = op->turbo_dec.tb_params.k_neg;
} else {
c = op->turbo_dec.tb_params.c;
uint32_t c, r, tb_size = 0;
uint16_t sys_cols = (op->ldpc_dec.basegraph == 1) ? 22 : 10;
- if (op->ldpc_dec.code_block_mode) {
+ if (op->ldpc_dec.code_block_mode == RTE_BBDEV_CODE_BLOCK) {
tb_size = sys_cols * op->ldpc_dec.z_c - op->ldpc_dec.n_filler;
} else {
c = op->ldpc_dec.tb_params.c;
uint8_t i;
uint32_t c, r, tb_size = 0;
- if (op->turbo_enc.code_block_mode) {
+ if (op->turbo_enc.code_block_mode == RTE_BBDEV_CODE_BLOCK) {
tb_size = op->turbo_enc.tb_params.k_neg;
} else {
c = op->turbo_enc.tb_params.c;
uint32_t c, r, tb_size = 0;
uint16_t sys_cols = (op->ldpc_enc.basegraph == 1) ? 22 : 10;
- if (op->turbo_enc.code_block_mode) {
+ if (op->ldpc_enc.code_block_mode == RTE_BBDEV_CODE_BLOCK) {
tb_size = sys_cols * op->ldpc_enc.z_c - op->ldpc_enc.n_filler;
} else {
c = op->turbo_enc.tb_params.c;
}
if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {
- rte_atomic16_set(&tp->processing_status, TEST_FAILED);
+ __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
printf(
"Dequeue interrupt handler called for incorrect event!\n");
return;
}
- burst_sz = rte_atomic16_read(&tp->burst_sz);
+ burst_sz = __atomic_load_n(&tp->burst_sz, __ATOMIC_RELAXED);
num_ops = tp->op_params->num_to_process;
if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
&tp->dec_ops[
- rte_atomic16_read(&tp->nb_dequeued)],
+ __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
deq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
&tp->dec_ops[
- rte_atomic16_read(&tp->nb_dequeued)],
+ __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
deq = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
&tp->enc_ops[
- rte_atomic16_read(&tp->nb_dequeued)],
+ __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
burst_sz);
else /*RTE_BBDEV_OP_TURBO_ENC*/
deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
&tp->enc_ops[
- rte_atomic16_read(&tp->nb_dequeued)],
+ __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
burst_sz);
if (deq < burst_sz) {
printf(
"After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n",
burst_sz, deq);
- rte_atomic16_set(&tp->processing_status, TEST_FAILED);
+ __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
return;
}
- if (rte_atomic16_read(&tp->nb_dequeued) + deq < num_ops) {
- rte_atomic16_add(&tp->nb_dequeued, deq);
+ if (__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) + deq < num_ops) {
+ __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
return;
}
if (ret) {
printf("Buffers validation failed\n");
- rte_atomic16_set(&tp->processing_status, TEST_FAILED);
+ __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
}
switch (test_vector.op_type) {
break;
default:
printf("Unknown op type: %d\n", test_vector.op_type);
- rte_atomic16_set(&tp->processing_status, TEST_FAILED);
+ __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
return;
}
tp->mbps += (((double)(num_ops * tb_len_bits)) / 1000000.0) /
((double)total_time / (double)rte_get_tsc_hz());
- rte_atomic16_add(&tp->nb_dequeued, deq);
+ __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
}
static int
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_atomic16_clear(&tp->processing_status);
- rte_atomic16_clear(&tp->nb_dequeued);
+ __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
- while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
- rte_pause();
+ rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
* the number of operations is not a multiple of
* burst size.
*/
- rte_atomic16_set(&tp->burst_sz, num_to_enq);
+ __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
/* Wait until processing of previous batch is
* completed
*/
- while (rte_atomic16_read(&tp->nb_dequeued) !=
- (int16_t) enqueued)
- rte_pause();
+ rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
}
if (j != TEST_REPETITIONS - 1)
- rte_atomic16_clear(&tp->nb_dequeued);
+ __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
}
return TEST_SUCCESS;
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_atomic16_clear(&tp->processing_status);
- rte_atomic16_clear(&tp->nb_dequeued);
+ __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
- while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
- rte_pause();
+ rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
* the number of operations is not a multiple of
* burst size.
*/
- rte_atomic16_set(&tp->burst_sz, num_to_enq);
+ __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
/* Wait until processing of previous batch is
* completed
*/
- while (rte_atomic16_read(&tp->nb_dequeued) !=
- (int16_t) enqueued)
- rte_pause();
+ rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
}
if (j != TEST_REPETITIONS - 1)
- rte_atomic16_clear(&tp->nb_dequeued);
+ __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
}
return TEST_SUCCESS;
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_atomic16_clear(&tp->processing_status);
- rte_atomic16_clear(&tp->nb_dequeued);
+ __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
- while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
- rte_pause();
+ rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
* the number of operations is not a multiple of
* burst size.
*/
- rte_atomic16_set(&tp->burst_sz, num_to_enq);
+ __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
/* Wait until processing of previous batch is
* completed
*/
- while (rte_atomic16_read(&tp->nb_dequeued) !=
- (int16_t) enqueued)
- rte_pause();
+ rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
}
if (j != TEST_REPETITIONS - 1)
- rte_atomic16_clear(&tp->nb_dequeued);
+ __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
}
return TEST_SUCCESS;
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_atomic16_clear(&tp->processing_status);
- rte_atomic16_clear(&tp->nb_dequeued);
+ __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
- while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
- rte_pause();
+ rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
* the number of operations is not a multiple of
* burst size.
*/
- rte_atomic16_set(&tp->burst_sz, num_to_enq);
+ __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
/* Wait until processing of previous batch is
* completed
*/
- while (rte_atomic16_read(&tp->nb_dequeued) !=
- (int16_t) enqueued)
- rte_pause();
+ rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
}
if (j != TEST_REPETITIONS - 1)
- rte_atomic16_clear(&tp->nb_dequeued);
+ __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
}
return TEST_SUCCESS;
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
- rte_pause();
+ rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
- rte_pause();
+ rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
- rte_pause();
+ rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
- rte_pause();
+ rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
num_ops);
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
- rte_pause();
+ rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
num_ops);
else
return TEST_SKIPPED;
- rte_atomic16_set(&op_params->sync, SYNC_WAIT);
+ __atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
/* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
&t_params[used_cores++], lcore_id);
}
- rte_atomic16_set(&op_params->sync, SYNC_START);
+ __atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
ret = bler_function(&t_params[0]);
/* Main core is always used */
throughput_function = throughput_pmd_lcore_enc;
}
- rte_atomic16_set(&op_params->sync, SYNC_WAIT);
+ __atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
/* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
&t_params[used_cores++], lcore_id);
}
- rte_atomic16_set(&op_params->sync, SYNC_START);
+ __atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
ret = throughput_function(&t_params[0]);
/* Main core is always used */
* Wait for main lcore operations.
*/
tp = &t_params[0];
- while ((rte_atomic16_read(&tp->nb_dequeued) <
- op_params->num_to_process) &&
- (rte_atomic16_read(&tp->processing_status) !=
- TEST_FAILED))
+ while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+ op_params->num_to_process) &&
+ (__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
+ TEST_FAILED))
rte_pause();
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= (int)rte_atomic16_read(&tp->processing_status);
+ ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
/* Wait for worker lcores operations */
for (used_cores = 1; used_cores < num_lcores; used_cores++) {
tp = &t_params[used_cores];
- while ((rte_atomic16_read(&tp->nb_dequeued) <
- op_params->num_to_process) &&
- (rte_atomic16_read(&tp->processing_status) !=
- TEST_FAILED))
+ while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+ op_params->num_to_process) &&
+ (__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
+ TEST_FAILED))
rte_pause();
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= (int)rte_atomic16_read(&tp->processing_status);
+ ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
}
/* Print throughput if test passed */
/* Dequeue one operation */
do {
deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
- &ops_deq[deq], 1);
- } while (unlikely(deq != 1));
+ &ops_deq[deq], enq);
+ } while (unlikely(deq == 0));
deq_last_time = rte_rdtsc_precise() - deq_start_time;
time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
/* Dequeue one operation */
do {
deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
- &ops_deq[deq], 1);
- } while (unlikely(deq != 1));
+ &ops_deq[deq], enq);
+ } while (unlikely(deq == 0));
deq_last_time = rte_rdtsc_precise() - deq_start_time;
time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
/* Dequeue one operation */
do {
deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
- &ops_deq[deq], 1);
- } while (unlikely(deq != 1));
+ &ops_deq[deq], enq);
+ } while (unlikely(deq == 0));
deq_last_time = rte_rdtsc_precise() - deq_start_time;
time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
/* Dequeue one operation */
do {
deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
- &ops_deq[deq], 1);
- } while (unlikely(deq != 1));
+ &ops_deq[deq], enq);
+ } while (unlikely(deq == 0));
deq_last_time = rte_rdtsc_precise() - deq_start_time;
time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,