/* Stores time statistics */
struct test_time_stats {
/* Stores software enqueue total working time */
- uint64_t enq_sw_tot_time;
+ uint64_t enq_sw_total_time;
/* Stores minimum value of software enqueue working time */
uint64_t enq_sw_min_time;
/* Stores maximum value of software enqueue working time */
uint64_t enq_sw_max_time;
- /* Stores turbo enqueue total working time */
- uint64_t enq_tur_tot_time;
- /* Stores minimum value of turbo enqueue working time */
- uint64_t enq_tur_min_time;
- /* Stores maximum value of turbo enqueue working time */
- uint64_t enq_tur_max_time;
+ /* Stores accelerator enqueue total working time */
+ uint64_t enq_acc_total_time;
+ /* Stores minimum value of accelerator enqueue working time */
+ uint64_t enq_acc_min_time;
+ /* Stores maximum value of accelerator enqueue working time */
+ uint64_t enq_acc_max_time;
/* Stores dequeue total working time */
- uint64_t deq_tot_time;
+ uint64_t deq_total_time;
/* Stores minimum value of dequeue working time */
uint64_t deq_min_time;
/* Stores maximum value of dequeue working time */
uint64_t deq_max_time;
};
burst_sz = tp->op_params->burst_sz;
num_to_process = tp->op_params->num_to_process;
- if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id, dec_ops,
burst_sz);
- else
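+ /* Return the dequeued ops to their mempool */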
+ rte_bbdev_dec_op_free_bulk(dec_ops, deq);
+ } else {
deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id, enc_ops,
burst_sz);
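+ /* Return the dequeued ops to their mempool */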
+ rte_bbdev_enc_op_free_bulk(enc_ops, deq);
+ }
if (deq < burst_sz) {
printf(
enqueued += rte_bbdev_enqueue_dec_ops(tp->dev_id, queue_id, ops,
num_to_enq);
-
- rte_bbdev_dec_op_free_bulk(ops, num_to_enq);
}
if (allocs_failed > 0)
enqueued += rte_bbdev_enqueue_enc_ops(tp->dev_id, queue_id, ops,
num_to_enq);
-
- rte_bbdev_enc_op_free_bulk(ops, num_to_enq);
}
if (allocs_failed > 0)
RTE_LCORE_FOREACH(lcore_id) {
if (iter++ >= used_cores)
break;
- printf("\tlcore_id: %u, throughput: %.8lg MOPS, %.8lg Mbps\n",
- lcore_id, t_params[lcore_id].mops, t_params[lcore_id].mbps);
+ printf("Throughput for core (%u): %.8lg MOPS, %.8lg Mbps\n",
+ lcore_id, t_params[lcore_id].mops,
+ t_params[lcore_id].mbps);
total_mops += t_params[lcore_id].mops;
total_mbps += t_params[lcore_id].mbps;
}
printf(
- "\n\tTotal stats for %u cores: throughput: %.8lg MOPS, %.8lg Mbps\n",
+ "\nTotal throughput for %u cores: %.8lg MOPS, %.8lg Mbps\n",
used_cores, total_mops, total_mbps);
}
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
printf(
- "Validation/Latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
+ "\nValidation/Latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
info.dev_name, burst_sz, num_to_process, op_type_str);
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
if (iter <= 0)
return TEST_FAILED;
- printf("\toperation latency:\n"
- "\t\tavg latency: %lg cycles, %lg us\n"
- "\t\tmin latency: %lg cycles, %lg us\n"
- "\t\tmax latency: %lg cycles, %lg us\n",
+ printf("Operation latency:\n"
+ "\tavg latency: %lg cycles, %lg us\n"
+ "\tmin latency: %lg cycles, %lg us\n"
+ "\tmax latency: %lg cycles, %lg us\n",
(double)total_time / (double)iter,
(double)(total_time * 1000000) / (double)iter /
(double)rte_get_tsc_hz(), (double)min_time,
stats->dequeued_count = q_stats->dequeued_count;
stats->enqueue_err_count = q_stats->enqueue_err_count;
stats->dequeue_err_count = q_stats->dequeue_err_count;
- stats->offload_time = q_stats->offload_time;
+ stats->acc_offload_cycles = q_stats->acc_offload_cycles;
return 0;
}
queue_id, dev_id);
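+ /* Subtract accelerator cycles so only the driver enqueue time is counted */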
enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
- stats.offload_time;
+ stats.acc_offload_cycles;
time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
enq_sw_last_time);
time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
enq_sw_last_time);
- time_st->enq_sw_tot_time += enq_sw_last_time;
+ time_st->enq_sw_total_time += enq_sw_last_time;
- time_st->enq_tur_max_time = RTE_MAX(time_st->enq_tur_max_time,
- stats.offload_time);
- time_st->enq_tur_min_time = RTE_MIN(time_st->enq_tur_min_time,
- stats.offload_time);
- time_st->enq_tur_tot_time += stats.offload_time;
+ time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_total_time += stats.acc_offload_cycles;
/* ensure enqueue has been completed */
rte_delay_ms(10);
deq_last_time);
time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
deq_last_time);
- time_st->deq_tot_time += deq_last_time;
+ time_st->deq_total_time += deq_last_time;
/* Dequeue remaining operations if needed */
while (burst_sz != deq)
queue_id, dev_id);
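+ /* Subtract accelerator cycles so only the driver enqueue time is counted */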
enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
- stats.offload_time;
+ stats.acc_offload_cycles;
time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
enq_sw_last_time);
time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
enq_sw_last_time);
- time_st->enq_sw_tot_time += enq_sw_last_time;
+ time_st->enq_sw_total_time += enq_sw_last_time;
- time_st->enq_tur_max_time = RTE_MAX(time_st->enq_tur_max_time,
- stats.offload_time);
- time_st->enq_tur_min_time = RTE_MIN(time_st->enq_tur_min_time,
- stats.offload_time);
- time_st->enq_tur_tot_time += stats.offload_time;
+ time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_total_time += stats.acc_offload_cycles;
/* ensure enqueue has been completed */
rte_delay_ms(10);
deq_last_time);
time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
deq_last_time);
- time_st->deq_tot_time += deq_last_time;
+ time_st->deq_total_time += deq_last_time;
while (burst_sz != deq)
deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
memset(&time_st, 0, sizeof(struct test_time_stats));
time_st.enq_sw_min_time = UINT64_MAX;
- time_st.enq_tur_min_time = UINT64_MAX;
+ time_st.enq_acc_min_time = UINT64_MAX;
time_st.deq_min_time = UINT64_MAX;
TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
printf(
- "Offload latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
+ "\nOffload latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
info.dev_name, burst_sz, num_to_process, op_type_str);
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
if (iter <= 0)
return TEST_FAILED;
- printf("\tenq offload cost latency:\n"
- "\t\tsoftware avg %lg cycles, %lg us\n"
- "\t\tsoftware min %lg cycles, %lg us\n"
- "\t\tsoftware max %lg cycles, %lg us\n"
- "\t\tturbo avg %lg cycles, %lg us\n"
- "\t\tturbo min %lg cycles, %lg us\n"
- "\t\tturbo max %lg cycles, %lg us\n",
- (double)time_st.enq_sw_tot_time / (double)iter,
- (double)(time_st.enq_sw_tot_time * 1000000) /
+ printf("Enqueue offload cost latency:\n"
+ "\tDriver offload avg %lg cycles, %lg us\n"
+ "\tDriver offload min %lg cycles, %lg us\n"
+ "\tDriver offload max %lg cycles, %lg us\n"
+ "\tAccelerator offload avg %lg cycles, %lg us\n"
+ "\tAccelerator offload min %lg cycles, %lg us\n"
+ "\tAccelerator offload max %lg cycles, %lg us\n",
+ (double)time_st.enq_sw_total_time / (double)iter,
+ (double)(time_st.enq_sw_total_time * 1000000) /
(double)iter / (double)rte_get_tsc_hz(),
(double)time_st.enq_sw_min_time,
(double)(time_st.enq_sw_min_time * 1000000) /
rte_get_tsc_hz(), (double)time_st.enq_sw_max_time,
(double)(time_st.enq_sw_max_time * 1000000) /
- rte_get_tsc_hz(), (double)time_st.enq_tur_tot_time /
+ rte_get_tsc_hz(), (double)time_st.enq_acc_total_time /
(double)iter,
- (double)(time_st.enq_tur_tot_time * 1000000) /
+ (double)(time_st.enq_acc_total_time * 1000000) /
(double)iter / (double)rte_get_tsc_hz(),
- (double)time_st.enq_tur_min_time,
- (double)(time_st.enq_tur_min_time * 1000000) /
- rte_get_tsc_hz(), (double)time_st.enq_tur_max_time,
- (double)(time_st.enq_tur_max_time * 1000000) /
+ (double)time_st.enq_acc_min_time,
+ (double)(time_st.enq_acc_min_time * 1000000) /
+ rte_get_tsc_hz(), (double)time_st.enq_acc_max_time,
+ (double)(time_st.enq_acc_max_time * 1000000) /
rte_get_tsc_hz());
- printf("\tdeq offload cost latency - one op:\n"
- "\t\tavg %lg cycles, %lg us\n"
- "\t\tmin %lg cycles, %lg us\n"
- "\t\tmax %lg cycles, %lg us\n",
- (double)time_st.deq_tot_time / (double)iter,
- (double)(time_st.deq_tot_time * 1000000) /
+ printf("Dequeue offload cost latency - one op:\n"
+ "\tavg %lg cycles, %lg us\n"
+ "\tmin %lg cycles, %lg us\n"
+ "\tmax %lg cycles, %lg us\n",
+ (double)time_st.deq_total_time / (double)iter,
+ (double)(time_st.deq_total_time * 1000000) /
(double)iter / (double)rte_get_tsc_hz(),
(double)time_st.deq_min_time,
(double)(time_st.deq_min_time * 1000000) /
static int
offload_latency_empty_q_test_dec(uint16_t dev_id, uint16_t queue_id,
const uint16_t num_to_process, uint16_t burst_sz,
- uint64_t *deq_tot_time, uint64_t *deq_min_time,
+ uint64_t *deq_total_time, uint64_t *deq_min_time,
uint64_t *deq_max_time)
{
int i, deq_total;
deq_last_time = rte_rdtsc_precise() - deq_start_time;
*deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
*deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
- *deq_tot_time += deq_last_time;
+ *deq_total_time += deq_last_time;
}
return i;
static int
offload_latency_empty_q_test_enc(uint16_t dev_id, uint16_t queue_id,
const uint16_t num_to_process, uint16_t burst_sz,
- uint64_t *deq_tot_time, uint64_t *deq_min_time,
+ uint64_t *deq_total_time, uint64_t *deq_min_time,
uint64_t *deq_max_time)
{
int i, deq_total;
deq_last_time = rte_rdtsc_precise() - deq_start_time;
*deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
*deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
- *deq_tot_time += deq_last_time;
+ *deq_total_time += deq_last_time;
}
return i;
return TEST_SKIPPED;
#else
int iter;
- uint64_t deq_tot_time, deq_min_time, deq_max_time;
+ uint64_t deq_total_time, deq_min_time, deq_max_time;
uint16_t burst_sz = op_params->burst_sz;
const uint16_t num_to_process = op_params->num_to_process;
const enum rte_bbdev_op_type op_type = test_vector.op_type;
struct rte_bbdev_info info;
const char *op_type_str;
- deq_tot_time = deq_max_time = 0;
+ deq_total_time = deq_max_time = 0;
deq_min_time = UINT64_MAX;
TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
printf(
- "Offload latency empty dequeue test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
+ "\nOffload latency empty dequeue test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
info.dev_name, burst_sz, num_to_process, op_type_str);
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
iter = offload_latency_empty_q_test_dec(ad->dev_id, queue_id,
- num_to_process, burst_sz, &deq_tot_time,
+ num_to_process, burst_sz, &deq_total_time,
&deq_min_time, &deq_max_time);
else
iter = offload_latency_empty_q_test_enc(ad->dev_id, queue_id,
- num_to_process, burst_sz, &deq_tot_time,
+ num_to_process, burst_sz, &deq_total_time,
&deq_min_time, &deq_max_time);
if (iter <= 0)
return TEST_FAILED;
- printf("\tempty deq offload\n"
- "\t\tavg. latency: %lg cycles, %lg us\n"
- "\t\tmin. latency: %lg cycles, %lg us\n"
- "\t\tmax. latency: %lg cycles, %lg us\n",
- (double)deq_tot_time / (double)iter,
- (double)(deq_tot_time * 1000000) / (double)iter /
+ printf("Empty dequeue offload\n"
+ "\tavg. latency: %lg cycles, %lg us\n"
+ "\tmin. latency: %lg cycles, %lg us\n"
+ "\tmax. latency: %lg cycles, %lg us\n",
+ (double)deq_total_time / (double)iter,
+ (double)(deq_total_time * 1000000) / (double)iter /
(double)rte_get_tsc_hz(), (double)deq_min_time,
(double)(deq_min_time * 1000000) / rte_get_tsc_hz(),
(double)deq_max_time, (double)(deq_max_time * 1000000) /
#ifdef RTE_BBDEV_OFFLOAD_COST
start_time = rte_rdtsc_precise();
#endif
+ /* CRC24A generation */
bblib_lte_crc24a_gen(&crc_req, &crc_resp);
#ifdef RTE_BBDEV_OFFLOAD_COST
- q_stats->offload_time += rte_rdtsc_precise() - start_time;
+ q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
#endif
} else if (enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) {
/* CRC24B */
#ifdef RTE_BBDEV_OFFLOAD_COST
start_time = rte_rdtsc_precise();
#endif
+ /* CRC24B generation */
bblib_lte_crc24b_gen(&crc_req, &crc_resp);
#ifdef RTE_BBDEV_OFFLOAD_COST
- q_stats->offload_time += rte_rdtsc_precise() - start_time;
+ q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
#endif
} else {
ret = is_enc_input_valid(k, k_idx, total_left);
#ifdef RTE_BBDEV_OFFLOAD_COST
start_time = rte_rdtsc_precise();
#endif
-
+ /* Turbo encoding */
if (bblib_turbo_encoder(&turbo_req, &turbo_resp) != 0) {
op->status |= 1 << RTE_BBDEV_DRV_ERROR;
rte_bbdev_log(ERR, "Turbo Encoder failed");
return;
}
-
#ifdef RTE_BBDEV_OFFLOAD_COST
- q_stats->offload_time += rte_rdtsc_precise() - start_time;
+ q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
#endif
/* Restore the first 3 bytes of the next CB if they were overwritten by CRC */
#ifdef RTE_BBDEV_OFFLOAD_COST
start_time = rte_rdtsc_precise();
#endif
-
+ /* Rate-Matching */
if (bblib_rate_match_dl(&rm_req, &rm_resp) != 0) {
op->status |= 1 << RTE_BBDEV_DRV_ERROR;
rte_bbdev_log(ERR, "Rate matching failed");
return;
}
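+ /* Accumulate the offload cycles here so the bit masking below is not counted */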
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
+#endif
/* SW fills an entire last byte even if E%8 != 0. Clear the
* superfluous data bits for consistency with HW device.
*/
mask_id = (e & 7) >> 1;
rm_out[out_len - 1] &= mask_out[mask_id];
-
-#ifdef RTE_BBDEV_OFFLOAD_COST
- q_stats->offload_time += rte_rdtsc_precise() - start_time;
-#endif
-
enc->output.length += rm_resp.OutputLen;
} else {
/* Rate matching is bypassed */
{
uint16_t i;
#ifdef RTE_BBDEV_OFFLOAD_COST
- queue_stats->offload_time = 0;
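+ /* Reset the accelerator offload cycle count for this burst */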
+ queue_stats->acc_offload_cycles = 0;
#endif
for (i = 0; i < nb_ops; ++i)
process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
uint8_t c, uint16_t k, uint16_t kw, struct rte_mbuf *m_in,
struct rte_mbuf *m_out, uint16_t in_offset, uint16_t out_offset,
- bool check_crc_24b, uint16_t crc24_overlap, uint16_t total_left)
+ bool check_crc_24b, uint16_t crc24_overlap, uint16_t total_left,
+ struct rte_bbdev_stats *q_stats)
{
int ret;
int32_t k_idx;
struct bblib_turbo_decoder_request turbo_req;
struct bblib_turbo_decoder_response turbo_resp;
struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ uint64_t start_time;
+#else
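+ /* Avoid an unused-parameter warning when offload cost measurement is off */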
+ RTE_SET_USED(q_stats);
+#endif
k_idx = compute_idx(k);
deint_req.pharqbuffer = q->deint_input;
deint_req.ncb = ncb_without_null;
deint_resp.pinteleavebuffer = q->deint_output;
+
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
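+ /* Deinterleaving */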
bblib_deinterleave_ul(&deint_req, &deint_resp);
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
+#endif
} else
move_padding_bytes(in, q->deint_output, k, ncb);
adapter_req.ncb = ncb_without_null;
adapter_req.pinteleavebuffer = adapter_input;
adapter_resp.pharqout = q->adapter_output;
+
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
+ /* Turbo decode adaptation */
bblib_turbo_adapter_ul(&adapter_req, &adapter_resp);
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
+#endif
out = (uint8_t *)rte_pktmbuf_append(m_out, ((k - crc24_overlap) >> 3));
if (out == NULL) {
turbo_resp.ag_buf = q->ag;
turbo_resp.cb_buf = q->code_block;
turbo_resp.output = out;
+
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
+ /* Turbo decode */
iter_cnt = bblib_turbo_decoder(&turbo_req, &turbo_resp);
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
+#endif
dec->hard_output.length += (k >> 3);
if (iter_cnt > 0) {
/* Temporary solution for returned iter_count from SDK */
- iter_cnt = (iter_cnt - 1) / 2;
+ iter_cnt = (iter_cnt - 1) >> 1;
dec->iter_count = RTE_MAX(iter_cnt, dec->iter_count);
} else {
op->status |= 1 << RTE_BBDEV_DATA_ERROR;
}
static inline void
-enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op)
+enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
+ struct rte_bbdev_stats *queue_stats)
{
uint8_t c, r = 0;
uint16_t kw, k = 0;
process_dec_cb(q, op, c, k, kw, m_in, m_out, in_offset,
out_offset, check_bit(dec->op_flags,
RTE_BBDEV_TURBO_CRC_TYPE_24B), crc24_overlap,
- total_left);
+ total_left, queue_stats);
/* To keep CRC24 attached to end of Code block, use
* RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP flag as it
* is removed by default once verified.
static inline uint16_t
enqueue_dec_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_dec_op **ops,
- uint16_t nb_ops)
+ uint16_t nb_ops, struct rte_bbdev_stats *queue_stats)
{
uint16_t i;
+#ifdef RTE_BBDEV_OFFLOAD_COST
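+ /* Reset the accelerator offload cycle count for this burst */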
+ queue_stats->acc_offload_cycles = 0;
+#endif
for (i = 0; i < nb_ops; ++i)
- enqueue_dec_one_op(q, ops[i]);
+ enqueue_dec_one_op(q, ops[i], queue_stats);
return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
NULL);
struct turbo_sw_queue *q = queue;
uint16_t nb_enqueued = 0;
- nb_enqueued = enqueue_dec_all_ops(q, ops, nb_ops);
+ nb_enqueued = enqueue_dec_all_ops(q, ops, nb_ops, &q_data->queue_stats);
q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
q_data->queue_stats.enqueued_count += nb_enqueued;