for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
	if (likely(pos + i < mcqe_n))
		rte_prefetch0((void *)(cq + pos + i));
-
/* A.1 load mCQEs into a 128bit register. */
mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);
pos += MLX5_VPMD_DESCS_PER_LOOP;
/* Move to next CQE and invalidate consumed CQEs. */
if (!(pos & 0x7) && pos < mcqe_n) {
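+	/* Prefetch ahead: the CQEs at pos + 8 are consumed on the next pass. */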
+	if (pos + 8 < mcqe_n)
+		rte_prefetch0((void *)(cq + pos + 8));
	mcq = (void *)(cq + pos);
	for (i = 0; i < 8; ++i)
		cq[inv++].op_own = MLX5_CQE_INVALIDATE;
}

/* D.5 fill in mbuf - rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
if (rxq->hw_timestamp) {
-	pkts[pos]->timestamp =
-		rte_be_to_cpu_64(cq[pos].timestamp);
-	pkts[pos + 1]->timestamp =
-		rte_be_to_cpu_64(cq[pos + p1].timestamp);
-	pkts[pos + 2]->timestamp =
-		rte_be_to_cpu_64(cq[pos + p2].timestamp);
-	pkts[pos + 3]->timestamp =
-		rte_be_to_cpu_64(cq[pos + p3].timestamp);
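+	/* rt_timestamp set: convert raw CQE clock values to realtime format. */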
+	if (rxq->rt_timestamp) {
+		struct mlx5_dev_ctx_shared *sh = rxq->sh;
+		uint64_t ts;
+
+		ts = rte_be_to_cpu_64(cq[pos].timestamp);
+		pkts[pos]->timestamp =
+			mlx5_txpp_convert_rx_ts(sh, ts);
+		ts = rte_be_to_cpu_64(cq[pos + p1].timestamp);
+		pkts[pos + 1]->timestamp =
+			mlx5_txpp_convert_rx_ts(sh, ts);
+		ts = rte_be_to_cpu_64(cq[pos + p2].timestamp);
+		pkts[pos + 2]->timestamp =
+			mlx5_txpp_convert_rx_ts(sh, ts);
+		ts = rte_be_to_cpu_64(cq[pos + p3].timestamp);
+		pkts[pos + 3]->timestamp =
+			mlx5_txpp_convert_rx_ts(sh, ts);
+	} else {
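+		/* Keep timestamps as raw HW clock values (no conversion). */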
+		pkts[pos]->timestamp = rte_be_to_cpu_64
+				(cq[pos].timestamp);
+		pkts[pos + 1]->timestamp = rte_be_to_cpu_64
+				(cq[pos + p1].timestamp);
+		pkts[pos + 2]->timestamp = rte_be_to_cpu_64
+				(cq[pos + p2].timestamp);
+		pkts[pos + 3]->timestamp = rte_be_to_cpu_64
+				(cq[pos + p3].timestamp);
+	}
}
if (rxq->dynf_meta) {
	/* This code is subject to further optimization. */