-1UL << ((mcqe_n - pos) *
sizeof(uint16_t) * 8) : 0);
#endif
+
for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
if (likely(pos + i < mcqe_n))
rte_prefetch0((void *)(cq + pos + i));
elts[pos + 2]->hash.fdir.hi = flow_tag;
elts[pos + 3]->hash.fdir.hi = flow_tag;
}
- if (!!rxq->flow_meta_mask) {
+ if (rxq->dynf_meta) {
int32_t offs = rxq->flow_meta_offset;
const uint32_t meta =
*RTE_MBUF_DYNFIELD(t_pkt, offs, uint32_t *);
		/* Check if the title packet has valid metadata. */
if (meta) {
- MLX5_ASSERT(t_pkt->ol_flags & offs);
+ MLX5_ASSERT(t_pkt->ol_flags &
+ rxq->flow_meta_mask);
*RTE_MBUF_DYNFIELD(elts[pos], offs,
uint32_t *) = meta;
*RTE_MBUF_DYNFIELD(elts[pos + 1], offs,
pos += MLX5_VPMD_DESCS_PER_LOOP;
/* Move to next CQE and invalidate consumed CQEs. */
if (!(pos & 0x7) && pos < mcqe_n) {
+ if (pos + 8 < mcqe_n)
+ rte_prefetch0((void *)(cq + pos + 8));
mcq = (void *)&(cq + pos)->pkt_info;
for (i = 0; i < 8; ++i)
cq[inv++].op_own = MLX5_CQE_INVALIDATE;
* @param[out] err
 *   Pointer to a flag. Set to a non-zero value if the pkts array has at
 *   least one error packet to handle.
+ * @param[out] no_cq
+ *   Pointer to a boolean. Set to true if no new CQE was seen.
*
* @return
* Number of packets received including errors (<= pkts_n).
*/
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
- uint64_t *err)
+ uint64_t *err, bool *no_cq)
{
const uint16_t q_n = 1 << rxq->cqe_n;
const uint16_t q_mask = q_n - 1;
	/* Do not cross the queue end. */
pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
- if (!pkts_n)
+ if (!pkts_n) {
+ *no_cq = !rcvd_pkt;
return rcvd_pkt;
+ }
	/* At this point, there shouldn't be any remaining packets. */
MLX5_ASSERT(rxq->decompressed == 0);
/*
rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
opcode, &elts[pos]);
if (rxq->hw_timestamp) {
- elts[pos]->timestamp =
- rte_be_to_cpu_64(
- container_of(p0, struct mlx5_cqe,
- pkt_info)->timestamp);
- elts[pos + 1]->timestamp =
- rte_be_to_cpu_64(
- container_of(p1, struct mlx5_cqe,
- pkt_info)->timestamp);
- elts[pos + 2]->timestamp =
- rte_be_to_cpu_64(
- container_of(p2, struct mlx5_cqe,
- pkt_info)->timestamp);
- elts[pos + 3]->timestamp =
- rte_be_to_cpu_64(
- container_of(p3, struct mlx5_cqe,
- pkt_info)->timestamp);
+ if (rxq->rt_timestamp) {
+ struct mlx5_dev_ctx_shared *sh = rxq->sh;
+ uint64_t ts;
+
+ ts = rte_be_to_cpu_64
+ (container_of(p0, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ ts = rte_be_to_cpu_64
+ (container_of(p1, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 1]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ ts = rte_be_to_cpu_64
+ (container_of(p2, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 2]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ ts = rte_be_to_cpu_64
+ (container_of(p3, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 3]->timestamp =
+ mlx5_txpp_convert_rx_ts(sh, ts);
+ } else {
+ elts[pos]->timestamp = rte_be_to_cpu_64
+ (container_of(p0, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 1]->timestamp = rte_be_to_cpu_64
+ (container_of(p1, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 2]->timestamp = rte_be_to_cpu_64
+ (container_of(p2, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 3]->timestamp = rte_be_to_cpu_64
+ (container_of(p3, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ }
}
- if (!!rxq->flow_meta_mask) {
+ if (rxq->dynf_meta) {
			/* This code is subject to further optimization. */
int32_t offs = rxq->flow_meta_offset;
*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
container_of(p0, struct mlx5_cqe,
pkt_info)->flow_table_metadata;
- *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
+ *RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
container_of(p1, struct mlx5_cqe,
pkt_info)->flow_table_metadata;
- *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
+ *RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
container_of(p2, struct mlx5_cqe,
pkt_info)->flow_table_metadata;
- *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
+ *RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
container_of(p3, struct mlx5_cqe,
pkt_info)->flow_table_metadata;
if (*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *))
break;
}
/* If no new CQE seen, return without updating cq_db. */
- if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
+ if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) {
+ *no_cq = true;
return rcvd_pkt;
+ }
/* Update the consumer indexes for non-compressed CQEs. */
MLX5_ASSERT(nocmp_n <= pkts_n);
rxq->cq_ci += nocmp_n;
}
rte_cio_wmb();
*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+ *no_cq = !rcvd_pkt;
return rcvd_pkt;
}
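
Note on the new no_cq out-parameter: it lets the caller distinguish "no packets because the CQ ring is drained" from "no packets yet, keep polling", so the burst wrapper can bail out instead of spinning until pkts_n packets arrive. Below is a minimal sketch of such a caller loop, assuming the driver's rxq_handle_pending_error() helper; the wrapper name is illustrative, since the actual call site is not part of this excerpt:

static uint16_t
rx_burst_vec_wrapper(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
		     uint16_t pkts_n)
{
	uint16_t tn = 0;
	uint64_t err = 0;
	bool no_cq = false;

	do {
		uint16_t n = rxq_burst_v(rxq, pkts + tn, pkts_n - tn,
					 &err, &no_cq);

		/* Recover faulty descriptors before accounting. */
		if (unlikely(err | rxq->err_state))
			n = rxq_handle_pending_error(rxq, pkts + tn, n);
		tn += n;
		/* No new CQE was seen: the ring is empty, stop polling. */
		if (unlikely(no_cq))
			break;
	} while (tn != pkts_n);
	return tn;
}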
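On the !!rxq->flow_meta_mask -> rxq->dynf_meta change: the copy of flow_table_metadata is now gated on a dedicated flag recording that the metadata dynamic mbuf field was registered, rather than re-deriving that from the mask on every burst. A sketch of how the three rxq fields used above would typically be populated at queue setup, using the real rte_flow dynamic-field API (the actual assignment site is outside this excerpt, so the wiring here is illustrative):

#include <rte_errno.h>
#include <rte_flow.h>

static int
rxq_setup_meta(struct mlx5_rxq_data *rxq)
{
	/* Register the 32-bit metadata dynamic field and its flag bit. */
	if (rte_flow_dynf_metadata_register() < 0)
		return -rte_errno;
	rxq->dynf_meta = 1;
	rxq->flow_meta_offset = rte_flow_dynf_metadata_offs;
	rxq->flow_meta_mask = rte_flow_dynf_metadata_mask;
	return 0;
}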