When replenishing the Rx ring, there are always buffered slots reserved
between the consumed entries and the HW-owned entries. These have to be
filled with fake mbufs to protect against possible overflow, rather than
optimistically expecting a successful replenishment, which can cause a
deadlock with a small-sized queue.
Fixes: fc048bd52cb7 ("net/mlx5: fix overflow of Rx SW ring")
Cc: stable@dpdk.org
Reported-by: Martin Weiser <martin.weiser@allegro-packets.com>
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Tested-by: Martin Weiser <martin.weiser@allegro-packets.com>
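
To illustrate the idea outside of the driver, here is a minimal standalone
sketch of the same guard: after posting n fresh buffers, the next
DESCS_PER_LOOP slots are padded with a dummy mbuf so a consumer that reads
ahead in vector-sized chunks never dereferences a consumed entry. The ring
size, the plain-array buffer source and the masking of the pad slots are
simplifying assumptions of this sketch, not the driver code from the hunks
below.

#include <stdint.h>
#include <stdio.h>

#define ELTS_LOG2N 4		/* 16-entry ring, assumed for the example */
#define DESCS_PER_LOOP 4	/* stands in for MLX5_VPMD_DESCS_PER_LOOP */

struct mbuf { char data[64]; };

struct ring {
	struct mbuf *elts[1 << ELTS_LOG2N];
	struct mbuf fake_mbuf;	/* queue-local dummy buffer */
	uint16_t rq_ci;		/* producer (replenish) index */
};

/*
 * Post n fresh buffers, then pad the next DESCS_PER_LOOP slots with the
 * fake mbuf so a vectorized consumer reading ahead never touches a
 * consumed entry.
 */
static void
replenish(struct ring *r, struct mbuf **bufs, uint16_t n)
{
	const uint16_t q_mask = (1 << ELTS_LOG2N) - 1;
	uint16_t i;

	for (i = 0; i < n; ++i)
		r->elts[(r->rq_ci + i) & q_mask] = bufs[i];
	r->rq_ci += n;
	/* Prevent overflowing into consumed mbufs. */
	for (i = 0; i < DESCS_PER_LOOP; ++i)
		r->elts[(r->rq_ci + i) & q_mask] = &r->fake_mbuf;
}

int
main(void)
{
	static struct ring r;
	struct mbuf pool[8];
	struct mbuf *bufs[8];
	uint16_t i;

	for (i = 0; i < 8; ++i)
		bufs[i] = &pool[i];
	replenish(&r, bufs, 8);
	/* Slots 8..11 now hold the fake mbuf guard. */
	printf("slot 8 holds the fake mbuf: %d\n", r.elts[8] == &r.fake_mbuf);
	return 0;
}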
{
const uint16_t q_n = 1 << rxq->elts_n;
const uint16_t q_mask = q_n - 1;
- const uint16_t elts_idx = rxq->rq_ci & q_mask;
+ uint16_t elts_idx = rxq->rq_ci & q_mask;
struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
unsigned int i;
wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
RTE_PKTMBUF_HEADROOM);
rxq->rq_ci += n;
+ /* Prevent overflowing into consumed mbufs. */
+ elts_idx = rxq->rq_ci & q_mask;
+ for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
+ (*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
rte_io_wmb();
*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}
31, 30 /* 4th mCQE */
};
- /*
- * Not to overflow elts array. Decompress next time after mbuf
- * replenishment.
- */
- if (unlikely(mcqe_n + MLX5_VPMD_DESCS_PER_LOOP >
- (uint16_t)(rxq->rq_ci - rxq->cq_ci)))
- return;
/*
* A. load mCQEs into a 128bit register.
* B. store rearm data to mbuf.
}
elts_idx = rxq->rq_pi & q_mask;
elts = &(*rxq->elts)[elts_idx];
- pkts_n = RTE_MIN(pkts_n - rcvd_pkt,
- (uint16_t)(rxq->rq_ci - rxq->cq_ci));
- /* Not to overflow pkts/elts array. */
- pkts_n = RTE_ALIGN_FLOOR(pkts_n, MLX5_VPMD_DESCS_PER_LOOP);
+ /* Not to overflow pkts array. */
+ pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
/* Not to cross queue end. */
pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
if (!pkts_n)
10, 11, 2, 3);
#endif
- /*
- * Not to overflow elts array. Decompress next time after mbuf
- * replenishment.
- */
- if (unlikely(mcqe_n + MLX5_VPMD_DESCS_PER_LOOP >
- (uint16_t)(rxq->rq_ci - rxq->cq_ci)))
- return;
/*
* A. load mCQEs into a 128bit register.
* B. store rearm data to mbuf.
}
elts_idx = rxq->rq_pi & q_mask;
elts = &(*rxq->elts)[elts_idx];
- pkts_n = RTE_MIN(pkts_n - rcvd_pkt,
- (uint16_t)(rxq->rq_ci - rxq->cq_ci));
- /* Not to overflow pkts/elts array. */
- pkts_n = RTE_ALIGN_FLOOR(pkts_n, MLX5_VPMD_DESCS_PER_LOOP);
+ /* Not to overflow pkts array. */
+ pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
/* Not to cross queue end. */
pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
if (!pkts_n)