From 828274b70ad10bc1ae54c4b4b446e7f42f181521 Mon Sep 17 00:00:00 2001
From: Alexander Kozyrev
Date: Wed, 4 Aug 2021 09:23:16 +0300
Subject: [PATCH] net/mlx5: fix mbuf replenishment check for zipped CQE

A core dump is being generated with the following call stack:

0  _mm256_storeu_si256 (__A=..., __P=0x80)
1  rte_mov32 (src=0x2299c9140 "", dst=0x80)
2  rte_memcpy_aligned (n=60, src=0x2299c9140, dst=0x80)
3  rte_memcpy (n=60, src=0x2299c9140, dst=0x80)
4  mprq_buf_to_pkt (strd_cnt=1, strd_idx=0, buf=0x2299c8a00,
   len=60, pkt=0x18345f0c0, rxq=0x18345ef40)
5  rxq_copy_mprq_mbuf_v (rxq=0x18345ef40, pkts=0x7f76e0ff6d18, pkts_n=5)
6  rxq_burst_mprq_v (rxq=0x18345ef40, pkts=0x7f76e0ff6d18, pkts_n=46,
   err=0x7f76e0ff6a28, no_cq=0x7f76e0ff6a27)
7  mlx5_rx_burst_mprq_vec (dpdk_rxq=0x18345ef40, pkts=0x7f76e0ff6a88,
   pkts_n=128)
8  rte_eth_rx_burst (nb_pkts=128, rx_pkts=0x7f76e0ff6a88,
   queue_id=<optimized out>, port_id=<optimized out>)

This crash is caused by an attempt to copy previously decompressed
CQEs into non-allocated mbufs. There is a check to make sure we only
use allocated mbufs in the rxq_burst_mprq_v() function, but it is done
only before the main processing loop. Leftovers of a compressed CQE
session are handled before that loop and may lead to the mbuf overflow
seen in the call stack above.

Move the check for replenished mbufs up to protect the decompressed
CQE session leftovers from accessing non-allocated mbufs after the
mlx5_rx_mprq_replenish_bulk_mbuf() function is invoked.

Bugzilla ID: 746
Fixes: 0f20acbf5eda ("net/mlx5: implement vectorized MPRQ burst")
Cc: stable@dpdk.org

Signed-off-by: Alexander Kozyrev
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/mlx5_rxtx_vec.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index e1b6d5422a..ecd273e00a 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -448,6 +448,8 @@ rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	rte_prefetch0(cq + 3);
 	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
 	mlx5_rx_mprq_replenish_bulk_mbuf(rxq);
+	/* Not to move past the allocated mbufs. */
+	pkts_n = RTE_MIN(pkts_n, rxq->elts_ci - rxq->rq_pi);
 	/* See if there're unreturned mbufs from compressed CQE. */
 	rcvd_pkt = rxq->decompressed;
 	if (rcvd_pkt > 0) {
@@ -463,8 +465,6 @@ rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	/* Not to cross queue end. */
 	pkts_n = RTE_MIN(pkts_n, elts_n - elts_idx);
 	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
-	/* Not to move past the allocated mbufs. */
-	pkts_n = RTE_MIN(pkts_n, rxq->elts_ci - rxq->rq_pi);
 	if (!pkts_n) {
 		*no_cq = !cp_pkt;
 		return cp_pkt;
--
2.20.1
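
The ordering problem the patch fixes can be reduced to the standalone
C sketch below. This is a minimal sketch, not the mlx5 driver code:
struct rxq_sim, copy_leftovers() and burst() are hypothetical stand-ins
for mlx5_rxq_data, the decompressed-CQE leftover handling and
rxq_burst_mprq_v(); only the elts_ci, rq_pi and decompressed field names
mirror the real structure. It shows why pkts_n must be clamped to the
number of allocated mbufs before the leftovers are copied out.

/*
 * Minimal sketch, NOT the mlx5 driver code: rxq_sim, copy_leftovers()
 * and burst() are hypothetical stand-ins used only to illustrate the
 * ordering fixed by this patch.
 */
#include <stdint.h>
#include <stdio.h>

struct rxq_sim {
	uint16_t elts_ci;      /* index one past the last allocated mbuf */
	uint16_t rq_pi;        /* index of the next mbuf to hand out */
	uint16_t decompressed; /* packets left over from a zipped-CQE session */
};

static uint16_t min_u16(uint16_t a, uint16_t b)
{
	return a < b ? a : b;
}

/* Copy up to n leftover packets; each one consumes an allocated mbuf. */
static uint16_t copy_leftovers(struct rxq_sim *q, uint16_t n)
{
	uint16_t cnt = min_u16(q->decompressed, n);

	q->decompressed -= cnt;
	q->rq_pi += cnt;
	return cnt;
}

static uint16_t burst(struct rxq_sim *q, uint16_t pkts_n)
{
	/*
	 * The fix: clamp to the allocated mbufs *before* the leftovers
	 * are handled. Before the patch this clamp sat after the
	 * leftover copy, so copy_leftovers() could walk past elts_ci
	 * into non-allocated mbufs, i.e. the crash in the call stack
	 * above.
	 */
	pkts_n = min_u16(pkts_n, q->elts_ci - q->rq_pi);
	uint16_t cp_pkt = copy_leftovers(q, pkts_n);
	/* ... the main CQE processing loop would follow here ... */
	return cp_pkt;
}

int main(void)
{
	/* 5 leftover packets, but only 2 mbufs (10 - 8) are allocated. */
	struct rxq_sim q = { .elts_ci = 10, .rq_pi = 8, .decompressed = 5 };

	printf("returned %u packets\n", (unsigned int)burst(&q, 32));
	return 0;
}

Running the sketch prints "returned 2 packets": the clamp limits the
leftover copy to the two allocated mbufs, whereas the pre-patch ordering
would have handed out all five.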