git.droids-corp.org - dpdk.git/commitdiff
net/mlx5: fix deadlock due to buffered slots in Rx SW ring
author Yongseok Koh <yskoh@mellanox.com>
Tue, 10 Oct 2017 14:04:02 +0000 (07:04 -0700)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Thu, 12 Oct 2017 00:52:49 +0000 (01:52 +0100)
When replenishing the Rx ring, there are always buffered slots reserved
between consumed entries and HW owned entries. These have to be filled
with fake mbufs to protect from possible overflow rather than
optimistically expecting successful replenishment which can cause
deadlock with small-sized queue.

Fixes: fc048bd52cb7 ("net/mlx5: fix overflow of Rx SW ring")
Cc: stable@dpdk.org
Reported-by: Martin Weiser <martin.weiser@allegro-packets.com>
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Tested-by: Martin Weiser <martin.weiser@allegro-packets.com>
drivers/net/mlx5/mlx5_rxtx_vec.h
drivers/net/mlx5/mlx5_rxtx_vec_neon.h
drivers/net/mlx5/mlx5_rxtx_vec_sse.h

index 426169037c3bf8bbb4699d8b03f6a49533d5a10e..1f08ed0b27818c001ec3e1202a4e1111c857c5d3 100644 (file)
@@ -101,7 +101,7 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
 {
        const uint16_t q_n = 1 << rxq->elts_n;
        const uint16_t q_mask = q_n - 1;
-       const uint16_t elts_idx = rxq->rq_ci & q_mask;
+       uint16_t elts_idx = rxq->rq_ci & q_mask;
        struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
        volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
        unsigned int i;
@@ -119,6 +119,10 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
                wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
                                              RTE_PKTMBUF_HEADROOM);
        rxq->rq_ci += n;
+       /* Prevent overflowing into consumed mbufs. */
+       elts_idx = rxq->rq_ci & q_mask;
+       for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
+               (*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
        rte_io_wmb();
        *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
 }
index 6dd18b61957cfada4ac2d027420b06280ecd6ff2..86b37d5c6a4d44c47cc9b331a8d8c677e99954bd 100644 (file)
@@ -445,13 +445,6 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
                31, 30          /* 4th mCQE */
        };
 
-       /*
-        * Not to overflow elts array. Decompress next time after mbuf
-        * replenishment.
-        */
-       if (unlikely(mcqe_n + MLX5_VPMD_DESCS_PER_LOOP >
-                    (uint16_t)(rxq->rq_ci - rxq->cq_ci)))
-               return;
        /*
         * A. load mCQEs into a 128bit register.
         * B. store rearm data to mbuf.
@@ -778,10 +771,8 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
        }
        elts_idx = rxq->rq_pi & q_mask;
        elts = &(*rxq->elts)[elts_idx];
-       pkts_n = RTE_MIN(pkts_n - rcvd_pkt,
-                        (uint16_t)(rxq->rq_ci - rxq->cq_ci));
-       /* Not to overflow pkts/elts array. */
-       pkts_n = RTE_ALIGN_FLOOR(pkts_n, MLX5_VPMD_DESCS_PER_LOOP);
+       /* Not to overflow pkts array. */
+       pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
        /* Not to cross queue end. */
        pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
        if (!pkts_n)
index 88c5d75facfb5d698227f5a4e091583a7ee00e18..c2142d7ca227814c9f54517e57a56b5f862a8638 100644 (file)
@@ -436,13 +436,6 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
                             10, 11,  2,  3);
 #endif
 
-       /*
-        * Not to overflow elts array. Decompress next time after mbuf
-        * replenishment.
-        */
-       if (unlikely(mcqe_n + MLX5_VPMD_DESCS_PER_LOOP >
-                    (uint16_t)(rxq->rq_ci - rxq->cq_ci)))
-               return;
        /*
         * A. load mCQEs into a 128bit register.
         * B. store rearm data to mbuf.
@@ -764,10 +757,8 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
        }
        elts_idx = rxq->rq_pi & q_mask;
        elts = &(*rxq->elts)[elts_idx];
-       pkts_n = RTE_MIN(pkts_n - rcvd_pkt,
-                        (uint16_t)(rxq->rq_ci - rxq->cq_ci));
-       /* Not to overflow pkts/elts array. */
-       pkts_n = RTE_ALIGN_FLOOR(pkts_n, MLX5_VPMD_DESCS_PER_LOOP);
+       /* Not to overflow pkts array. */
+       pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
        /* Not to cross queue end. */
        pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
        if (!pkts_n)