From ab7ed23a2f3e74f1341de76b19256842c4cdc7f2 Mon Sep 17 00:00:00 2001
From: RongQing Li
Date: Fri, 18 Sep 2020 19:32:31 +0800
Subject: [PATCH] net/af_xdp: avoid deadlock due to empty fill queue

While receiving packets, it is possible that reserving the fill queue
fails, since the buffer ring is shared between Tx and Rx and may be
temporarily unavailable. As a result, both the fill queue and the Rx
queue end up empty.

The kernel side then cannot receive packets because the fill queue is
empty, and DPDK cannot replenish the fill queue because it has no
packets to receive: a deadlock.

Fix this by moving the fill queue reservation before
xsk_ring_cons__peek(), so the fill queue is replenished even on an Rx
cycle that receives nothing.

Cc: stable@dpdk.org
Signed-off-by: RongQing Li
Signed-off-by: Dongsheng Rong
Acked-by: Ciara Loftus
---
 drivers/net/af_xdp/rte_eth_af_xdp.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index b65ee449fc..00de671841 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -304,6 +304,10 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	uint32_t free_thresh = fq->size >> 1;
 	struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
 
+	if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
+		(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE, NULL);
+
+
 	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
 		return 0;
 
@@ -317,9 +321,6 @@
 		goto out;
 	}
 
-	if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
-		(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE, NULL);
-
 	for (i = 0; i < rcvd; i++) {
 		const struct xdp_desc *desc;
 		uint64_t addr;
-- 
2.20.1
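
For readers following the change, below is a condensed sketch of the
copy-mode receive path after this patch. It reuses the driver's own
types and helpers as they appear in rte_eth_af_xdp.c at this revision
(struct pkt_rx_queue, struct xsk_umem_info, reserve_fill_queue()), and
it elides the parts of af_xdp_rx_cp() the patch does not touch, so it
is an illustration of the control flow rather than a drop-in function
body:

	/* Sketch only: driver-internal types/helpers assumed as in
	 * rte_eth_af_xdp.c; mbuf allocation and the copy loop elided. */
	static uint16_t
	af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
	{
		struct pkt_rx_queue *rxq = queue;
		struct xsk_umem_info *umem = rxq->umem;
		struct xsk_ring_prod *fq = &umem->fq;
		uint32_t free_thresh = fq->size >> 1;
		uint32_t idx_rx = 0;
		int rcvd;

		/* Replenish the fill queue before peeking the Rx ring.
		 * Doing it first guarantees the kernel gets buffers even
		 * on a cycle that receives nothing, which is what breaks
		 * the deadlock. */
		if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
			(void)reserve_fill_queue(umem,
					ETH_AF_XDP_RX_BATCH_SIZE, NULL);

		rcvd = xsk_ring_cons__peek(&rxq->rx, nb_pkts, &idx_rx);
		if (rcvd == 0)
			return 0; /* fill queue was still replenished above */

		/* ... allocate mbufs, copy rcvd descriptors into bufs,
		 * then release the Rx ring entries ... */
		return rcvd;
	}

The free_thresh guard (half the fill ring, per fq->size >> 1) is the
same heuristic the driver used before the move: replenishment only
kicks in once at least half the ring is free, keeping the common-case
Rx path cheap.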