From 6dd3286f3a62be287899551b9c422e579af95b0b Mon Sep 17 00:00:00 2001
From: Ciara Loftus
Date: Fri, 11 Mar 2022 13:45:13 +0000
Subject: [PATCH] net/af_xdp: fix shared UMEM fill queue reserve

Commit 81fe6720f84f ("net/af_xdp: reserve fill queue before socket
create") moved the fill queue reservation to before the creation of
the socket in order to suppress kernel logs like:

XSK buffer pool does not provide enough addresses to fill 2047 buffers
on Rx ring 0

However, for queues that share a UMEM, the fill queue reservation must
occur after the socket creation, because the fill queue is not valid
until that point.

This commit uses the UMEM refcnt value to determine whether the queue
shares a UMEM, and reserves the fill queue either before or after the
socket creation accordingly. The kernel logs will still be seen for
queues that share a UMEM.

Fixes: 81fe6720f84f ("net/af_xdp: reserve fill queue before socket create")

Signed-off-by: Ciara Loftus
---
 drivers/net/af_xdp/rte_eth_af_xdp.c | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 9920f49870..1e37da6e84 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -1277,11 +1277,13 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
 	int ret = 0;
 	int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS;
 	struct rte_mbuf *fq_bufs[reserve_size];
+	bool reserve_before;
 
 	rxq->umem = xdp_umem_configure(internals, rxq);
 	if (rxq->umem == NULL)
 		return -ENOMEM;
 	txq->umem = rxq->umem;
+	reserve_before = __atomic_load_n(&rxq->umem->refcnt, __ATOMIC_ACQUIRE) <= 1;
 
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
 	ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
@@ -1291,10 +1293,13 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
 	}
 #endif
 
-	ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
-	if (ret) {
-		AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
-		goto out_umem;
+	/* reserve fill queue of queues not (yet) sharing UMEM */
+	if (reserve_before) {
+		ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
+		if (ret) {
+			AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
+			goto out_umem;
+		}
 	}
 
 	cfg.rx_size = ring_size;
@@ -1336,6 +1341,15 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
 		goto out_umem;
 	}
 
+	if (!reserve_before) {
+		/* reserve fill queue of queues sharing UMEM */
+		ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
+		if (ret) {
+			AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
+			goto out_xsk;
+		}
+	}
+
 	/* insert the xsk into the xsks_map */
 	if (internals->custom_prog_configured) {
 		int err, fd;
-- 
2.20.1
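
For review context, below is a compilable sketch of the before/after
ordering the patch introduces. It is an illustration, not driver code:
struct umem, reserve_fq_stub() and create_socket_stub() are hypothetical
stand-ins for the driver's UMEM, reserve_fill_queue() and
xsk_socket__create(), and C11 atomics replace the GCC __atomic builtins
used in the driver.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's UMEM; only refcnt matters here. */
struct umem {
	atomic_uint refcnt;
};

/* Stub for the driver's reserve_fill_queue(); just reports when it runs. */
static int reserve_fq_stub(struct umem *umem, const char *when)
{
	printf("reserve fill queue %s socket creation (refcnt=%u)\n",
	       when, atomic_load(&umem->refcnt));
	return 0;
}

/* Stub for xsk_socket__create(): the point at which a shared fill queue
 * becomes valid. */
static int create_socket_stub(void)
{
	return 0;
}

/* Mirrors the ordering logic the patch adds to xsk_configure(). */
static int configure_queue(struct umem *umem)
{
	/* Each queue takes a reference, as xdp_umem_configure() does. */
	atomic_fetch_add(&umem->refcnt, 1);

	/* refcnt <= 1: first (non-sharing) queue. Its fill queue is
	 * already valid, so reserve before the socket exists and avoid
	 * the kernel "not enough addresses" log. */
	bool reserve_before = atomic_load(&umem->refcnt) <= 1;

	if (reserve_before && reserve_fq_stub(umem, "before") != 0)
		return -1;

	if (create_socket_stub() != 0)
		return -1;

	/* Sharing queue: the fill queue only became valid with the
	 * socket, so the reservation must happen here instead. */
	if (!reserve_before && reserve_fq_stub(umem, "after") != 0)
		return -1;

	return 0;
}

int main(void)
{
	struct umem shared = { .refcnt = 0 };

	configure_queue(&shared);	/* first queue: reserves before */
	configure_queue(&shared);	/* sharing queue: reserves after */
	return 0;
}

Running the sketch prints one "before" reservation for the first queue
and one "after" reservation for the sharing queue, matching the two
reserve_fill_queue() call sites in the patch.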