X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Faf_xdp%2Frte_eth_af_xdp.c;h=3957227bf01cb592f8721fd95177539d35f418b6;hb=0984d196be2a92eb6e2e0b926fdb4a06a1d7d823;hp=1c1e3cadd65e738479042db7ebc77a50339ad679;hpb=62024eb8275696bead35b38a6062a2513f1f7c58;p=dpdk.git

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 1c1e3cadd6..3957227bf0 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -19,8 +19,8 @@
 #include
 #include
-#include
-#include
+#include
+#include
 #include
 #include
 #include
@@ -255,28 +255,32 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct xsk_umem_info *umem = rxq->umem;
 	uint32_t idx_rx = 0;
 	unsigned long rx_bytes = 0;
-	int rcvd, i;
+	int i;
 	struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
 
-	/* allocate bufs for fill queue replenishment after rx */
-	if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
-		AF_XDP_LOG(DEBUG,
-			"Failed to get enough buffers for fq.\n");
-		return 0;
-	}
+	nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
 
-	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
-
-	if (rcvd == 0) {
+	if (nb_pkts == 0) {
 #if defined(XDP_USE_NEED_WAKEUP)
 		if (xsk_ring_prod__needs_wakeup(fq))
 			(void)poll(rxq->fds, 1, 1000);
 #endif
 
-		goto out;
+		return 0;
+	}
+
+	/* allocate bufs for fill queue replenishment after rx */
+	if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
+		AF_XDP_LOG(DEBUG,
+			"Failed to get enough buffers for fq.\n");
+		/* rollback cached_cons which is added by
+		 * xsk_ring_cons__peek
+		 */
+		rx->cached_cons -= nb_pkts;
+		return 0;
 	}
 
-	for (i = 0; i < rcvd; i++) {
+	for (i = 0; i < nb_pkts; i++) {
 		const struct xdp_desc *desc;
 		uint64_t addr;
 		uint32_t len;
@@ -301,20 +305,14 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		rx_bytes += len;
 	}
 
-	xsk_ring_cons__release(rx, rcvd);
-
-	(void)reserve_fill_queue(umem, rcvd, fq_bufs, fq);
+	xsk_ring_cons__release(rx, nb_pkts);
+	(void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);
 
 	/* statistics */
-	rxq->stats.rx_pkts += rcvd;
+	rxq->stats.rx_pkts += nb_pkts;
 	rxq->stats.rx_bytes += rx_bytes;
 
-out:
-	if (rcvd != nb_pkts)
-		rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd],
-				     nb_pkts - rcvd);
-
-	return rcvd;
+	return nb_pkts;
 }
 #else
 static uint16_t
@@ -326,7 +324,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct xsk_ring_prod *fq = &rxq->fq;
 	uint32_t idx_rx = 0;
 	unsigned long rx_bytes = 0;
-	int rcvd, i;
+	int i;
 	uint32_t free_thresh = fq->size >> 1;
 	struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
 
@@ -334,20 +332,24 @@
 		(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE,
 					 NULL, fq);
 
-	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
-		return 0;
-
-	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
-	if (rcvd == 0) {
+	nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
+	if (nb_pkts == 0) {
 #if defined(XDP_USE_NEED_WAKEUP)
 		if (xsk_ring_prod__needs_wakeup(fq))
 			(void)poll(rxq->fds, 1, 1000);
 #endif
+		return 0;
+	}
 
-		goto out;
+	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts))) {
+		/* rollback cached_cons which is added by
+		 * xsk_ring_cons__peek
+		 */
+		rx->cached_cons -= nb_pkts;
+		return 0;
 	}
 
-	for (i = 0; i < rcvd; i++) {
+	for (i = 0; i < nb_pkts; i++) {
 		const struct xdp_desc *desc;
 		uint64_t addr;
 		uint32_t len;
@@ -366,18 +368,13 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		bufs[i] = mbufs[i];
 	}
 
-	xsk_ring_cons__release(rx, rcvd);
+	xsk_ring_cons__release(rx, nb_pkts);
 
 	/* statistics */
-	rxq->stats.rx_pkts += rcvd;
+	rxq->stats.rx_pkts += nb_pkts;
 	rxq->stats.rx_bytes += rx_bytes;
 
-out:
-	if (rcvd != nb_pkts)
-		rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
-				     nb_pkts - rcvd);
-
-	return rcvd;
+	return nb_pkts;
 }
 #endif
 
@@ -840,7 +837,6 @@ xdp_umem_destroy(struct xsk_umem_info *umem)
 #endif
 
 	rte_free(umem);
-	umem = NULL;
 }
 
 static int
@@ -910,13 +906,13 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
 }
 
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
-static inline uint64_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
+static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
 {
 	struct rte_mempool_memhdr *memhdr;
-	uint64_t memhdr_addr, aligned_addr;
+	uintptr_t memhdr_addr, aligned_addr;
 
 	memhdr = STAILQ_FIRST(&mp->mem_list);
-	memhdr_addr = (uint64_t)memhdr->addr;
+	memhdr_addr = (uintptr_t)memhdr->addr;
 	aligned_addr = memhdr_addr & ~(getpagesize() - 1);
 	*align = memhdr_addr - aligned_addr;
 
@@ -968,7 +964,8 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
 
 		umem->mb_pool = mb_pool;
 		base_addr = (void *)get_base_addr(mb_pool, &align);
-		umem_size = mb_pool->populated_size * usr_config.frame_size +
+		umem_size = (uint64_t)mb_pool->populated_size *
+				(uint64_t)usr_config.frame_size +
 				align;
 
 		ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
@@ -1148,7 +1145,8 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
 	}
 
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
-	if (rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size)) {
+	ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
+	if (ret) {
 		AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
 		goto err;
 	}
@@ -1562,6 +1560,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
 	eth_dev->data->dev_private = internals;
 	eth_dev->data->dev_link = pmd_link;
 	eth_dev->data->mac_addrs = &internals->eth_addr;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 	eth_dev->dev_ops = &ops;
 	eth_dev->rx_pkt_burst = eth_af_xdp_rx;
 	eth_dev->tx_pkt_burst = eth_af_xdp_tx;