diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index ac00cba..3957227 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -19,8 +19,8 @@
 #include <bpf/xsk.h>
 
 #include <rte_ethdev.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_vdev.h>
+#include <ethdev_driver.h>
+#include <ethdev_vdev.h>
 #include <rte_kvargs.h>
 #include <rte_bus_vdev.h>
 #include <rte_string_fns.h>
@@ -255,28 +255,32 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        struct xsk_umem_info *umem = rxq->umem;
        uint32_t idx_rx = 0;
        unsigned long rx_bytes = 0;
-       int rcvd, i;
+       int i;
        struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
 
-       /* allocate bufs for fill queue replenishment after rx */
-       if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
-               AF_XDP_LOG(DEBUG,
-                       "Failed to get enough buffers for fq.\n");
-               return 0;
-       }
-
-       rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
+       nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
 
-       if (rcvd == 0) {
+       if (nb_pkts == 0) {
 #if defined(XDP_USE_NEED_WAKEUP)
                if (xsk_ring_prod__needs_wakeup(fq))
                        (void)poll(rxq->fds, 1, 1000);
 #endif
 
-               goto out;
+               return 0;
        }
 
-       for (i = 0; i < rcvd; i++) {
+       /* allocate bufs for fill queue replenishment after rx */
+       if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
+               AF_XDP_LOG(DEBUG,
+                       "Failed to get enough buffers for fq.\n");
+               /* rollback cached_cons which is added by
+                * xsk_ring_cons__peek
+                */
+               rx->cached_cons -= nb_pkts;
+               return 0;
+       }
+
+       for (i = 0; i < nb_pkts; i++) {
                const struct xdp_desc *desc;
                uint64_t addr;
                uint32_t len;
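
The hunk above inverts the old order of operations in the zero-copy receive path: the ring is peeked first, and mbufs for fill-queue replenishment are allocated only once the number of available descriptors is known. Because xsk_ring_cons__peek() advances the ring's cached consumer index, a failed bulk allocation must wind that index back before bailing out. A minimal sketch of the pattern, using a hypothetical helper name and stripped of the driver's descriptor handling:

#include <bpf/xsk.h>
#include <rte_mbuf.h>

static uint16_t
peek_then_alloc(struct xsk_ring_cons *rx, struct rte_mempool *mp,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	uint32_t idx_rx = 0;

	/* Learn how many descriptors are actually available before allocating. */
	nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
	if (nb_pkts == 0)
		return 0;

	if (rte_pktmbuf_alloc_bulk(mp, bufs, nb_pkts)) {
		/* Undo the peek: it already advanced rx->cached_cons. */
		rx->cached_cons -= nb_pkts;
		return 0;
	}

	/* ... fill bufs[] from the ring descriptors here ... */

	xsk_ring_cons__release(rx, nb_pkts);
	return nb_pkts;
}
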
@@ -301,20 +305,14 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                rx_bytes += len;
        }
 
-       xsk_ring_cons__release(rx, rcvd);
-
-       (void)reserve_fill_queue(umem, rcvd, fq_bufs, fq);
+       xsk_ring_cons__release(rx, nb_pkts);
+       (void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);
 
        /* statistics */
-       rxq->stats.rx_pkts += rcvd;
+       rxq->stats.rx_pkts += nb_pkts;
        rxq->stats.rx_bytes += rx_bytes;
 
-out:
-       if (rcvd != nb_pkts)
-               rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd],
-                                    nb_pkts - rcvd);
-
-       return rcvd;
+       return nb_pkts;
 }
 #else
 static uint16_t
@@ -326,7 +324,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        struct xsk_ring_prod *fq = &rxq->fq;
        uint32_t idx_rx = 0;
        unsigned long rx_bytes = 0;
-       int rcvd, i;
+       int i;
        uint32_t free_thresh = fq->size >> 1;
        struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
 
@@ -334,20 +332,24 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                (void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE,
                                         NULL, fq);
 
-       if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
-               return 0;
-
-       rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
-       if (rcvd == 0) {
+       nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
+       if (nb_pkts == 0) {
 #if defined(XDP_USE_NEED_WAKEUP)
                if (xsk_ring_prod__needs_wakeup(fq))
                        (void)poll(rxq->fds, 1, 1000);
 #endif
+               return 0;
+       }
 
-               goto out;
+       if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts))) {
+               /* rollback cached_cons which is added by
+                * xsk_ring_cons__peek
+                */
+               rx->cached_cons -= nb_pkts;
+               return 0;
        }
 
-       for (i = 0; i < rcvd; i++) {
+       for (i = 0; i < nb_pkts; i++) {
                const struct xdp_desc *desc;
                uint64_t addr;
                uint32_t len;
@@ -366,18 +368,13 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                bufs[i] = mbufs[i];
        }
 
-       xsk_ring_cons__release(rx, rcvd);
+       xsk_ring_cons__release(rx, nb_pkts);
 
        /* statistics */
-       rxq->stats.rx_pkts += rcvd;
+       rxq->stats.rx_pkts += nb_pkts;
        rxq->stats.rx_bytes += rx_bytes;
 
-out:
-       if (rcvd != nb_pkts)
-               rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
-                                    nb_pkts - rcvd);
-
-       return rcvd;
+       return nb_pkts;
 }
 #endif
 
@@ -599,10 +596,11 @@ eth_dev_start(struct rte_eth_dev *dev)
 }
 
 /* This function gets called when the current port gets stopped. */
-static void
+static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
        dev->data->dev_link.link_status = ETH_LINK_DOWN;
+       return 0;
 }
 
 /* Find ethdev in list */
@@ -634,16 +632,35 @@ find_internal_resource(struct pmd_internals *port_int)
        return list;
 }
 
+/* Check if the netdev,qid context already exists */
+static inline bool
+ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
+               struct pkt_rx_queue *list_rxq, const char *list_ifname)
+{
+       bool exists = false;
+
+       if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
+                       !strncmp(ifname, list_ifname, IFNAMSIZ)) {
+               AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
+                                       ifname, rxq->xsk_queue_idx);
+               exists = true;
+       }
+
+       return exists;
+}
+
 /* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
-static inline struct xsk_umem_info *
-get_shared_umem(struct pkt_rx_queue *rxq) {
+static inline int
+get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
+                       struct xsk_umem_info **umem)
+{
        struct internal_list *list;
        struct pmd_internals *internals;
-       int i = 0;
+       int i = 0, ret = 0;
        struct rte_mempool *mb_pool = rxq->mb_pool;
 
        if (mb_pool == NULL)
-               return NULL;
+               return ret;
 
        pthread_mutex_lock(&internal_list_lock);
 
@@ -655,20 +672,25 @@ get_shared_umem(struct pkt_rx_queue *rxq) {
                        if (rxq == list_rxq)
                                continue;
                        if (mb_pool == internals->rx_queues[i].mb_pool) {
+                               if (ctx_exists(rxq, ifname, list_rxq,
+                                               internals->if_name)) {
+                                       ret = -1;
+                                       goto out;
+                               }
                                if (__atomic_load_n(
                                        &internals->rx_queues[i].umem->refcnt,
                                                        __ATOMIC_ACQUIRE)) {
-                                       pthread_mutex_unlock(
-                                                       &internal_list_lock);
-                                       return internals->rx_queues[i].umem;
+                                       *umem = internals->rx_queues[i].umem;
+                                       goto out;
                                }
                        }
                }
        }
 
+out:
        pthread_mutex_unlock(&internal_list_lock);
 
-       return NULL;
+       return ret;
 }
 
 static int
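
With ctx_exists() folded into the lookup, get_shared_umem() can no longer report "nothing to share" and "sharing is illegal" through a single pointer return, so the hunk above splits the two: the UMEM comes back through an out-parameter while the return code says whether the lookup itself failed. A hedged sketch of the resulting call contract (the real call site appears in a later hunk):

	struct xsk_umem_info *umem = NULL;

	/* Negative return: the same netdev,qid pair already owns a UMEM and
	 * sharing must be refused.  Zero return with umem still NULL: no
	 * in-use match, so create a fresh UMEM.  Zero return with umem set:
	 * a candidate for reuse, subject to its refcnt staying below max_xsks.
	 */
	if (get_shared_umem(rxq, internals->if_name, &umem) < 0)
		return NULL;
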
@@ -815,7 +837,6 @@ xdp_umem_destroy(struct xsk_umem_info *umem)
 #endif
 
        rte_free(umem);
-       umem = NULL;
 }
 
 static int
@@ -885,13 +906,13 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
 }
 
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
-static inline uint64_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
+static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
 {
        struct rte_mempool_memhdr *memhdr;
-       uint64_t memhdr_addr, aligned_addr;
+       uintptr_t memhdr_addr, aligned_addr;
 
        memhdr = STAILQ_FIRST(&mp->mem_list);
-       memhdr_addr = (uint64_t)memhdr->addr;
+       memhdr_addr = (uintptr_t)memhdr->addr;
        aligned_addr = memhdr_addr & ~(getpagesize() - 1);
        *align = memhdr_addr - aligned_addr;
 
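
get_base_addr() rounds the mempool's first memory chunk down to a page boundary and reports how far the real base sits above it; switching the arithmetic to uintptr_t keeps the pointer-to-integer conversions well defined on 32-bit targets. The same alignment idiom in isolation (hypothetical helper, assuming <unistd.h> for getpagesize()):

#include <stdint.h>
#include <unistd.h>

static uintptr_t
page_align_down(void *p, uint64_t *offset)
{
	uintptr_t addr = (uintptr_t)p;
	/* Clear the low-order bits so the result starts on a page boundary. */
	uintptr_t aligned = addr & ~((uintptr_t)getpagesize() - 1);

	*offset = addr - aligned;	/* distance back to the original base */
	return aligned;
}
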
@@ -913,7 +934,9 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
        uint64_t umem_size, align = 0;
 
        if (internals->shared_umem) {
-               umem = get_shared_umem(rxq);
+               if (get_shared_umem(rxq, internals->if_name, &umem) < 0)
+                       return NULL;
+
                if (umem != NULL &&
                        __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
                                        umem->max_xsks) {
@@ -941,7 +964,8 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
 
                umem->mb_pool = mb_pool;
                base_addr = (void *)get_base_addr(mb_pool, &align);
-               umem_size = mb_pool->populated_size * usr_config.frame_size +
+               umem_size = (uint64_t)mb_pool->populated_size *
+                               (uint64_t)usr_config.frame_size +
                                align;
 
                ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
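
The umem_size change above guards against 32-bit overflow: populated_size and frame_size are both 32-bit values, so an operand has to be widened before the multiplication rather than only the result. In isolation, with illustrative values:

	uint32_t populated_size = 65536;	/* illustrative values only */
	uint32_t frame_size = 65536;
	uint64_t align = 0;

	/* Casting an operand forces the multiply itself into 64 bits;
	 * (uint64_t)(populated_size * frame_size) would still wrap. */
	uint64_t umem_size = (uint64_t)populated_size * (uint64_t)frame_size + align;
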
@@ -1121,7 +1145,8 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
        }
 
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
-       if (rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size)) {
+       ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
+       if (ret) {
                AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
                goto err;
        }
@@ -1535,6 +1560,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
        eth_dev->data->dev_private = internals;
        eth_dev->data->dev_link = pmd_link;
        eth_dev->data->mac_addrs = &internals->eth_addr;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        eth_dev->dev_ops = &ops;
        eth_dev->rx_pkt_burst = eth_af_xdp_rx;
        eth_dev->tx_pkt_burst = eth_af_xdp_tx;