net: add rte prefix to ether structures
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index c46916b..893c9ed 100644
@@ -109,7 +109,7 @@ struct pmd_internals {
        int if_index;
        char if_name[IFNAMSIZ];
        uint16_t queue_idx;
-       struct ether_addr eth_addr;
+       struct rte_ether_addr eth_addr;
        struct xsk_umem_info *umem;
        struct rte_mempool *mb_pool_share;
 
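The rename itself is mechanical: struct ether_addr becomes struct rte_ether_addr as part of the DPDK-wide rte_ prefix cleanup, with no layout change. A minimal illustration of the renamed type, using a hypothetical zero_mac variable:

#include <rte_ether.h>

/* Same six MAC bytes as the old struct ether_addr, new rte_ name. */
static const struct rte_ether_addr zero_mac = {
	.addr_bytes = { 0, 0, 0, 0, 0, 0 },
};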
@@ -134,30 +134,34 @@ static const struct rte_eth_link pmd_link = {
 };
 
 static inline int
-reserve_fill_queue(struct xsk_umem_info *umem, int reserve_size)
+reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size)
 {
        struct xsk_ring_prod *fq = &umem->fq;
+       void *addrs[reserve_size];
        uint32_t idx;
-       int i, ret;
+       uint16_t i;
 
-       ret = xsk_ring_prod__reserve(fq, reserve_size, &idx);
-       if (unlikely(!ret)) {
-               AF_XDP_LOG(ERR, "Failed to reserve enough fq descs.\n");
-               return ret;
+       if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
+                   != reserve_size) {
+               AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
+               return -1;
+       }
+
+       if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
+               AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
+               rte_ring_enqueue_bulk(umem->buf_ring, addrs,
+                               reserve_size, NULL);
+               return -1;
        }
 
        for (i = 0; i < reserve_size; i++) {
                __u64 *fq_addr;
-               void *addr = NULL;
-               if (rte_ring_dequeue(umem->buf_ring, &addr)) {
-                       i--;
-                       break;
-               }
+
                fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
-               *fq_addr = (uint64_t)addr;
+               *fq_addr = (uint64_t)addrs[i];
        }
 
-       xsk_ring_prod__submit(fq, i);
+       xsk_ring_prod__submit(fq, reserve_size);
 
        return 0;
 }
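The rewritten reserve_fill_queue() is now all-or-nothing: buffer addresses are dequeued from the ring in one bulk call before any fill-queue descriptor is reserved, and handed back in one bulk call if the reservation fails, so a partial refill can no longer leak buffers or under-submit descriptors. A minimal sketch of that pattern, with a hypothetical refill_sketch() helper:

#include <rte_ring.h>

/*
 * Hypothetical helper mirroring the all-or-nothing refill above.
 * rte_ring_dequeue_bulk() returns the number of objects dequeued,
 * which for the bulk variant is either n or 0, never a partial count.
 */
static int
refill_sketch(struct rte_ring *ring, void **bufs, unsigned int n)
{
	if (rte_ring_dequeue_bulk(ring, bufs, n, NULL) != n)
		return -1;	/* ring untouched, nothing to undo */

	/*
	 * If a later step fails, all n buffers can be returned in a
	 * single bulk call, keeping the refill atomic from the ring's
	 * point of view:
	 *
	 *	rte_ring_enqueue_bulk(ring, bufs, n, NULL);
	 */
	return 0;
}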
@@ -171,24 +175,23 @@ eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        struct xsk_ring_prod *fq = &umem->fq;
        uint32_t idx_rx = 0;
        uint32_t free_thresh = fq->size >> 1;
-       struct rte_mbuf *mbufs[ETH_AF_XDP_TX_BATCH_SIZE];
+       struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
        unsigned long dropped = 0;
        unsigned long rx_bytes = 0;
-       uint16_t count = 0;
        int rcvd, i;
 
-       nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
+       nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
+
+       if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
+               return 0;
 
        rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
        if (rcvd == 0)
-               return 0;
+               goto out;
 
        if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
                (void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE);
 
-       if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, rcvd) != 0))
-               return 0;
-
        for (i = 0; i < rcvd; i++) {
                const struct xdp_desc *desc;
                uint64_t addr;
@@ -204,7 +207,7 @@ eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                rte_pktmbuf_pkt_len(mbufs[i]) = len;
                rte_pktmbuf_data_len(mbufs[i]) = len;
                rx_bytes += len;
-               bufs[count++] = mbufs[i];
+               bufs[i] = mbufs[i];
 
                rte_ring_enqueue(umem->buf_ring, (void *)addr);
        }
@@ -215,7 +218,12 @@ eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        rxq->stats.rx_pkts += (rcvd - dropped);
        rxq->stats.rx_bytes += rx_bytes;
 
-       return count;
+out:
+       if (rcvd != nb_pkts)
+               rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
+                                    nb_pkts - rcvd);
+
+       return rcvd;
 }
 
 static void
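Across the three RX hunks above, the mbuf allocation moves ahead of the ring peek: if rte_pktmbuf_alloc_bulk() fails, the function returns with the RX ring untouched, and any over-allocation is returned to the mempool at the out: label. A condensed sketch of that shape, with hypothetical rx_sketch(), pool and budget names:

#include <rte_branch_prediction.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/*
 * Allocating up front means an mbuf shortage is detected while the RX
 * ring is still untouched, so no received descriptor is dropped.
 */
static uint16_t
rx_sketch(struct rte_mempool *pool, struct rte_mbuf **bufs, uint16_t budget)
{
	struct rte_mbuf *mbufs[budget];
	uint16_t rcvd = 0;

	if (unlikely(rte_pktmbuf_alloc_bulk(pool, mbufs, budget) != 0))
		return 0;

	/* ... peek the ring, fill bufs[0..rcvd) from mbufs[] ... */
	(void)bufs;

	/* Hand any surplus back to the mempool in one bulk call. */
	if (rcvd != budget)
		rte_mempool_put_bulk(pool, (void **)&mbufs[rcvd],
				     budget - rcvd);
	return rcvd;
}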
@@ -262,7 +270,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        struct rte_mbuf *mbuf;
        void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
        unsigned long tx_bytes = 0;
-       int i, valid = 0;
+       int i;
        uint32_t idx_tx;
 
        nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
@@ -283,20 +291,18 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        for (i = 0; i < nb_pkts; i++) {
                struct xdp_desc *desc;
                void *pkt;
-               uint32_t buf_len = ETH_AF_XDP_FRAME_SIZE
-                                       - ETH_AF_XDP_DATA_HEADROOM;
+
                desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
                mbuf = bufs[i];
-               if (mbuf->pkt_len <= buf_len) {
-                       desc->addr = (uint64_t)addrs[valid];
-                       desc->len = mbuf->pkt_len;
-                       pkt = xsk_umem__get_data(umem->mz->addr,
-                                                desc->addr);
-                       rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
-                              desc->len);
-                       valid++;
-                       tx_bytes += mbuf->pkt_len;
-               }
+
+               desc->addr = (uint64_t)addrs[i];
+               desc->len = mbuf->pkt_len;
+               pkt = xsk_umem__get_data(umem->mz->addr,
+                                        desc->addr);
+               rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
+                          desc->len);
+               tx_bytes += mbuf->pkt_len;
+
                rte_pktmbuf_free(mbuf);
        }
 
@@ -304,12 +310,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
        kick_tx(txq);
 
-       if (valid < nb_pkts)
-               rte_ring_enqueue_bulk(umem->buf_ring, &addrs[valid],
-                                nb_pkts - valid, NULL);
-
-       txq->stats.err_pkts += nb_pkts - valid;
-       txq->stats.tx_pkts += valid;
+       txq->stats.tx_pkts += nb_pkts;
        txq->stats.tx_bytes += tx_bytes;
 
        return nb_pkts;
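With the length check gone, the TX loop treats every mbuf as transmittable: the driver relies on each UMEM frame (ETH_AF_XDP_FRAME_SIZE minus headroom) being large enough for any packet handed to it, so the copy is unconditional and the valid counter with its error-stat bookkeeping disappears. A minimal sketch of the per-descriptor copy, with hypothetical base/addr arguments standing in for the UMEM region and the ring-supplied frame offset:

#include <rte_mbuf.h>
#include <rte_memcpy.h>

/* Hypothetical stand-in for the copy done per TX descriptor above. */
static void
tx_copy_sketch(void *base, uint64_t addr, struct rte_mbuf *mbuf)
{
	/* Equivalent of xsk_umem__get_data(base, addr). */
	void *pkt = (char *)base + addr;

	rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), mbuf->pkt_len);
	rte_pktmbuf_free(mbuf);
}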
@@ -425,6 +426,19 @@ remove_xdp_program(struct pmd_internals *internals)
                        XDP_FLAGS_UPDATE_IF_NOEXIST);
 }
 
+static void
+xdp_umem_destroy(struct xsk_umem_info *umem)
+{
+       rte_memzone_free(umem->mz);
+       umem->mz = NULL;
+
+       rte_ring_free(umem->buf_ring);
+       umem->buf_ring = NULL;
+
+       rte_free(umem);
+       umem = NULL;
+}
+
 static void
 eth_dev_close(struct rte_eth_dev *dev)
 {
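One side note on the xdp_umem_destroy() helper moved above: the trailing `umem = NULL;` assigns to the local parameter only and has no effect on the caller's pointer. Clearing the caller's copy would take a pointer-to-pointer, as in this hypothetical variant:

#include <rte_malloc.h>

struct xsk_umem_info;	/* driver-private type, declared for the sketch */

static void
xdp_umem_destroy_sketch(struct xsk_umem_info **umem)
{
	/* ... free the memzone and buffer ring of (*umem), then: */
	rte_free(*umem);
	*umem = NULL;	/* now visible to the caller */
}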
@@ -443,6 +457,15 @@ eth_dev_close(struct rte_eth_dev *dev)
        }
 
        (void)xsk_umem__delete(internals->umem->umem);
+
+       /*
+        * The MAC address is not allocated dynamically; setting the pointer
+        * to NULL keeps rte_eth_dev_release_port() from trying to free it.
+        */
+       dev->data->mac_addrs = NULL;
+
+       xdp_umem_destroy(internals->umem);
+
        remove_xdp_program(internals);
 }
 
@@ -458,21 +481,8 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
        return 0;
 }
 
-static void
-xdp_umem_destroy(struct xsk_umem_info *umem)
-{
-       rte_memzone_free(umem->mz);
-       umem->mz = NULL;
-
-       rte_ring_free(umem->buf_ring);
-       umem->buf_ring = NULL;
-
-       rte_free(umem);
-       umem = NULL;
-}
-
 static struct
-xsk_umem_info *xdp_umem_configure(void)
+xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals)
 {
        struct xsk_umem_info *umem;
        const struct rte_memzone *mz;
@@ -481,6 +491,8 @@ xsk_umem_info *xdp_umem_configure(void)
                .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
                .frame_size = ETH_AF_XDP_FRAME_SIZE,
                .frame_headroom = ETH_AF_XDP_DATA_HEADROOM };
+       char ring_name[RTE_RING_NAMESIZE];
+       char mz_name[RTE_MEMZONE_NAMESIZE];
        int ret;
        uint64_t i;
 
@@ -490,7 +502,9 @@ xsk_umem_info *xdp_umem_configure(void)
                return NULL;
        }
 
-       umem->buf_ring = rte_ring_create("af_xdp_ring",
+       snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
+                      internals->if_name, internals->queue_idx);
+       umem->buf_ring = rte_ring_create(ring_name,
                                         ETH_AF_XDP_NUM_BUFFERS,
                                         rte_socket_id(),
                                         0x0);
@@ -504,7 +518,9 @@ xsk_umem_info *xdp_umem_configure(void)
                                 (void *)(i * ETH_AF_XDP_FRAME_SIZE +
                                          ETH_AF_XDP_DATA_HEADROOM));
 
-       mz = rte_memzone_reserve_aligned("af_xdp uemem",
+       snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
+                      internals->if_name, internals->queue_idx);
+       mz = rte_memzone_reserve_aligned(mz_name,
                        ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
                        rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
                        getpagesize());
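These hunks derive the ring and memzone names from the interface name and queue index. Unique names matter because rte_ring_create() and rte_memzone_reserve_aligned() fail when the requested name is already registered, which previously limited the PMD to a single instance. A minimal sketch of the naming scheme, assuming hypothetical if_name/queue_idx inputs:

#include <stdio.h>
#include <rte_ring.h>
#include <rte_memzone.h>

static void
name_sketch(const char *if_name, uint16_t queue_idx)
{
	char ring_name[RTE_RING_NAMESIZE];
	char mz_name[RTE_MEMZONE_NAMESIZE];

	/* e.g. "af_xdp_ring_eth0_0" and "af_xdp_umem_eth0_0" */
	snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
		 if_name, queue_idx);
	snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
		 if_name, queue_idx);
}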
@@ -540,7 +556,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
        int ret = 0;
        int reserve_size;
 
-       rxq->umem = xdp_umem_configure();
+       rxq->umem = xdp_umem_configure(internals);
        if (rxq->umem == NULL)
                return -ENOMEM;
 
@@ -782,7 +798,7 @@ free_kvlist:
 
 static int
 get_iface_info(const char *if_name,
-              struct ether_addr *eth_addr,
+              struct rte_ether_addr *eth_addr,
               int *if_index)
 {
        struct ifreq ifr;
@@ -849,6 +865,8 @@ init_internals(struct rte_vdev_device *dev,
        eth_dev->dev_ops = &ops;
        eth_dev->rx_pkt_burst = eth_af_xdp_rx;
        eth_dev->tx_pkt_burst = eth_af_xdp_tx;
+       /* Let rte_eth_dev_close() release the port resources. */
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
 
        return eth_dev;
 
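Setting RTE_ETH_DEV_CLOSE_REMOVE here changes what close means for applications: with the flag set, rte_eth_dev_close() also releases the port, so no separate detach step is needed to reclaim the ethdev slot. A hypothetical application-side sketch, assuming a valid port_id:

#include <rte_ethdev.h>

static void
close_sketch(uint16_t port_id)
{
	/* With RTE_ETH_DEV_CLOSE_REMOVE, this also frees the port. */
	rte_eth_dev_close(port_id);
}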
@@ -916,7 +934,6 @@ static int
 rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
 {
        struct rte_eth_dev *eth_dev = NULL;
-       struct pmd_internals *internals;
 
        AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
                rte_socket_id());
@@ -929,12 +946,7 @@ rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
        if (eth_dev == NULL)
                return -1;
 
-       internals = eth_dev->data->dev_private;
-
-       rte_ring_free(internals->umem->buf_ring);
-       rte_memzone_free(internals->umem->mz);
-       rte_free(internals->umem);
-
+       eth_dev_close(eth_dev);
        rte_eth_dev_release_port(eth_dev);