net/af_xdp: use recvfrom only if busy polling enabled

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 1c1e3ca..0c91a40 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -5,7 +5,6 @@
 #include <errno.h>
 #include <stdlib.h>
 #include <string.h>
-#include <poll.h>
 #include <netinet/in.h>
 #include <net/if.h>
 #include <sys/socket.h>
@@ -19,8 +18,8 @@
 #include <bpf/xsk.h>
 
 #include <rte_ethdev.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_vdev.h>
+#include <ethdev_driver.h>
+#include <ethdev_vdev.h>
 #include <rte_kvargs.h>
 #include <rte_bus_vdev.h>
 #include <rte_string_fns.h>
 
 #include "compat.h"
 
+#ifndef SO_PREFER_BUSY_POLL
+#define SO_PREFER_BUSY_POLL 69
+#endif
+#ifndef SO_BUSY_POLL_BUDGET
+#define SO_BUSY_POLL_BUDGET 70
+#endif
+
 #ifndef SOL_XDP
 #define SOL_XDP 283
@@ -65,9 +71,11 @@ RTE_LOG_REGISTER(af_xdp_logtype, pmd.net.af_xdp, NOTICE);
 #define ETH_AF_XDP_DFLT_NUM_DESCS      XSK_RING_CONS__DEFAULT_NUM_DESCS
 #define ETH_AF_XDP_DFLT_START_QUEUE_IDX        0
 #define ETH_AF_XDP_DFLT_QUEUE_COUNT    1
+#define ETH_AF_XDP_DFLT_BUSY_BUDGET    64
+#define ETH_AF_XDP_DFLT_BUSY_TIMEOUT   20
 
-#define ETH_AF_XDP_RX_BATCH_SIZE       32
-#define ETH_AF_XDP_TX_BATCH_SIZE       32
+#define ETH_AF_XDP_RX_BATCH_SIZE       XSK_RING_CONS__DEFAULT_NUM_DESCS
+#define ETH_AF_XDP_TX_BATCH_SIZE       XSK_RING_CONS__DEFAULT_NUM_DESCS
 
 
 struct xsk_umem_info {
@@ -100,6 +108,7 @@ struct pkt_rx_queue {
        struct pkt_tx_queue *pair;
        struct pollfd fds[1];
        int xsk_queue_idx;
+       int busy_budget;
 };
 
 struct tx_stats {
@@ -140,6 +149,7 @@ struct pmd_internals {
 #define ETH_AF_XDP_QUEUE_COUNT_ARG             "queue_count"
 #define ETH_AF_XDP_SHARED_UMEM_ARG             "shared_umem"
 #define ETH_AF_XDP_PROG_ARG                    "xdp_prog"
+#define ETH_AF_XDP_BUDGET_ARG                  "busy_budget"
 
 static const char * const valid_arguments[] = {
        ETH_AF_XDP_IFACE_ARG,
@@ -147,6 +157,7 @@ static const char * const valid_arguments[] = {
        ETH_AF_XDP_QUEUE_COUNT_ARG,
        ETH_AF_XDP_SHARED_UMEM_ARG,
        ETH_AF_XDP_PROG_ARG,
+       ETH_AF_XDP_BUDGET_ARG,
        NULL
 };
 
@@ -255,28 +266,29 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        struct xsk_umem_info *umem = rxq->umem;
        uint32_t idx_rx = 0;
        unsigned long rx_bytes = 0;
-       int rcvd, i;
+       int i;
        struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
 
+       nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
+
+       if (nb_pkts == 0) {
+               rx_syscall_handler(&rxq->fq, rxq->busy_budget, &rxq->fds[0],
+                                  rxq->xsk);
+               return 0;
+       }
+
        /* allocate bufs for fill queue replenishment after rx */
        if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
                AF_XDP_LOG(DEBUG,
                        "Failed to get enough buffers for fq.\n");
+               /* Roll back cached_cons, which xsk_ring_cons__peek
+                * advanced above.
+                */
+               rx->cached_cons -= nb_pkts;
                return 0;
        }
 
-       rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
-
-       if (rcvd == 0) {
-#if defined(XDP_USE_NEED_WAKEUP)
-               if (xsk_ring_prod__needs_wakeup(fq))
-                       (void)poll(rxq->fds, 1, 1000);
-#endif
-
-               goto out;
-       }
-
-       for (i = 0; i < rcvd; i++) {
+       for (i = 0; i < nb_pkts; i++) {
                const struct xdp_desc *desc;
                uint64_t addr;
                uint32_t len;
@@ -301,20 +313,14 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                rx_bytes += len;
        }
 
-       xsk_ring_cons__release(rx, rcvd);
-
-       (void)reserve_fill_queue(umem, rcvd, fq_bufs, fq);
+       xsk_ring_cons__release(rx, nb_pkts);
+       (void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);
 
        /* statistics */
-       rxq->stats.rx_pkts += rcvd;
+       rxq->stats.rx_pkts += nb_pkts;
        rxq->stats.rx_bytes += rx_bytes;
 
-out:
-       if (rcvd != nb_pkts)
-               rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd],
-                                    nb_pkts - rcvd);
-
-       return rcvd;
+       return nb_pkts;
 }
 #else
 static uint16_t
@@ -326,28 +332,31 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        struct xsk_ring_prod *fq = &rxq->fq;
        uint32_t idx_rx = 0;
        unsigned long rx_bytes = 0;
-       int rcvd, i;
+       int i;
        uint32_t free_thresh = fq->size >> 1;
        struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
 
        if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
-               (void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE,
-                                        NULL, fq);
+               (void)reserve_fill_queue(umem, nb_pkts, NULL, fq);
 
-       if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
-               return 0;
-
-       rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
-       if (rcvd == 0) {
+       nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
+       if (nb_pkts == 0) {
 #if defined(XDP_USE_NEED_WAKEUP)
                if (xsk_ring_prod__needs_wakeup(fq))
                        (void)poll(rxq->fds, 1, 1000);
 #endif
+               return 0;
+       }
 
-               goto out;
+       if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts))) {
+               /* Roll back cached_cons, which xsk_ring_cons__peek
+                * advanced above.
+                */
+               rx->cached_cons -= nb_pkts;
+               return 0;
        }
 
-       for (i = 0; i < rcvd; i++) {
+       for (i = 0; i < nb_pkts; i++) {
                const struct xdp_desc *desc;
                uint64_t addr;
                uint32_t len;
@@ -366,26 +375,19 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                bufs[i] = mbufs[i];
        }
 
-       xsk_ring_cons__release(rx, rcvd);
+       xsk_ring_cons__release(rx, nb_pkts);
 
        /* statistics */
-       rxq->stats.rx_pkts += rcvd;
+       rxq->stats.rx_pkts += nb_pkts;
        rxq->stats.rx_bytes += rx_bytes;
 
-out:
-       if (rcvd != nb_pkts)
-               rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
-                                    nb_pkts - rcvd);
-
-       return rcvd;
+       return nb_pkts;
 }
 #endif
 
 static uint16_t
-eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
-       nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
-
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
        return af_xdp_rx_zc(queue, bufs, nb_pkts);
 #else
@@ -393,6 +395,32 @@ eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #endif
 }
 
+static uint16_t
+eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+       uint16_t nb_rx;
+
+       if (likely(nb_pkts <= ETH_AF_XDP_RX_BATCH_SIZE))
+               return af_xdp_rx(queue, bufs, nb_pkts);
+
+       /* Split larger batch into smaller batches of size
+        * ETH_AF_XDP_RX_BATCH_SIZE or less.
+        */
+       nb_rx = 0;
+       while (nb_pkts) {
+               uint16_t ret, n;
+
+               n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
+               ret = af_xdp_rx(queue, &bufs[nb_rx], n);
+               nb_rx = (uint16_t)(nb_rx + ret);
+               nb_pkts = (uint16_t)(nb_pkts - ret);
+               if (ret < n)
+                       break;
+       }
+
+       return nb_rx;
+}
+
 static void
 pull_umem_cq(struct xsk_umem_info *umem, int size, struct xsk_ring_cons *cq)
 {
@@ -424,9 +452,7 @@ kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq)
 
        pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
 
-#if defined(XDP_USE_NEED_WAKEUP)
-       if (xsk_ring_prod__needs_wakeup(&txq->tx))
-#endif
+       if (tx_syscall_needed(&txq->tx))
                while (send(xsk_socket__fd(txq->pair->xsk), NULL,
                            0, MSG_DONTWAIT) < 0) {
                        /* some thing unexpected */
@@ -538,8 +564,6 @@ af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        uint32_t idx_tx;
        struct xsk_ring_cons *cq = &txq->pair->cq;
 
-       nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
-
        pull_umem_cq(umem, nb_pkts, cq);
 
        nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
@@ -578,6 +602,32 @@ af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
        return nb_pkts;
 }
+
+static uint16_t
+af_xdp_tx_cp_batch(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+       uint16_t nb_tx;
+
+       if (likely(nb_pkts <= ETH_AF_XDP_TX_BATCH_SIZE))
+               return af_xdp_tx_cp(queue, bufs, nb_pkts);
+
+       nb_tx = 0;
+       while (nb_pkts) {
+               uint16_t ret, n;
+
+               /* Split larger batch into smaller batches of size
+                * ETH_AF_XDP_TX_BATCH_SIZE or less.
+                */
+               n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
+               ret = af_xdp_tx_cp(queue, &bufs[nb_tx], n);
+               nb_tx = (uint16_t)(nb_tx + ret);
+               nb_pkts = (uint16_t)(nb_pkts - ret);
+               if (ret < n)
+                       break;
+       }
+
+       return nb_tx;
+}
 #endif
 
 static uint16_t
@@ -586,7 +636,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
        return af_xdp_tx_zc(queue, bufs, nb_pkts);
 #else
-       return af_xdp_tx_cp(queue, bufs, nb_pkts);
+       return af_xdp_tx_cp_batch(queue, bufs, nb_pkts);
 #endif
 }
 
@@ -749,6 +799,8 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
 #endif
 
+       dev_info->default_rxportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
+       dev_info->default_txportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
        dev_info->default_rxportconf.nb_queues = 1;
        dev_info->default_txportconf.nb_queues = 1;
        dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
@@ -840,7 +892,6 @@ xdp_umem_destroy(struct xsk_umem_info *umem)
 #endif
 
        rte_free(umem);
-       umem = NULL;
 }
 
 static int
@@ -910,13 +961,13 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
 }
 
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
-static inline uint64_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
+static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
 {
        struct rte_mempool_memhdr *memhdr;
-       uint64_t memhdr_addr, aligned_addr;
+       uintptr_t memhdr_addr, aligned_addr;
 
        memhdr = STAILQ_FIRST(&mp->mem_list);
-       memhdr_addr = (uint64_t)memhdr->addr;
+       memhdr_addr = (uintptr_t)memhdr->addr;
        aligned_addr = memhdr_addr & ~(getpagesize() - 1);
        *align = memhdr_addr - aligned_addr;
 
@@ -968,7 +1019,8 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
 
                umem->mb_pool = mb_pool;
                base_addr = (void *)get_base_addr(mb_pool, &align);
-               umem_size = mb_pool->populated_size * usr_config.frame_size +
+               umem_size = (uint64_t)mb_pool->populated_size *
+                               (uint64_t)usr_config.frame_size +
                                align;
 
                ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
@@ -1096,6 +1148,65 @@ load_custom_xdp_prog(const char *prog_path, int if_index)
        return 0;
 }
 
+/* Detect support for busy polling through setsockopt(). */
+static int
+configure_preferred_busy_poll(struct pkt_rx_queue *rxq)
+{
+       int sock_opt = 1;
+       int fd = xsk_socket__fd(rxq->xsk);
+       int ret = 0;
+
+       ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
+                       (void *)&sock_opt, sizeof(sock_opt));
+       if (ret < 0) {
+               AF_XDP_LOG(DEBUG, "Failed to set SO_PREFER_BUSY_POLL\n");
+               goto err_prefer;
+       }
+
+       sock_opt = ETH_AF_XDP_DFLT_BUSY_TIMEOUT;
+       ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
+                       sizeof(sock_opt));
+       if (ret < 0) {
+               AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL\n");
+               goto err_timeout;
+       }
+
+       sock_opt = rxq->busy_budget;
+       ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
+                       (void *)&sock_opt, sizeof(sock_opt));
+       if (ret < 0) {
+               AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL_BUDGET\n");
+       } else {
+               AF_XDP_LOG(INFO, "Busy polling budget set to: %u\n",
+                                       rxq->busy_budget);
+               return 0;
+       }
+
+       /* setsockopt failure - attempt to restore xsk to default state and
+        * proceed without busy polling support.
+        */
+       sock_opt = 0;
+       ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
+                       sizeof(sock_opt));
+       if (ret < 0) {
+               AF_XDP_LOG(ERR, "Failed to unset SO_BUSY_POLL\n");
+               return -1;
+       }
+
+err_timeout:
+       sock_opt = 0;
+       ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
+                       (void *)&sock_opt, sizeof(sock_opt));
+       if (ret < 0) {
+               AF_XDP_LOG(ERR, "Failed to unset SO_PREFER_BUSY_POLL\n");
+               return -1;
+       }
+
+err_prefer:
+       rxq->busy_budget = 0;
+       return 0;
+}
+
 static int
 xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
              int ring_size)
@@ -1148,11 +1259,21 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
        }
 
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
-       if (rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size)) {
+       ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
+       if (ret) {
                AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
                goto err;
        }
 #endif
+
+       if (rxq->busy_budget) {
+               ret = configure_preferred_busy_poll(rxq);
+               if (ret) {
+                       AF_XDP_LOG(ERR, "Failed to configure busy polling.\n");
+                       goto err;
+               }
+       }
+
        ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
        if (ret) {
                xsk_socket__delete(rxq->xsk);
@@ -1210,6 +1331,9 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
                goto err;
        }
 
+       if (!rxq->busy_budget)
+               AF_XDP_LOG(DEBUG, "Preferred busy polling not enabled\n");
+
        rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
        rxq->fds[0].events = POLLIN;
 
@@ -1316,6 +1440,24 @@ static const struct eth_dev_ops ops = {
        .stats_reset = eth_stats_reset,
 };
 
+/** parse busy_budget argument */
+static int
+parse_budget_arg(const char *key __rte_unused,
+                 const char *value, void *extra_args)
+{
+       int *i = (int *)extra_args;
+       char *end;
+
+       *i = strtol(value, &end, 10);
+       if (*i < 0 || *i > UINT16_MAX) {
+               AF_XDP_LOG(ERR, "Invalid busy_budget %i, must be >= 0 and <= %u\n",
+                               *i, UINT16_MAX);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 /** parse integer from integer argument */
 static int
 parse_integer_arg(const char *key __rte_unused,
@@ -1418,7 +1560,8 @@ xdp_get_channels_info(const char *if_name, int *max_queues,
 
 static int
 parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
-                       int *queue_cnt, int *shared_umem, char *prog_path)
+                       int *queue_cnt, int *shared_umem, char *prog_path,
+                       int *busy_budget)
 {
        int ret;
 
@@ -1449,6 +1592,11 @@ parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
        if (ret < 0)
                goto free_kvlist;
 
+       ret = rte_kvargs_process(kvlist, ETH_AF_XDP_BUDGET_ARG,
+                               &parse_budget_arg, busy_budget);
+       if (ret < 0)
+               goto free_kvlist;
+
 free_kvlist:
        rte_kvargs_free(kvlist);
        return ret;
@@ -1487,7 +1635,7 @@ error:
 static struct rte_eth_dev *
 init_internals(struct rte_vdev_device *dev, const char *if_name,
                int start_queue_idx, int queue_cnt, int shared_umem,
-               const char *prog_path)
+               const char *prog_path, int busy_budget)
 {
        const char *name = rte_vdev_device_name(dev);
        const unsigned int numa_node = dev->device.numa_node;
@@ -1548,6 +1696,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
                internals->rx_queues[i].pair = &internals->tx_queues[i];
                internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
                internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
+               internals->rx_queues[i].busy_budget = busy_budget;
        }
 
        ret = get_iface_info(if_name, &internals->eth_addr,
@@ -1562,6 +1711,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
        eth_dev->data->dev_private = internals;
        eth_dev->data->dev_link = pmd_link;
        eth_dev->data->mac_addrs = &internals->eth_addr;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        eth_dev->dev_ops = &ops;
        eth_dev->rx_pkt_burst = eth_af_xdp_rx;
        eth_dev->tx_pkt_burst = eth_af_xdp_tx;
@@ -1590,6 +1740,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
        int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
        int shared_umem = 0;
        char prog_path[PATH_MAX] = {'\0'};
+       int busy_budget = -1;
        struct rte_eth_dev *eth_dev = NULL;
        const char *name;
 
@@ -1619,7 +1770,8 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
                dev->device.numa_node = rte_socket_id();
 
        if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
-                            &xsk_queue_cnt, &shared_umem, prog_path) < 0) {
+                            &xsk_queue_cnt, &shared_umem, prog_path,
+                            &busy_budget) < 0) {
                AF_XDP_LOG(ERR, "Invalid kvargs value\n");
                return -EINVAL;
        }
@@ -1629,8 +1781,12 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
                return -EINVAL;
        }
 
+       busy_budget = busy_budget == -1 ? ETH_AF_XDP_DFLT_BUSY_BUDGET :
+                                       busy_budget;
+
        eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
-                                       xsk_queue_cnt, shared_umem, prog_path);
+                                       xsk_queue_cnt, shared_umem, prog_path,
+                                       busy_budget);
        if (eth_dev == NULL) {
                AF_XDP_LOG(ERR, "Failed to init internals\n");
                return -1;
@@ -1675,4 +1831,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
                              "start_queue=<int> "
                              "queue_count=<int> "
                              "shared_umem=<int> "
-                             "xdp_prog=<string> ");
+                             "xdp_prog=<string> "
+                             "busy_budget=<int>");
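
For reference, a minimal sketch of how an application could attach this PMD with the new busy_budget devarg from code (the same argument string documented above can equally be passed on the EAL command line via --vdev). The device name, interface name and budget value below are placeholder assumptions; per the driver code, busy_budget=0 leaves preferred busy polling disabled, and omitting the argument falls back to ETH_AF_XDP_DFLT_BUSY_BUDGET (64).

    #include <rte_bus_vdev.h>

    /* Hypothetical helper: assumes rte_eal_init() has already run.
     * Creates an af_xdp vdev on a placeholder interface with a
     * 256-packet preferred busy polling budget.
     */
    static int
    attach_af_xdp_with_busy_poll(void)
    {
            return rte_vdev_init("net_af_xdp0",
                                 "iface=ens786f1,busy_budget=256");
    }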