net/af_xdp: make UMEM configure more readable
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 9c0e935..802f912 100644
@@ -5,7 +5,6 @@
 #include <errno.h>
 #include <stdlib.h>
 #include <string.h>
-#include <poll.h>
 #include <netinet/in.h>
 #include <net/if.h>
 #include <sys/socket.h>
@@ -16,7 +15,7 @@
 #include <linux/ethtool.h>
 #include <linux/sockios.h>
 #include "af_xdp_deps.h"
-#include <bpf/xsk.h>
+#include <bpf/bpf.h>
 
 #include <rte_ethdev.h>
 #include <ethdev_driver.h>
 #include <rte_malloc.h>
 #include <rte_ring.h>
 #include <rte_spinlock.h>
+#include <rte_power_intrinsics.h>
 
 #include "compat.h"
 
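+/* fallback definitions for kernels whose headers predate these socket
+ * options; both were added in Linux 5.11
+ */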
+#ifndef SO_PREFER_BUSY_POLL
+#define SO_PREFER_BUSY_POLL 69
+#endif
+#ifndef SO_BUSY_POLL_BUDGET
+#define SO_BUSY_POLL_BUDGET 70
+#endif
+
 
 #ifndef SOL_XDP
 #define SOL_XDP 283
@@ -54,7 +61,7 @@
 #define PF_XDP AF_XDP
 #endif
 
-RTE_LOG_REGISTER(af_xdp_logtype, pmd.net.af_xdp, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(af_xdp_logtype, NOTICE);
 
 #define AF_XDP_LOG(level, fmt, args...)                        \
        rte_log(RTE_LOG_ ## level, af_xdp_logtype,      \
@@ -65,10 +72,13 @@ RTE_LOG_REGISTER(af_xdp_logtype, pmd.net.af_xdp, NOTICE);
 #define ETH_AF_XDP_DFLT_NUM_DESCS      XSK_RING_CONS__DEFAULT_NUM_DESCS
 #define ETH_AF_XDP_DFLT_START_QUEUE_IDX        0
 #define ETH_AF_XDP_DFLT_QUEUE_COUNT    1
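+/* busy poll budget is in packets, the busy poll timeout in microseconds */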
+#define ETH_AF_XDP_DFLT_BUSY_BUDGET    64
+#define ETH_AF_XDP_DFLT_BUSY_TIMEOUT   20
 
 #define ETH_AF_XDP_RX_BATCH_SIZE       XSK_RING_CONS__DEFAULT_NUM_DESCS
 #define ETH_AF_XDP_TX_BATCH_SIZE       XSK_RING_CONS__DEFAULT_NUM_DESCS
 
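+/* Ethernet header + CRC: L2 overhead not counted in the MTU */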
+#define ETH_AF_XDP_ETH_OVERHEAD                (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
 
 struct xsk_umem_info {
        struct xsk_umem *umem;
@@ -100,6 +110,7 @@ struct pkt_rx_queue {
        struct pkt_tx_queue *pair;
        struct pollfd fds[1];
        int xsk_queue_idx;
+       int busy_budget;
 };
 
 struct tx_stats {
@@ -128,6 +139,7 @@ struct pmd_internals {
        bool shared_umem;
        char prog_path[PATH_MAX];
        bool custom_prog_configured;
+       struct bpf_map *map;
 
        struct rte_ether_addr eth_addr;
 
@@ -140,6 +152,7 @@ struct pmd_internals {
 #define ETH_AF_XDP_QUEUE_COUNT_ARG             "queue_count"
 #define ETH_AF_XDP_SHARED_UMEM_ARG             "shared_umem"
 #define ETH_AF_XDP_PROG_ARG                    "xdp_prog"
+#define ETH_AF_XDP_BUDGET_ARG                  "busy_budget"
 
 static const char * const valid_arguments[] = {
        ETH_AF_XDP_IFACE_ARG,
@@ -147,14 +160,15 @@ static const char * const valid_arguments[] = {
        ETH_AF_XDP_QUEUE_COUNT_ARG,
        ETH_AF_XDP_SHARED_UMEM_ARG,
        ETH_AF_XDP_PROG_ARG,
+       ETH_AF_XDP_BUDGET_ARG,
        NULL
 };
 
 static const struct rte_eth_link pmd_link = {
-       .link_speed = ETH_SPEED_NUM_10G,
-       .link_duplex = ETH_LINK_FULL_DUPLEX,
-       .link_status = ETH_LINK_DOWN,
-       .link_autoneg = ETH_LINK_AUTONEG
+       .link_speed = RTE_ETH_SPEED_NUM_10G,
+       .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+       .link_status = RTE_ETH_LINK_DOWN,
+       .link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
@@ -261,11 +275,17 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
 
        if (nb_pkts == 0) {
-#if defined(XDP_USE_NEED_WAKEUP)
-               if (xsk_ring_prod__needs_wakeup(fq))
-                       recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0,
-                               MSG_DONTWAIT, NULL, NULL);
-#endif
+               /* a non-zero busy budget implies a kernel >= 5.11, so it is
+                * safe to use the recvfrom() syscall, which is supported for
+                * AF_XDP sockets from kernel 5.11 onwards.
+                */
+               if (rxq->busy_budget) {
+                       (void)recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0,
+                                      MSG_DONTWAIT, NULL, NULL);
+               } else if (xsk_ring_prod__needs_wakeup(fq)) {
+                       (void)poll(&rxq->fds[0], 1, 1000);
+               }
 
                return 0;
        }
@@ -336,8 +356,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        if (nb_pkts == 0) {
 #if defined(XDP_USE_NEED_WAKEUP)
                if (xsk_ring_prod__needs_wakeup(fq))
-                       recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0,
-                               MSG_DONTWAIT, NULL, NULL);
+                       (void)poll(rxq->fds, 1, 1000);
 #endif
                return 0;
        }
@@ -446,9 +465,7 @@ kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq)
 
        pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
 
-#if defined(XDP_USE_NEED_WAKEUP)
-       if (xsk_ring_prod__needs_wakeup(&txq->tx))
-#endif
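+       /* tx_syscall_needed() (compat.h) wraps the needs_wakeup check so
+        * that the XDP_USE_NEED_WAKEUP #ifdef lives in one place; without
+        * that flag it always returns true.
+        */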
+       if (tx_syscall_needed(&txq->tx))
                while (send(xsk_socket__fd(txq->pair->xsk), NULL,
                            0, MSG_DONTWAIT) < 0) {
                        /* something unexpected */
@@ -512,7 +529,6 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
                        if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
                                rte_pktmbuf_free(local_mbuf);
-                               kick_tx(txq, cq);
                                goto out;
                        }
 
@@ -536,10 +552,9 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                tx_bytes += mbuf->pkt_len;
        }
 
-       kick_tx(txq, cq);
-
 out:
        xsk_ring_prod__submit(&txq->tx, count);
+       kick_tx(txq, cq);
 
        txq->stats.tx_pkts += count;
        txq->stats.tx_bytes += tx_bytes;
@@ -639,7 +654,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-       dev->data->dev_link.link_status = ETH_LINK_UP;
+       dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
        return 0;
 }
@@ -648,7 +663,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
-       dev->data->dev_link.link_status = ETH_LINK_DOWN;
+       dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
        return 0;
 }
 
@@ -681,67 +696,6 @@ find_internal_resource(struct pmd_internals *port_int)
        return list;
 }
 
-/* Check if the netdev,qid context already exists */
-static inline bool
-ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
-               struct pkt_rx_queue *list_rxq, const char *list_ifname)
-{
-       bool exists = false;
-
-       if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
-                       !strncmp(ifname, list_ifname, IFNAMSIZ)) {
-               AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
-                                       ifname, rxq->xsk_queue_idx);
-               exists = true;
-       }
-
-       return exists;
-}
-
-/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
-static inline int
-get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
-                       struct xsk_umem_info **umem)
-{
-       struct internal_list *list;
-       struct pmd_internals *internals;
-       int i = 0, ret = 0;
-       struct rte_mempool *mb_pool = rxq->mb_pool;
-
-       if (mb_pool == NULL)
-               return ret;
-
-       pthread_mutex_lock(&internal_list_lock);
-
-       TAILQ_FOREACH(list, &internal_list, next) {
-               internals = list->eth_dev->data->dev_private;
-               for (i = 0; i < internals->queue_cnt; i++) {
-                       struct pkt_rx_queue *list_rxq =
-                                               &internals->rx_queues[i];
-                       if (rxq == list_rxq)
-                               continue;
-                       if (mb_pool == internals->rx_queues[i].mb_pool) {
-                               if (ctx_exists(rxq, ifname, list_rxq,
-                                               internals->if_name)) {
-                                       ret = -1;
-                                       goto out;
-                               }
-                               if (__atomic_load_n(
-                                       &internals->rx_queues[i].umem->refcnt,
-                                                       __ATOMIC_ACQUIRE)) {
-                                       *umem = internals->rx_queues[i].umem;
-                                       goto out;
-                               }
-                       }
-               }
-       }
-
-out:
-       pthread_mutex_unlock(&internal_list_lock);
-
-       return ret;
-}
-
 static int
 eth_dev_configure(struct rte_eth_dev *dev)
 {
@@ -774,6 +728,38 @@ eth_dev_configure(struct rte_eth_dev *dev)
        return 0;
 }
 
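+/* index into pmc->opaque[] where the producer snapshot is stored */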
+#define CLB_VAL_IDX 0
+static int
+eth_monitor_callback(const uint64_t value,
+               const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
+{
+       const uint64_t v = opaque[CLB_VAL_IDX];
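+       /* only the low 32 bits matter: the producer index is 32-bit */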
+       const uint64_t m = (uint32_t)~0;
+
+       /* if the value has changed, abort entering power optimized state */
+       return (value & m) == v ? 0 : -1;
+}
+
+static int
+eth_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
+{
+       struct pkt_rx_queue *rxq = rx_queue;
+       unsigned int *prod = rxq->rx.producer;
+       const uint32_t cur_val = rxq->rx.cached_prod; /* use cached value */
+
+       /* watch for changes in producer ring */
+       pmc->addr = (void *)prod;
+
+       /* store current value */
+       pmc->opaque[CLB_VAL_IDX] = cur_val;
+       pmc->fn = eth_monitor_callback;
+
+       /* AF_XDP producer ring index is 32-bit */
+       pmc->size = sizeof(uint32_t);
+
+       return 0;
+}
+
 static int
 eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
@@ -781,20 +767,22 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        dev_info->if_index = internals->if_index;
        dev_info->max_mac_addrs = 1;
-       dev_info->max_rx_pktlen = ETH_FRAME_LEN;
        dev_info->max_rx_queues = internals->queue_cnt;
        dev_info->max_tx_queues = internals->queue_cnt;
 
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
-       dev_info->max_mtu = getpagesize() -
-                               sizeof(struct rte_mempool_objhdr) -
-                               sizeof(struct rte_mbuf) -
-                               RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM;
+       dev_info->max_rx_pktlen = getpagesize() -
+                                 sizeof(struct rte_mempool_objhdr) -
+                                 sizeof(struct rte_mbuf) -
+                                 RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM;
 #else
-       dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
+       dev_info->max_rx_pktlen = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
 #endif
+       dev_info->max_mtu = dev_info->max_rx_pktlen - ETH_AF_XDP_ETH_OVERHEAD;
 
+       dev_info->default_rxportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
+       dev_info->default_txportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
        dev_info->default_rxportconf.nb_queues = 1;
        dev_info->default_txportconf.nb_queues = 1;
        dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
@@ -942,11 +930,6 @@ eth_dev_close(struct rte_eth_dev *dev)
        return 0;
 }
 
-static void
-eth_queue_release(void *q __rte_unused)
-{
-}
-
 static int
 eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
@@ -968,6 +951,66 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
        return aligned_addr;
 }
 
+/* Check if the netdev,qid context already exists */
+static inline bool
+ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
+               struct pkt_rx_queue *list_rxq, const char *list_ifname)
+{
+       bool exists = false;
+
+       if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
+                       !strncmp(ifname, list_ifname, IFNAMSIZ)) {
+               AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
+                                       ifname, rxq->xsk_queue_idx);
+               exists = true;
+       }
+
+       return exists;
+}
+
+/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
+static inline int
+get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
+                       struct xsk_umem_info **umem)
+{
+       struct internal_list *list;
+       struct pmd_internals *internals;
+       int i = 0, ret = 0;
+       struct rte_mempool *mb_pool = rxq->mb_pool;
+
+       if (mb_pool == NULL)
+               return ret;
+
+       pthread_mutex_lock(&internal_list_lock);
+
+       TAILQ_FOREACH(list, &internal_list, next) {
+               internals = list->eth_dev->data->dev_private;
+               for (i = 0; i < internals->queue_cnt; i++) {
+                       struct pkt_rx_queue *list_rxq =
+                                               &internals->rx_queues[i];
+                       if (rxq == list_rxq)
+                               continue;
+                       if (mb_pool == internals->rx_queues[i].mb_pool) {
+                               if (ctx_exists(rxq, ifname, list_rxq,
+                                               internals->if_name)) {
+                                       ret = -1;
+                                       goto out;
+                               }
+                               if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
+                                                   __ATOMIC_ACQUIRE)) {
+                                       *umem = internals->rx_queues[i].umem;
+                                       goto out;
+                               }
+                       }
+               }
+       }
+
+out:
+       pthread_mutex_unlock(&internal_list_lock);
+
+       return ret;
+}
+
 static struct
 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
                                  struct pkt_rx_queue *rxq)
@@ -1035,6 +1078,12 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
                __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
        }
 
+       return umem;
+
+err:
+       xdp_umem_destroy(umem);
+       return NULL;
+}
 #else
 static struct
 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
@@ -1095,20 +1144,19 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
        }
        umem->mz = mz;
 
-#endif
        return umem;
 
 err:
        xdp_umem_destroy(umem);
        return NULL;
 }
+#endif
 
 static int
-load_custom_xdp_prog(const char *prog_path, int if_index)
+load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map)
 {
        int ret, prog_fd = -1;
        struct bpf_object *obj;
-       struct bpf_map *map;
 
        ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
        if (ret) {
@@ -1118,11 +1166,10 @@ load_custom_xdp_prog(const char *prog_path, int if_index)
 
        /*
         * The loaded program must provision for a map of xsks, such that some
-        * traffic can be redirected to userspace. When the xsk is created,
-        * libbpf inserts it into the map.
+        * traffic can be redirected to userspace.
         */
-       map = bpf_object__find_map_by_name(obj, "xsks_map");
-       if (!map) {
+       *map = bpf_object__find_map_by_name(obj, "xsks_map");
+       if (!*map) {
                AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path);
                return -1;
        }
@@ -1142,6 +1189,65 @@ load_custom_xdp_prog(const char *prog_path, int if_index)
        return 0;
 }
 
+/* Detect support for busy polling through setsockopt(). */
+static int
+configure_preferred_busy_poll(struct pkt_rx_queue *rxq)
+{
+       int sock_opt = 1;
+       int fd = xsk_socket__fd(rxq->xsk);
+       int ret = 0;
+
+       ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
+                       (void *)&sock_opt, sizeof(sock_opt));
+       if (ret < 0) {
+               AF_XDP_LOG(DEBUG, "Failed to set SO_PREFER_BUSY_POLL\n");
+               goto err_prefer;
+       }
+
+       sock_opt = ETH_AF_XDP_DFLT_BUSY_TIMEOUT;
+       ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
+                       sizeof(sock_opt));
+       if (ret < 0) {
+               AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL\n");
+               goto err_timeout;
+       }
+
+       sock_opt = rxq->busy_budget;
+       ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
+                       (void *)&sock_opt, sizeof(sock_opt));
+       if (ret < 0) {
+               AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL_BUDGET\n");
+       } else {
+               AF_XDP_LOG(INFO, "Busy polling budget set to: %u\n",
+                                       rxq->busy_budget);
+               return 0;
+       }
+
+       /* setsockopt failure - attempt to restore xsk to default state and
+        * proceed without busy polling support.
+        */
+       sock_opt = 0;
+       ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
+                       sizeof(sock_opt));
+       if (ret < 0) {
+               AF_XDP_LOG(ERR, "Failed to unset SO_BUSY_POLL\n");
+               return -1;
+       }
+
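+       /* fall through: SO_PREFER_BUSY_POLL must be unset as well */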
+err_timeout:
+       sock_opt = 0;
+       ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
+                       (void *)&sock_opt, sizeof(sock_opt));
+       if (ret < 0) {
+               AF_XDP_LOG(ERR, "Failed to unset SO_PREFER_BUSY_POLL\n");
+               return -1;
+       }
+
+err_prefer:
+       rxq->busy_budget = 0;
+       return 0;
+}
+
 static int
 xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
              int ring_size)
@@ -1170,13 +1276,15 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
        if (strnlen(internals->prog_path, PATH_MAX) &&
                                !internals->custom_prog_configured) {
                ret = load_custom_xdp_prog(internals->prog_path,
-                                          internals->if_index);
+                                          internals->if_index,
+                                          &internals->map);
                if (ret) {
                        AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
                                        internals->prog_path);
                        goto err;
                }
                internals->custom_prog_configured = 1;
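+               /* the custom program is in place, stop libbpf from loading
+                * its default XDP program when the socket is created below
+                */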
+               cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
        }
 
        if (internals->shared_umem)
@@ -1193,6 +1301,19 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
                goto err;
        }
 
+       /* insert the xsk into the xsks_map */
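+       /* with XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD set, libbpf skips this
+        * step, so the socket is added to the map manually
+        */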
+       if (internals->custom_prog_configured) {
+               int err, fd;
+
+               fd = xsk_socket__fd(rxq->xsk);
+               err = bpf_map_update_elem(bpf_map__fd(internals->map),
+                                         &rxq->xsk_queue_idx, &fd, 0);
+               if (err) {
+                       AF_XDP_LOG(ERR, "Failed to insert xsk in map.\n");
+                       goto err;
+               }
+       }
+
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
        ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
        if (ret) {
@@ -1200,6 +1321,15 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
                goto err;
        }
 #endif
+
+       if (rxq->busy_budget) {
+               ret = configure_preferred_busy_poll(rxq);
+               if (ret) {
+                       AF_XDP_LOG(ERR, "Failed to configure busy polling.\n");
+                       goto err;
+               }
+       }
+
        ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
        if (ret) {
                xsk_socket__delete(rxq->xsk);
@@ -1257,6 +1387,9 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
                goto err;
        }
 
+       if (!rxq->busy_budget)
+               AF_XDP_LOG(DEBUG, "Preferred busy polling not enabled\n");
+
        rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
        rxq->fds[0].events = POLLIN;
 
@@ -1356,13 +1489,30 @@ static const struct eth_dev_ops ops = {
        .promiscuous_disable = eth_dev_promiscuous_disable,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
-       .rx_queue_release = eth_queue_release,
-       .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
+       .get_monitor_addr = eth_get_monitor_addr,
 };
 
+/** parse busy_budget argument */
+static int
+parse_budget_arg(const char *key __rte_unused,
+                 const char *value, void *extra_args)
+{
+       int *i = (int *)extra_args;
+       char *end;
+
+       *i = strtol(value, &end, 10);
+       if (*i < 0 || *i > UINT16_MAX) {
+               AF_XDP_LOG(ERR, "Invalid busy_budget %i, must be >= 0 and <= %u\n",
+                               *i, UINT16_MAX);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 /** parse integer from integer argument */
 static int
 parse_integer_arg(const char *key __rte_unused,
@@ -1465,7 +1615,8 @@ xdp_get_channels_info(const char *if_name, int *max_queues,
 
 static int
 parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
-                       int *queue_cnt, int *shared_umem, char *prog_path)
+                       int *queue_cnt, int *shared_umem, char *prog_path,
+                       int *busy_budget)
 {
        int ret;
 
@@ -1496,6 +1647,11 @@ parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
        if (ret < 0)
                goto free_kvlist;
 
+       ret = rte_kvargs_process(kvlist, ETH_AF_XDP_BUDGET_ARG,
+                               &parse_budget_arg, busy_budget);
+       if (ret < 0)
+               goto free_kvlist;
+
 free_kvlist:
        rte_kvargs_free(kvlist);
        return ret;
@@ -1534,7 +1690,7 @@ error:
 static struct rte_eth_dev *
 init_internals(struct rte_vdev_device *dev, const char *if_name,
                int start_queue_idx, int queue_cnt, int shared_umem,
-               const char *prog_path)
+               const char *prog_path, int busy_budget)
 {
        const char *name = rte_vdev_device_name(dev);
        const unsigned int numa_node = dev->device.numa_node;
@@ -1595,6 +1751,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
                internals->rx_queues[i].pair = &internals->tx_queues[i];
                internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
                internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
+               internals->rx_queues[i].busy_budget = busy_budget;
        }
 
        ret = get_iface_info(if_name, &internals->eth_addr,
@@ -1638,6 +1795,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
        int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
        int shared_umem = 0;
        char prog_path[PATH_MAX] = {'\0'};
+       int busy_budget = -1;
        struct rte_eth_dev *eth_dev = NULL;
        const char *name;
 
@@ -1645,16 +1803,11 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
                rte_vdev_device_name(dev));
 
        name = rte_vdev_device_name(dev);
-       if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
-               strlen(rte_vdev_device_args(dev)) == 0) {
-               eth_dev = rte_eth_dev_attach_secondary(name);
-               if (eth_dev == NULL) {
-                       AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
-                       return -EINVAL;
-               }
-               eth_dev->dev_ops = &ops;
-               rte_eth_dev_probing_finish(eth_dev);
-               return 0;
+       if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+               AF_XDP_LOG(ERR, "Failed to probe %s. "
+                               "AF_XDP PMD does not support secondary processes.\n",
+                               name);
+               return -ENOTSUP;
        }
 
        kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
@@ -1667,7 +1820,8 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
                dev->device.numa_node = rte_socket_id();
 
        if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
-                            &xsk_queue_cnt, &shared_umem, prog_path) < 0) {
+                            &xsk_queue_cnt, &shared_umem, prog_path,
+                            &busy_budget) < 0) {
                AF_XDP_LOG(ERR, "Invalid kvargs value\n");
                return -EINVAL;
        }
@@ -1677,8 +1831,12 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
                return -EINVAL;
        }
 
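+       /* a busy_budget of -1 means the kvarg was not given: use the default */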
+       busy_budget = busy_budget == -1 ? ETH_AF_XDP_DFLT_BUSY_BUDGET :
+                                       busy_budget;
+
        eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
-                                       xsk_queue_cnt, shared_umem, prog_path);
+                                       xsk_queue_cnt, shared_umem, prog_path,
+                                       busy_budget);
        if (eth_dev == NULL) {
                AF_XDP_LOG(ERR, "Failed to init internals\n");
                return -1;
@@ -1723,4 +1881,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
                              "start_queue=<int> "
                              "queue_count=<int> "
                              "shared_umem=<int> "
-                             "xdp_prog=<string> ");
+                             "xdp_prog=<string> "
+                             "busy_budget=<int>");