diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 01f462b465..3957227bf0 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -19,8 +19,8 @@
 #include
 #include
-#include
-#include
+#include
+#include
 #include
 #include
 #include
@@ -126,6 +126,8 @@ struct pmd_internals {
 int max_queue_cnt;
 int combined_queue_cnt;
 bool shared_umem;
+ char prog_path[PATH_MAX];
+ bool custom_prog_configured;
 
 struct rte_ether_addr eth_addr;
 
@@ -137,12 +139,14 @@ struct pmd_internals {
 #define ETH_AF_XDP_START_QUEUE_ARG "start_queue"
 #define ETH_AF_XDP_QUEUE_COUNT_ARG "queue_count"
 #define ETH_AF_XDP_SHARED_UMEM_ARG "shared_umem"
+#define ETH_AF_XDP_PROG_ARG "xdp_prog"
 
 static const char * const valid_arguments[] = {
 ETH_AF_XDP_IFACE_ARG,
 ETH_AF_XDP_START_QUEUE_ARG,
 ETH_AF_XDP_QUEUE_COUNT_ARG,
 ETH_AF_XDP_SHARED_UMEM_ARG,
+ ETH_AF_XDP_PROG_ARG,
 NULL
 };
 
@@ -251,28 +255,32 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 struct xsk_umem_info *umem = rxq->umem;
 uint32_t idx_rx = 0;
 unsigned long rx_bytes = 0;
- int rcvd, i;
+ int i;
 struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
 
- /* allocate bufs for fill queue replenishment after rx */
- if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
- AF_XDP_LOG(DEBUG,
- "Failed to get enough buffers for fq.\n");
- return 0;
- }
+ nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
 
- rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
-
- if (rcvd == 0) {
+ if (nb_pkts == 0) {
 #if defined(XDP_USE_NEED_WAKEUP)
 if (xsk_ring_prod__needs_wakeup(fq))
 (void)poll(rxq->fds, 1, 1000);
 #endif
- goto out;
+ return 0;
+ }
+
+ /* allocate bufs for fill queue replenishment after rx */
+ if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
+ AF_XDP_LOG(DEBUG,
+ "Failed to get enough buffers for fq.\n");
+ /* rollback cached_cons which is added by
+ * xsk_ring_cons__peek
+ */
+ rx->cached_cons -= nb_pkts;
+ return 0;
 }
 
- for (i = 0; i < rcvd; i++) {
+ for (i = 0; i < nb_pkts; i++) {
 const struct xdp_desc *desc;
 uint64_t addr;
 uint32_t len;
@@ -297,20 +305,14 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 rx_bytes += len;
 }
 
- xsk_ring_cons__release(rx, rcvd);
-
- (void)reserve_fill_queue(umem, rcvd, fq_bufs, fq);
+ xsk_ring_cons__release(rx, nb_pkts);
+ (void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);
 
 /* statistics */
- rxq->stats.rx_pkts += rcvd;
+ rxq->stats.rx_pkts += nb_pkts;
 rxq->stats.rx_bytes += rx_bytes;
 
-out:
- if (rcvd != nb_pkts)
- rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd],
- nb_pkts - rcvd);
-
- return rcvd;
+ return nb_pkts;
 }
 #else
 static uint16_t
@@ -322,7 +324,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 struct xsk_ring_prod *fq = &rxq->fq;
 uint32_t idx_rx = 0;
 unsigned long rx_bytes = 0;
- int rcvd, i;
+ int i;
 uint32_t free_thresh = fq->size >> 1;
 struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
@@ -330,20 +332,24 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 (void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE,
 NULL, fq);
 
- if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
- return 0;
-
- rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
- 
if (rcvd == 0) { + nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx); + if (nb_pkts == 0) { #if defined(XDP_USE_NEED_WAKEUP) if (xsk_ring_prod__needs_wakeup(fq)) (void)poll(rxq->fds, 1, 1000); #endif + return 0; + } - goto out; + if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts))) { + /* rollback cached_cons which is added by + * xsk_ring_cons__peek + */ + rx->cached_cons -= nb_pkts; + return 0; } - for (i = 0; i < rcvd; i++) { + for (i = 0; i < nb_pkts; i++) { const struct xdp_desc *desc; uint64_t addr; uint32_t len; @@ -362,18 +368,13 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) bufs[i] = mbufs[i]; } - xsk_ring_cons__release(rx, rcvd); + xsk_ring_cons__release(rx, nb_pkts); /* statistics */ - rxq->stats.rx_pkts += rcvd; + rxq->stats.rx_pkts += nb_pkts; rxq->stats.rx_bytes += rx_bytes; -out: - if (rcvd != nb_pkts) - rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd], - nb_pkts - rcvd); - - return rcvd; + return nb_pkts; } #endif @@ -595,10 +596,11 @@ eth_dev_start(struct rte_eth_dev *dev) } /* This function gets called when the current port gets stopped. */ -static void +static int eth_dev_stop(struct rte_eth_dev *dev) { dev->data->dev_link.link_status = ETH_LINK_DOWN; + return 0; } /* Find ethdev in list */ @@ -630,16 +632,35 @@ find_internal_resource(struct pmd_internals *port_int) return list; } +/* Check if the netdev,qid context already exists */ +static inline bool +ctx_exists(struct pkt_rx_queue *rxq, const char *ifname, + struct pkt_rx_queue *list_rxq, const char *list_ifname) +{ + bool exists = false; + + if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx && + !strncmp(ifname, list_ifname, IFNAMSIZ)) { + AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n", + ifname, rxq->xsk_queue_idx); + exists = true; + } + + return exists; +} + /* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */ -static inline struct xsk_umem_info * -get_shared_umem(struct pkt_rx_queue *rxq) { +static inline int +get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname, + struct xsk_umem_info **umem) +{ struct internal_list *list; struct pmd_internals *internals; - int i = 0; + int i = 0, ret = 0; struct rte_mempool *mb_pool = rxq->mb_pool; if (mb_pool == NULL) - return NULL; + return ret; pthread_mutex_lock(&internal_list_lock); @@ -651,20 +672,25 @@ get_shared_umem(struct pkt_rx_queue *rxq) { if (rxq == list_rxq) continue; if (mb_pool == internals->rx_queues[i].mb_pool) { + if (ctx_exists(rxq, ifname, list_rxq, + internals->if_name)) { + ret = -1; + goto out; + } if (__atomic_load_n( &internals->rx_queues[i].umem->refcnt, __ATOMIC_ACQUIRE)) { - pthread_mutex_unlock( - &internal_list_lock); - return internals->rx_queues[i].umem; + *umem = internals->rx_queues[i].umem; + goto out; } } } } +out: pthread_mutex_unlock(&internal_list_lock); - return NULL; + return ret; } static int @@ -811,16 +837,18 @@ xdp_umem_destroy(struct xsk_umem_info *umem) #endif rte_free(umem); - umem = NULL; } -static void +static int eth_dev_close(struct rte_eth_dev *dev) { struct pmd_internals *internals = dev->data->dev_private; struct pkt_rx_queue *rxq; int i; + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n", rte_socket_id()); @@ -861,6 +889,8 @@ eth_dev_close(struct rte_eth_dev *dev) rte_free(list); } } + + return 0; } static void @@ -876,13 +906,13 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused, } #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) -static inline uint64_t 
get_base_addr(struct rte_mempool *mp, uint64_t *align) +static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align) { struct rte_mempool_memhdr *memhdr; - uint64_t memhdr_addr, aligned_addr; + uintptr_t memhdr_addr, aligned_addr; memhdr = STAILQ_FIRST(&mp->mem_list); - memhdr_addr = (uint64_t)memhdr->addr; + memhdr_addr = (uintptr_t)memhdr->addr; aligned_addr = memhdr_addr & ~(getpagesize() - 1); *align = memhdr_addr - aligned_addr; @@ -904,7 +934,9 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, uint64_t umem_size, align = 0; if (internals->shared_umem) { - umem = get_shared_umem(rxq); + if (get_shared_umem(rxq, internals->if_name, &umem) < 0) + return NULL; + if (umem != NULL && __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) < umem->max_xsks) { @@ -932,7 +964,8 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, umem->mb_pool = mb_pool; base_addr = (void *)get_base_addr(mb_pool, &align); - umem_size = mb_pool->populated_size * usr_config.frame_size + + umem_size = (uint64_t)mb_pool->populated_size * + (uint64_t)usr_config.frame_size + align; ret = xsk_umem__create(&umem->umem, base_addr, umem_size, @@ -1021,6 +1054,45 @@ err: return NULL; } +static int +load_custom_xdp_prog(const char *prog_path, int if_index) +{ + int ret, prog_fd = -1; + struct bpf_object *obj; + struct bpf_map *map; + + ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (ret) { + AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path); + return ret; + } + + /* + * The loaded program must provision for a map of xsks, such that some + * traffic can be redirected to userspace. When the xsk is created, + * libbpf inserts it into the map. + */ + map = bpf_object__find_map_by_name(obj, "xsks_map"); + if (!map) { + AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path); + return -1; + } + + /* Link the program with the given network device */ + ret = bpf_set_link_xdp_fd(if_index, prog_fd, + XDP_FLAGS_UPDATE_IF_NOEXIST); + if (ret) { + AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n", + prog_fd); + return -1; + } + + AF_XDP_LOG(INFO, "Successfully loaded XDP program %s with fd %d\n", + prog_path, prog_fd); + + return 0; +} + static int xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, int ring_size) @@ -1046,6 +1118,18 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, cfg.bind_flags |= XDP_USE_NEED_WAKEUP; #endif + if (strnlen(internals->prog_path, PATH_MAX) && + !internals->custom_prog_configured) { + ret = load_custom_xdp_prog(internals->prog_path, + internals->if_index); + if (ret) { + AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n", + internals->prog_path); + goto err; + } + internals->custom_prog_configured = 1; + } + if (internals->shared_umem) ret = create_shared_socket(&rxq->xsk, internals->if_name, rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx, @@ -1061,7 +1145,8 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, } #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) - if (rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size)) { + ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size); + if (ret) { AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n"); goto err; } @@ -1264,6 +1349,30 @@ parse_name_arg(const char *key __rte_unused, return 0; } +/** parse xdp prog argument */ +static int +parse_prog_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + char *path = extra_args; + + if 
(strnlen(value, PATH_MAX) == PATH_MAX) { + AF_XDP_LOG(ERR, "Invalid path %s, should be less than %u bytes.\n", + value, PATH_MAX); + return -EINVAL; + } + + if (access(value, F_OK) != 0) { + AF_XDP_LOG(ERR, "Error accessing %s: %s\n", + value, strerror(errno)); + return -EINVAL; + } + + strlcpy(path, value, PATH_MAX); + + return 0; +} + static int xdp_get_channels_info(const char *if_name, int *max_queues, int *combined_queues) @@ -1278,7 +1387,7 @@ xdp_get_channels_info(const char *if_name, int *max_queues, channels.cmd = ETHTOOL_GCHANNELS; ifr.ifr_data = (void *)&channels; - strncpy(ifr.ifr_name, if_name, IFNAMSIZ); + strlcpy(ifr.ifr_name, if_name, IFNAMSIZ); ret = ioctl(fd, SIOCETHTOOL, &ifr); if (ret) { if (errno == EOPNOTSUPP) { @@ -1307,7 +1416,7 @@ xdp_get_channels_info(const char *if_name, int *max_queues, static int parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue, - int *queue_cnt, int *shared_umem) + int *queue_cnt, int *shared_umem, char *prog_path) { int ret; @@ -1333,6 +1442,11 @@ parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue, if (ret < 0) goto free_kvlist; + ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PROG_ARG, + &parse_prog_arg, prog_path); + if (ret < 0) + goto free_kvlist; + free_kvlist: rte_kvargs_free(kvlist); return ret; @@ -1370,7 +1484,8 @@ error: static struct rte_eth_dev * init_internals(struct rte_vdev_device *dev, const char *if_name, - int start_queue_idx, int queue_cnt, int shared_umem) + int start_queue_idx, int queue_cnt, int shared_umem, + const char *prog_path) { const char *name = rte_vdev_device_name(dev); const unsigned int numa_node = dev->device.numa_node; @@ -1386,6 +1501,8 @@ init_internals(struct rte_vdev_device *dev, const char *if_name, internals->start_queue_idx = start_queue_idx; internals->queue_cnt = queue_cnt; strlcpy(internals->if_name, if_name, IFNAMSIZ); + strlcpy(internals->prog_path, prog_path, PATH_MAX); + internals->custom_prog_configured = 0; #ifndef ETH_AF_XDP_SHARED_UMEM if (shared_umem) { @@ -1443,11 +1560,10 @@ init_internals(struct rte_vdev_device *dev, const char *if_name, eth_dev->data->dev_private = internals; eth_dev->data->dev_link = pmd_link; eth_dev->data->mac_addrs = &internals->eth_addr; + eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; eth_dev->dev_ops = &ops; eth_dev->rx_pkt_burst = eth_af_xdp_rx; eth_dev->tx_pkt_burst = eth_af_xdp_tx; - /* Let rte_eth_dev_close() release the port resources. 
*/
- eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
 
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
 AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
@@ -1472,6 +1588,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
 int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
 int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
 int shared_umem = 0;
+ char prog_path[PATH_MAX] = {'\0'};
 struct rte_eth_dev *eth_dev = NULL;
 const char *name;
 
@@ -1501,7 +1618,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
 dev->device.numa_node = rte_socket_id();
 
 if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
- &xsk_queue_cnt, &shared_umem) < 0) {
+ &xsk_queue_cnt, &shared_umem, prog_path) < 0) {
 AF_XDP_LOG(ERR, "Invalid kvargs value\n");
 return -EINVAL;
 }
@@ -1512,7 +1629,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
 }
 
 eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
- xsk_queue_cnt, shared_umem);
+ xsk_queue_cnt, shared_umem, prog_path);
 if (eth_dev == NULL) {
 AF_XDP_LOG(ERR, "Failed to init internals\n");
 return -1;
@@ -1556,4 +1673,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
 "iface=<string> "
 "start_queue=<int> "
 "queue_count=<int> "
- "shared_umem=<int> ");
+ "shared_umem=<int> "
+ "xdp_prog=<string> ");
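---
Notes on two techniques in this diff, with illustrative sketches (not part of
the patch itself):

1) Rx peek/rollback ordering. Both Rx paths now peek the ring before
allocating mbufs, and roll the peek back if allocation fails. This is safe
because xsk_ring_cons__peek() only advances the ring's locally cached
consumer index (cached_cons); the kernel-visible consumer index moves only
when xsk_ring_cons__release() is called. A minimal sketch of the pattern,
assuming the libbpf xsk_ring_* and DPDK mbuf APIs; the function name is
illustrative:

	/* Peek first, allocate second; "un-peek" on allocation failure. */
	static uint16_t
	rx_peek_rollback_sketch(struct xsk_ring_cons *rx,
				struct rte_mempool *pool,
				struct rte_mbuf **bufs, uint16_t nb_pkts)
	{
		uint32_t idx_rx = 0;

		nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
		if (nb_pkts == 0)
			return 0;

		if (rte_pktmbuf_alloc_bulk(pool, bufs, nb_pkts) != 0) {
			/* Only the cached copy of the consumer index was
			 * advanced, so undoing the peek is a plain
			 * subtraction.
			 */
			rx->cached_cons -= nb_pkts;
			return 0;
		}

		/* ... fill bufs from descriptors idx_rx onwards ... */
		xsk_ring_cons__release(rx, nb_pkts);
		return nb_pkts;
	}

Compared with the old order (allocate, then peek), a short or empty peek no
longer forces freshly allocated mbufs to be returned to the pool on every
burst.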
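2) Custom XDP program. load_custom_xdp_prog() only requires that the object
file passed via the new "xdp_prog" devarg contain a BPF map named "xsks_map";
libbpf inserts the AF_XDP socket into that map when the socket is created.
A minimal sketch of such a program follows; the map size and the redirect
policy are assumptions, it needs a libbpf new enough for BTF-defined maps,
and passing a default action to bpf_redirect_map() needs roughly kernel
v5.3+:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* The PMD looks this map up by name; libbpf fills in the fd. */
	struct {
		__uint(type, BPF_MAP_TYPE_XSKMAP);
		__uint(max_entries, 64);
		__uint(key_size, sizeof(int));
		__uint(value_size, sizeof(int));
	} xsks_map SEC(".maps");

	SEC("xdp")
	int xdp_sock_prog(struct xdp_md *ctx)
	{
		/* Redirect to the socket bound to this Rx queue, or fall
		 * back to XDP_PASS when no socket is attached.
		 */
		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index,
					XDP_PASS);
	}

	char _license[] SEC("license") = "GPL";

With the devargs this patch registers, such a program is loaded once per
port when the first socket is configured, e.g. (interface name and path are
placeholders):

	--vdev net_af_xdp0,iface=eth0,xdp_prog=/path/to/xdp_prog.o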