diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 8a3fa19043..802f912cb7 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -15,7 +15,7 @@
 #include
 #include
 #include "af_xdp_deps.h"
-#include
+#include
 #include
 #include
@@ -139,6 +139,7 @@ struct pmd_internals {
 	bool shared_umem;
 	char prog_path[PATH_MAX];
 	bool custom_prog_configured;
+	struct bpf_map *map;
 
 	struct rte_ether_addr eth_addr;
 
@@ -164,10 +165,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
@@ -653,7 +654,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -662,7 +663,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -695,67 +696,6 @@ find_internal_resource(struct pmd_internals *port_int)
 	return list;
 }
 
-/* Check if the netdev,qid context already exists */
-static inline bool
-ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
-		struct pkt_rx_queue *list_rxq, const char *list_ifname)
-{
-	bool exists = false;
-
-	if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
-			!strncmp(ifname, list_ifname, IFNAMSIZ)) {
-		AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
-					ifname, rxq->xsk_queue_idx);
-		exists = true;
-	}
-
-	return exists;
-}
-
-/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
-static inline int
-get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
-			struct xsk_umem_info **umem)
-{
-	struct internal_list *list;
-	struct pmd_internals *internals;
-	int i = 0, ret = 0;
-	struct rte_mempool *mb_pool = rxq->mb_pool;
-
-	if (mb_pool == NULL)
-		return ret;
-
-	pthread_mutex_lock(&internal_list_lock);
-
-	TAILQ_FOREACH(list, &internal_list, next) {
-		internals = list->eth_dev->data->dev_private;
-		for (i = 0; i < internals->queue_cnt; i++) {
-			struct pkt_rx_queue *list_rxq =
-						&internals->rx_queues[i];
-			if (rxq == list_rxq)
-				continue;
-			if (mb_pool == internals->rx_queues[i].mb_pool) {
-				if (ctx_exists(rxq, ifname, list_rxq,
-						internals->if_name)) {
-					ret = -1;
-					goto out;
-				}
-				if (__atomic_load_n(
-					&internals->rx_queues[i].umem->refcnt,
-					__ATOMIC_ACQUIRE)) {
-					*umem = internals->rx_queues[i].umem;
-					goto out;
-				}
-			}
-		}
-	}
-
-out:
-	pthread_mutex_unlock(&internal_list_lock);
-
-	return ret;
-}
-
 static int
 eth_dev_configure(struct rte_eth_dev *dev)
 {
@@ -1011,6 +951,66 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
 	return aligned_addr;
 }
 
+/* Check if the netdev,qid context already exists */
+static inline bool
+ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
+		struct pkt_rx_queue *list_rxq, const char *list_ifname)
+{
+	bool exists = false;
+
+	if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
+			!strncmp(ifname, list_ifname, IFNAMSIZ)) {
+		AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
+					ifname, rxq->xsk_queue_idx);
+		exists = true;
+	}
+
+	return exists;
+}
+
+/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
+static inline int
+get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
+			struct xsk_umem_info **umem)
+{
+	struct internal_list *list;
+	struct pmd_internals *internals;
+	int i = 0, ret = 0;
+	struct rte_mempool *mb_pool = rxq->mb_pool;
+
+	if (mb_pool == NULL)
+		return ret;
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(list, &internal_list, next) {
+		internals = list->eth_dev->data->dev_private;
+		for (i = 0; i < internals->queue_cnt; i++) {
+			struct pkt_rx_queue *list_rxq =
+						&internals->rx_queues[i];
+			if (rxq == list_rxq)
+				continue;
+			if (mb_pool == internals->rx_queues[i].mb_pool) {
+				if (ctx_exists(rxq, ifname, list_rxq,
+						internals->if_name)) {
+					ret = -1;
+					goto out;
+				}
+				if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
+						    __ATOMIC_ACQUIRE)) {
+					*umem = internals->rx_queues[i].umem;
+					goto out;
+				}
+			}
+		}
+	}
+
+out:
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return ret;
+}
+
 static struct
 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
 				  struct pkt_rx_queue *rxq)
@@ -1078,6 +1078,12 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
 		__atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
 	}
 
+	return umem;
+
+err:
+	xdp_umem_destroy(umem);
+	return NULL;
+}
 #else
 static struct
 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
@@ -1138,20 +1144,19 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
 	}
 	umem->mz = mz;
 
-#endif
 	return umem;
 
 err:
 	xdp_umem_destroy(umem);
 	return NULL;
 }
+#endif
 
 static int
-load_custom_xdp_prog(const char *prog_path, int if_index)
+load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map)
 {
 	int ret, prog_fd = -1;
 	struct bpf_object *obj;
-	struct bpf_map *map;
 
 	ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
 	if (ret) {
@@ -1161,11 +1166,10 @@ load_custom_xdp_prog(const char *prog_path, int if_index)
 
 	/*
 	 * The loaded program must provision for a map of xsks, such that some
-	 * traffic can be redirected to userspace. When the xsk is created,
-	 * libbpf inserts it into the map.
+	 * traffic can be redirected to userspace.
 	 */
-	map = bpf_object__find_map_by_name(obj, "xsks_map");
-	if (!map) {
+	*map = bpf_object__find_map_by_name(obj, "xsks_map");
+	if (!*map) {
 		AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n",
 				prog_path);
 		return -1;
 	}
@@ -1272,13 +1276,15 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
 	if (strnlen(internals->prog_path, PATH_MAX) &&
 				!internals->custom_prog_configured) {
 		ret = load_custom_xdp_prog(internals->prog_path,
-					   internals->if_index);
+					   internals->if_index,
+					   &internals->map);
 		if (ret) {
 			AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
 					internals->prog_path);
 			goto err;
 		}
 		internals->custom_prog_configured = 1;
+		cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
 	}
 
 	if (internals->shared_umem)
@@ -1295,6 +1301,19 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
 		goto err;
 	}
 
+	/* insert the xsk into the xsks_map */
+	if (internals->custom_prog_configured) {
+		int err, fd;
+
+		fd = xsk_socket__fd(rxq->xsk);
+		err = bpf_map_update_elem(bpf_map__fd(internals->map),
+					  &rxq->xsk_queue_idx, &fd, 0);
+		if (err) {
+			AF_XDP_LOG(ERR, "Failed to insert xsk in map.\n");
+			goto err;
+		}
+	}
+
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
 	ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
 	if (ret) {
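
For reference, the socket/map handshake this patch implements can be sketched as a standalone snippet. It is a minimal illustration under stated assumptions, not the driver code: the helper setup_custom_prog_xsk() and its parameters are hypothetical, a configured UMEM and its rings are assumed to already exist, error paths are not unwound, and it relies on the legacy (pre-1.0) libbpf entry points (bpf_prog_load(), bpf_set_link_xdp_fd()) that this generation of the PMD builds against.

#include <stdint.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/xsk.h>

/* Hypothetical helper mirroring the patched flow: load a custom XDP
 * program, attach it, create the AF_XDP socket without letting libbpf
 * load a program of its own, then insert the socket into "xsks_map". */
static int
setup_custom_prog_xsk(const char *prog_path, const char *ifname,
		      uint32_t queue_id, struct xsk_umem *umem,
		      struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		      struct xsk_socket **xsk)
{
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		/* Keep libbpf from replacing the custom program. */
		.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD,
	};
	struct bpf_object *obj;
	struct bpf_map *map;
	int prog_fd, xsk_fd, ret;

	ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (ret)
		return ret;

	/* The program must provide a map of sockets named "xsks_map". */
	map = bpf_object__find_map_by_name(obj, "xsks_map");
	if (map == NULL)
		return -1;

	ret = bpf_set_link_xdp_fd(if_nametoindex(ifname), prog_fd,
				  XDP_FLAGS_UPDATE_IF_NOEXIST);
	if (ret)
		return ret;

	ret = xsk_socket__create(xsk, ifname, queue_id, umem, rx, tx, &cfg);
	if (ret)
		return ret;

	/* With INHIBIT_PROG_LOAD set, libbpf never touches the map, so
	 * the new socket must be inserted manually, keyed by queue id. */
	xsk_fd = xsk_socket__fd(*xsk);
	return bpf_map_update_elem(bpf_map__fd(map), &queue_id, &xsk_fd, 0);
}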
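
The design follows directly from the flag: once XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD is passed, libbpf skips its XDP program setup inside xsk_socket__create(), and with it the xsks_map insertion that the trimmed comment ("libbpf inserts it into the map") used to rely on; the explicit bpf_map_update_elem() call added to xsk_configure() restores that step, keyed by the queue index. Nothing changes for users: a custom program is still supplied through the existing xdp_prog devarg, e.g. (interface name and object path are placeholders):

    --vdev=net_af_xdp0,iface=eth0,xdp_prog=/path/to/prog.o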