X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Faf_xdp%2Frte_eth_af_xdp.c;h=eaf2c9c873bc5a3fd29e7dbed065489bc591e3f1;hb=3a086c203829e1c258a47a4e02d1511933456e4a;hp=ff8e90589e19aa5d4de2c194571a25605fcc80db;hpb=3e912e39d1b2d12e350e82b7fef07d8ba484a0eb;p=dpdk.git diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c index ff8e90589e..eaf2c9c873 100644 --- a/drivers/net/af_xdp/rte_eth_af_xdp.c +++ b/drivers/net/af_xdp/rte_eth_af_xdp.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2019 Intel Corporation. + * Copyright(c) 2019-2020 Intel Corporation. */ #include #include @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -34,9 +33,14 @@ #include #include #include +#include #include #include #include +#include + +#include "compat.h" + #ifndef SOL_XDP #define SOL_XDP 283 @@ -50,7 +54,7 @@ #define PF_XDP AF_XDP #endif -static int af_xdp_logtype; +RTE_LOG_REGISTER(af_xdp_logtype, pmd.net.af_xdp, NOTICE); #define AF_XDP_LOG(level, fmt, args...) \ rte_log(RTE_LOG_ ## level, af_xdp_logtype, \ @@ -58,7 +62,6 @@ static int af_xdp_logtype; #define ETH_AF_XDP_FRAME_SIZE 2048 #define ETH_AF_XDP_NUM_BUFFERS 4096 -#define ETH_AF_XDP_DATA_HEADROOM 0 #define ETH_AF_XDP_DFLT_NUM_DESCS XSK_RING_CONS__DEFAULT_NUM_DESCS #define ETH_AF_XDP_DFLT_START_QUEUE_IDX 0 #define ETH_AF_XDP_DFLT_QUEUE_COUNT 1 @@ -68,12 +71,13 @@ static int af_xdp_logtype; struct xsk_umem_info { - struct xsk_ring_prod fq; - struct xsk_ring_cons cq; struct xsk_umem *umem; struct rte_ring *buf_ring; const struct rte_memzone *mz; - int pmd_zc; + struct rte_mempool *mb_pool; + void *buffer; + uint8_t refcnt; + uint32_t max_xsks; }; struct rx_stats { @@ -90,6 +94,9 @@ struct pkt_rx_queue { struct rx_stats stats; + struct xsk_ring_prod fq; + struct xsk_ring_cons cq; + struct pkt_tx_queue *pair; struct pollfd fds[1]; int xsk_queue_idx; @@ -97,12 +104,13 @@ struct pkt_rx_queue { struct tx_stats { uint64_t tx_pkts; - uint64_t err_pkts; uint64_t tx_bytes; + uint64_t tx_dropped; }; struct pkt_tx_queue { struct xsk_ring_prod tx; + struct xsk_umem_info *umem; struct tx_stats stats; @@ -117,8 +125,10 @@ struct pmd_internals { int queue_cnt; int max_queue_cnt; int combined_queue_cnt; + bool shared_umem; + char prog_path[PATH_MAX]; + bool custom_prog_configured; - int pmd_zc; struct rte_ether_addr eth_addr; struct pkt_rx_queue *rx_queues; @@ -128,13 +138,15 @@ struct pmd_internals { #define ETH_AF_XDP_IFACE_ARG "iface" #define ETH_AF_XDP_START_QUEUE_ARG "start_queue" #define ETH_AF_XDP_QUEUE_COUNT_ARG "queue_count" -#define ETH_AF_XDP_PMD_ZC_ARG "pmd_zero_copy" +#define ETH_AF_XDP_SHARED_UMEM_ARG "shared_umem" +#define ETH_AF_XDP_PROG_ARG "xdp_prog" static const char * const valid_arguments[] = { ETH_AF_XDP_IFACE_ARG, ETH_AF_XDP_START_QUEUE_ARG, ETH_AF_XDP_QUEUE_COUNT_ARG, - ETH_AF_XDP_PMD_ZC_ARG, + ETH_AF_XDP_SHARED_UMEM_ARG, + ETH_AF_XDP_PROG_ARG, NULL }; @@ -145,10 +157,53 @@ static const struct rte_eth_link pmd_link = { .link_autoneg = ETH_LINK_AUTONEG }; +/* List which tracks PMDs to facilitate sharing UMEMs across them. 
*/ +struct internal_list { + TAILQ_ENTRY(internal_list) next; + struct rte_eth_dev *eth_dev; +}; + +TAILQ_HEAD(internal_list_head, internal_list); +static struct internal_list_head internal_list = + TAILQ_HEAD_INITIALIZER(internal_list); + +static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER; + +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) static inline int -reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size) +reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size, + struct rte_mbuf **bufs, struct xsk_ring_prod *fq) +{ + uint32_t idx; + uint16_t i; + + if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) { + for (i = 0; i < reserve_size; i++) + rte_pktmbuf_free(bufs[i]); + AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n"); + return -1; + } + + for (i = 0; i < reserve_size; i++) { + __u64 *fq_addr; + uint64_t addr; + + fq_addr = xsk_ring_prod__fill_addr(fq, idx++); + addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer - + umem->mb_pool->header_size; + *fq_addr = addr; + } + + xsk_ring_prod__submit(fq, reserve_size); + + return 0; +} +#else +static inline int +reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size, + struct rte_mbuf **bufs __rte_unused, + struct xsk_ring_prod *fq) { - struct xsk_ring_prod *fq = &umem->fq; void *addrs[reserve_size]; uint32_t idx; uint16_t i; @@ -177,37 +232,41 @@ reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size) return 0; } +#endif -static void -umem_buf_release_to_fq(void *addr, void *opaque) +static inline int +reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size, + struct rte_mbuf **bufs, struct xsk_ring_prod *fq) { - struct xsk_umem_info *umem = (struct xsk_umem_info *)opaque; - uint64_t umem_addr = (uint64_t)addr - umem->mz->addr_64; - - rte_ring_enqueue(umem->buf_ring, (void *)umem_addr); +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + return reserve_fill_queue_zc(umem, reserve_size, bufs, fq); +#else + return reserve_fill_queue_cp(umem, reserve_size, bufs, fq); +#endif } +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) static uint16_t -eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) { struct pkt_rx_queue *rxq = queue; struct xsk_ring_cons *rx = &rxq->rx; + struct xsk_ring_prod *fq = &rxq->fq; struct xsk_umem_info *umem = rxq->umem; - struct xsk_ring_prod *fq = &umem->fq; uint32_t idx_rx = 0; - uint32_t free_thresh = fq->size >> 1; - int pmd_zc = umem->pmd_zc; - struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE]; - unsigned long dropped = 0; unsigned long rx_bytes = 0; int rcvd, i; + struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE]; - nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE); - - if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0)) + /* allocate bufs for fill queue replenishment after rx */ + if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) { + AF_XDP_LOG(DEBUG, + "Failed to get enough buffers for fq.\n"); return 0; + } rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx); + if (rcvd == 0) { #if defined(XDP_USE_NEED_WAKEUP) if (xsk_ring_prod__needs_wakeup(fq)) @@ -217,33 +276,90 @@ eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) goto out; } + for (i = 0; i < rcvd; i++) { + const struct xdp_desc *desc; + uint64_t addr; + uint32_t len; + uint64_t offset; + + desc = xsk_ring_cons__rx_desc(rx, idx_rx++); + addr = desc->addr; + len = desc->len; + + offset = xsk_umem__extract_offset(addr); + addr = 
xsk_umem__extract_addr(addr); + + bufs[i] = (struct rte_mbuf *) + xsk_umem__get_data(umem->buffer, addr + + umem->mb_pool->header_size); + bufs[i]->data_off = offset - sizeof(struct rte_mbuf) - + rte_pktmbuf_priv_size(umem->mb_pool) - + umem->mb_pool->header_size; + + rte_pktmbuf_pkt_len(bufs[i]) = len; + rte_pktmbuf_data_len(bufs[i]) = len; + rx_bytes += len; + } + + xsk_ring_cons__release(rx, rcvd); + + (void)reserve_fill_queue(umem, rcvd, fq_bufs, fq); + + /* statistics */ + rxq->stats.rx_pkts += rcvd; + rxq->stats.rx_bytes += rx_bytes; + +out: + if (rcvd != nb_pkts) + rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd], + nb_pkts - rcvd); + + return rcvd; +} +#else +static uint16_t +af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct pkt_rx_queue *rxq = queue; + struct xsk_ring_cons *rx = &rxq->rx; + struct xsk_umem_info *umem = rxq->umem; + struct xsk_ring_prod *fq = &rxq->fq; + uint32_t idx_rx = 0; + unsigned long rx_bytes = 0; + int rcvd, i; + uint32_t free_thresh = fq->size >> 1; + struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE]; + if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh) - (void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE); + (void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE, + NULL, fq); + + if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0)) + return 0; + + rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx); + if (rcvd == 0) { +#if defined(XDP_USE_NEED_WAKEUP) + if (xsk_ring_prod__needs_wakeup(fq)) + (void)poll(rxq->fds, 1, 1000); +#endif + + goto out; + } for (i = 0; i < rcvd; i++) { const struct xdp_desc *desc; uint64_t addr; uint32_t len; void *pkt; - uint16_t buf_len = ETH_AF_XDP_FRAME_SIZE; - struct rte_mbuf_ext_shared_info *shinfo; desc = xsk_ring_cons__rx_desc(rx, idx_rx++); addr = desc->addr; len = desc->len; pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr); - if (pmd_zc) { - shinfo = rte_pktmbuf_ext_shinfo_init_helper(pkt, - &buf_len, umem_buf_release_to_fq, umem); - - rte_pktmbuf_attach_extbuf(mbufs[i], pkt, 0, buf_len, - shinfo); - } else { - rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), - pkt, len); - rte_ring_enqueue(umem->buf_ring, (void *)addr); - } + rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len); + rte_ring_enqueue(umem->buf_ring, (void *)addr); rte_pktmbuf_pkt_len(mbufs[i]) = len; rte_pktmbuf_data_len(mbufs[i]) = len; rx_bytes += len; @@ -253,7 +369,7 @@ eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) xsk_ring_cons__release(rx, rcvd); /* statistics */ - rxq->stats.rx_pkts += (rcvd - dropped); + rxq->stats.rx_pkts += rcvd; rxq->stats.rx_bytes += rx_bytes; out: @@ -263,11 +379,23 @@ out: return rcvd; } +#endif + +static uint16_t +eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE); + +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + return af_xdp_rx_zc(queue, bufs, nb_pkts); +#else + return af_xdp_rx_cp(queue, bufs, nb_pkts); +#endif +} static void -pull_umem_cq(struct xsk_umem_info *umem, int size) +pull_umem_cq(struct xsk_umem_info *umem, int size, struct xsk_ring_cons *cq) { - struct xsk_ring_cons *cq = &umem->cq; size_t i, n; uint32_t idx_cq = 0; @@ -276,16 +404,25 @@ pull_umem_cq(struct xsk_umem_info *umem, int size) for (i = 0; i < n; i++) { uint64_t addr; addr = *xsk_ring_cons__comp_addr(cq, idx_cq++); +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + addr = xsk_umem__extract_addr(addr); + rte_pktmbuf_free((struct rte_mbuf *) + xsk_umem__get_data(umem->buffer, + addr + 
umem->mb_pool->header_size)); +#else rte_ring_enqueue(umem->buf_ring, (void *)addr); +#endif } xsk_ring_cons__release(cq, n); } static void -kick_tx(struct pkt_tx_queue *txq) +kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq) { - struct xsk_umem_info *umem = txq->pair->umem; + struct xsk_umem_info *umem = txq->umem; + + pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq); #if defined(XDP_USE_NEED_WAKEUP) if (xsk_ring_prod__needs_wakeup(&txq->tx)) @@ -298,34 +435,112 @@ kick_tx(struct pkt_tx_queue *txq) /* pull from completion queue to leave more space */ if (errno == EAGAIN) - pull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE); + pull_umem_cq(umem, + XSK_RING_CONS__DEFAULT_NUM_DESCS, + cq); } - pull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE); } -static inline bool -in_umem_range(struct xsk_umem_info *umem, uint64_t addr) +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) +static uint16_t +af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) { - uint64_t mz_base_addr = umem->mz->addr_64; + struct pkt_tx_queue *txq = queue; + struct xsk_umem_info *umem = txq->umem; + struct rte_mbuf *mbuf; + unsigned long tx_bytes = 0; + int i; + uint32_t idx_tx; + uint16_t count = 0; + struct xdp_desc *desc; + uint64_t addr, offset; + struct xsk_ring_cons *cq = &txq->pair->cq; + uint32_t free_thresh = cq->size >> 1; - return addr >= mz_base_addr && addr < mz_base_addr + umem->mz->len; -} + if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh) + pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq); + + for (i = 0; i < nb_pkts; i++) { + mbuf = bufs[i]; + + if (mbuf->pool == umem->mb_pool) { + if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) { + kick_tx(txq, cq); + if (!xsk_ring_prod__reserve(&txq->tx, 1, + &idx_tx)) + goto out; + } + desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx); + desc->len = mbuf->pkt_len; + addr = (uint64_t)mbuf - (uint64_t)umem->buffer - + umem->mb_pool->header_size; + offset = rte_pktmbuf_mtod(mbuf, uint64_t) - + (uint64_t)mbuf + + umem->mb_pool->header_size; + offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT; + desc->addr = addr | offset; + count++; + } else { + struct rte_mbuf *local_mbuf = + rte_pktmbuf_alloc(umem->mb_pool); + void *pkt; + + if (local_mbuf == NULL) + goto out; + + if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) { + rte_pktmbuf_free(local_mbuf); + kick_tx(txq, cq); + goto out; + } + + desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx); + desc->len = mbuf->pkt_len; + + addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer - + umem->mb_pool->header_size; + offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) - + (uint64_t)local_mbuf + + umem->mb_pool->header_size; + pkt = xsk_umem__get_data(umem->buffer, addr + offset); + offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT; + desc->addr = addr | offset; + rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), + desc->len); + rte_pktmbuf_free(mbuf); + count++; + } + + tx_bytes += mbuf->pkt_len; + } + + kick_tx(txq, cq); + +out: + xsk_ring_prod__submit(&txq->tx, count); + + txq->stats.tx_pkts += count; + txq->stats.tx_bytes += tx_bytes; + txq->stats.tx_dropped += nb_pkts - count; + return count; +} +#else static uint16_t -eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) { struct pkt_tx_queue *txq = queue; - struct xsk_umem_info *umem = txq->pair->umem; + struct xsk_umem_info *umem = txq->umem; struct rte_mbuf *mbuf; - int pmd_zc = umem->pmd_zc; void *addrs[ETH_AF_XDP_TX_BATCH_SIZE]; unsigned long tx_bytes = 0; int i; uint32_t 
idx_tx; + struct xsk_ring_cons *cq = &txq->pair->cq; nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE); - pull_umem_cq(umem, nb_pkts); + pull_umem_cq(umem, nb_pkts, cq); nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs, nb_pkts, NULL); @@ -333,7 +548,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) return 0; if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) { - kick_tx(txq); + kick_tx(txq, cq); rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL); return 0; } @@ -346,38 +561,34 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) mbuf = bufs[i]; desc->len = mbuf->pkt_len; - /* - * We need to make sure the external mbuf address is within - * current port's umem memzone range - */ - if (pmd_zc && RTE_MBUF_HAS_EXTBUF(mbuf) && - in_umem_range(umem, (uint64_t)mbuf->buf_addr)) { - desc->addr = (uint64_t)mbuf->buf_addr - - umem->mz->addr_64; - mbuf->buf_addr = xsk_umem__get_data(umem->mz->addr, - (uint64_t)addrs[i]); - } else { - desc->addr = (uint64_t)addrs[i]; - pkt = xsk_umem__get_data(umem->mz->addr, - desc->addr); - rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), - desc->len); - } + desc->addr = (uint64_t)addrs[i]; + pkt = xsk_umem__get_data(umem->mz->addr, + desc->addr); + rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len); tx_bytes += mbuf->pkt_len; + rte_pktmbuf_free(mbuf); } xsk_ring_prod__submit(&txq->tx, nb_pkts); - kick_tx(txq); + kick_tx(txq, cq); txq->stats.tx_pkts += nb_pkts; txq->stats.tx_bytes += tx_bytes; - for (i = 0; i < nb_pkts; i++) - rte_pktmbuf_free(bufs[i]); - return nb_pkts; } +#endif + +static uint16_t +eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + return af_xdp_tx_zc(queue, bufs, nb_pkts); +#else + return af_xdp_tx_cp(queue, bufs, nb_pkts); +#endif +} static int eth_dev_start(struct rte_eth_dev *dev) @@ -394,17 +605,105 @@ eth_dev_stop(struct rte_eth_dev *dev) dev->data->dev_link.link_status = ETH_LINK_DOWN; } +/* Find ethdev in list */ +static inline struct internal_list * +find_internal_resource(struct pmd_internals *port_int) +{ + int found = 0; + struct internal_list *list = NULL; + + if (port_int == NULL) + return NULL; + + pthread_mutex_lock(&internal_list_lock); + + TAILQ_FOREACH(list, &internal_list, next) { + struct pmd_internals *list_int = + list->eth_dev->data->dev_private; + if (list_int == port_int) { + found = 1; + break; + } + } + + pthread_mutex_unlock(&internal_list_lock); + + if (!found) + return NULL; + + return list; +} + +/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */ +static inline struct xsk_umem_info * +get_shared_umem(struct pkt_rx_queue *rxq) { + struct internal_list *list; + struct pmd_internals *internals; + int i = 0; + struct rte_mempool *mb_pool = rxq->mb_pool; + + if (mb_pool == NULL) + return NULL; + + pthread_mutex_lock(&internal_list_lock); + + TAILQ_FOREACH(list, &internal_list, next) { + internals = list->eth_dev->data->dev_private; + for (i = 0; i < internals->queue_cnt; i++) { + struct pkt_rx_queue *list_rxq = + &internals->rx_queues[i]; + if (rxq == list_rxq) + continue; + if (mb_pool == internals->rx_queues[i].mb_pool) { + if (__atomic_load_n( + &internals->rx_queues[i].umem->refcnt, + __ATOMIC_ACQUIRE)) { + pthread_mutex_unlock( + &internal_list_lock); + return internals->rx_queues[i].umem; + } + } + } + } + + pthread_mutex_unlock(&internal_list_lock); + + return NULL; +} + static int eth_dev_configure(struct rte_eth_dev *dev) { + struct 
pmd_internals *internal = dev->data->dev_private; + /* rx/tx must be paired */ if (dev->data->nb_rx_queues != dev->data->nb_tx_queues) return -EINVAL; + if (internal->shared_umem) { + struct internal_list *list = NULL; + const char *name = dev->device->name; + + /* Ensure PMD is not already inserted into the list */ + list = find_internal_resource(internal); + if (list) + return 0; + + list = rte_zmalloc_socket(name, sizeof(*list), 0, + dev->device->numa_node); + if (list == NULL) + return -1; + + list->eth_dev = dev; + pthread_mutex_lock(&internal_list_lock); + TAILQ_INSERT_TAIL(&internal_list, list, next); + pthread_mutex_unlock(&internal_list_lock); + } + return 0; } -static void +static int eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct pmd_internals *internals = dev->data->dev_private; @@ -416,12 +715,21 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_tx_queues = internals->queue_cnt; dev_info->min_mtu = RTE_ETHER_MIN_MTU; - dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM; +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + dev_info->max_mtu = getpagesize() - + sizeof(struct rte_mempool_objhdr) - + sizeof(struct rte_mbuf) - + RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM; +#else + dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM; +#endif dev_info->default_rxportconf.nb_queues = 1; dev_info->default_txportconf.nb_queues = 1; dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS; dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS; + + return 0; } static int @@ -447,6 +755,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->ipackets += stats->q_ipackets[i]; stats->ibytes += stats->q_ibytes[i]; stats->imissed += rxq->stats.rx_dropped; + stats->oerrors += txq->stats.tx_dropped; ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP, XDP_STATISTICS, &xdp_stats, &optlen); if (ret != 0) { @@ -456,14 +765,13 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->imissed += xdp_stats.rx_dropped; stats->opackets += stats->q_opackets[i]; - stats->oerrors += txq->stats.err_pkts; stats->obytes += stats->q_obytes[i]; } return 0; } -static void +static int eth_stats_reset(struct rte_eth_dev *dev) { struct pmd_internals *internals = dev->data->dev_private; @@ -475,6 +783,8 @@ eth_stats_reset(struct rte_eth_dev *dev) memset(&internals->tx_queues[i].stats, 0, sizeof(struct tx_stats)); } + + return 0; } static void @@ -494,23 +804,30 @@ remove_xdp_program(struct pmd_internals *internals) static void xdp_umem_destroy(struct xsk_umem_info *umem) { +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + umem->mb_pool = NULL; +#else rte_memzone_free(umem->mz); umem->mz = NULL; rte_ring_free(umem->buf_ring); umem->buf_ring = NULL; +#endif rte_free(umem); umem = NULL; } -static void +static int eth_dev_close(struct rte_eth_dev *dev) { struct pmd_internals *internals = dev->data->dev_private; struct pkt_rx_queue *rxq; int i; + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n", rte_socket_id()); @@ -519,8 +836,12 @@ eth_dev_close(struct rte_eth_dev *dev) if (rxq->umem == NULL) break; xsk_socket__delete(rxq->xsk); - (void)xsk_umem__delete(rxq->umem->umem); - xdp_umem_destroy(rxq->umem); + + if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) + == 0) { + (void)xsk_umem__delete(rxq->umem->umem); + xdp_umem_destroy(rxq->umem); + } /* free pkt_tx_queue */ 
rte_free(rxq->pair); @@ -534,6 +855,21 @@ eth_dev_close(struct rte_eth_dev *dev) dev->data->mac_addrs = NULL; remove_xdp_program(internals); + + if (internals->shared_umem) { + struct internal_list *list; + + /* Remove ethdev from list used to track and share UMEMs */ + list = find_internal_resource(internals); + if (list) { + pthread_mutex_lock(&internal_list_lock); + TAILQ_REMOVE(&internal_list, list, next); + pthread_mutex_unlock(&internal_list_lock); + rte_free(list); + } + } + + return 0; } static void @@ -548,6 +884,85 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused, return 0; } +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) +static inline uint64_t get_base_addr(struct rte_mempool *mp, uint64_t *align) +{ + struct rte_mempool_memhdr *memhdr; + uint64_t memhdr_addr, aligned_addr; + + memhdr = STAILQ_FIRST(&mp->mem_list); + memhdr_addr = (uint64_t)memhdr->addr; + aligned_addr = memhdr_addr & ~(getpagesize() - 1); + *align = memhdr_addr - aligned_addr; + + return aligned_addr; +} + +static struct +xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, + struct pkt_rx_queue *rxq) +{ + struct xsk_umem_info *umem = NULL; + int ret; + struct xsk_umem_config usr_config = { + .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS * 2, + .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS, + .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG}; + void *base_addr = NULL; + struct rte_mempool *mb_pool = rxq->mb_pool; + uint64_t umem_size, align = 0; + + if (internals->shared_umem) { + umem = get_shared_umem(rxq); + if (umem != NULL && + __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) < + umem->max_xsks) { + AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n", + internals->if_name, rxq->xsk_queue_idx); + __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE); + } + } + + if (umem == NULL) { + usr_config.frame_size = + rte_mempool_calc_obj_size(mb_pool->elt_size, + mb_pool->flags, NULL); + usr_config.frame_headroom = mb_pool->header_size + + sizeof(struct rte_mbuf) + + rte_pktmbuf_priv_size(mb_pool) + + RTE_PKTMBUF_HEADROOM; + + umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, + rte_socket_id()); + if (umem == NULL) { + AF_XDP_LOG(ERR, "Failed to allocate umem info"); + return NULL; + } + + umem->mb_pool = mb_pool; + base_addr = (void *)get_base_addr(mb_pool, &align); + umem_size = mb_pool->populated_size * usr_config.frame_size + + align; + + ret = xsk_umem__create(&umem->umem, base_addr, umem_size, + &rxq->fq, &rxq->cq, &usr_config); + if (ret) { + AF_XDP_LOG(ERR, "Failed to create umem"); + goto err; + } + umem->buffer = base_addr; + + if (internals->shared_umem) { + umem->max_xsks = mb_pool->populated_size / + ETH_AF_XDP_NUM_BUFFERS; + AF_XDP_LOG(INFO, "Max xsks for UMEM %s: %u\n", + mb_pool->name, umem->max_xsks); + } + + __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE); + } + +#else static struct xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq) @@ -558,7 +973,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS, .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS, .frame_size = ETH_AF_XDP_FRAME_SIZE, - .frame_headroom = ETH_AF_XDP_DATA_HEADROOM }; + .frame_headroom = 0 }; char ring_name[RTE_RING_NAMESIZE]; char mz_name[RTE_MEMZONE_NAMESIZE]; int ret; @@ -583,8 +998,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++) rte_ring_enqueue(umem->buf_ring, - (void *)(i * ETH_AF_XDP_FRAME_SIZE + - ETH_AF_XDP_DATA_HEADROOM)); + (void *)(i * ETH_AF_XDP_FRAME_SIZE)); 
snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u", internals->if_name, rxq->xsk_queue_idx); @@ -599,7 +1013,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, ret = xsk_umem__create(&umem->umem, mz->addr, ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE, - &umem->fq, &umem->cq, + &rxq->fq, &rxq->cq, &usr_config); if (ret) { @@ -608,6 +1022,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, } umem->mz = mz; +#endif return umem; err: @@ -615,6 +1030,45 @@ err: return NULL; } +static int +load_custom_xdp_prog(const char *prog_path, int if_index) +{ + int ret, prog_fd = -1; + struct bpf_object *obj; + struct bpf_map *map; + + ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (ret) { + AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path); + return ret; + } + + /* + * The loaded program must provision for a map of xsks, such that some + * traffic can be redirected to userspace. When the xsk is created, + * libbpf inserts it into the map. + */ + map = bpf_object__find_map_by_name(obj, "xsks_map"); + if (!map) { + AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path); + return -1; + } + + /* Link the program with the given network device */ + ret = bpf_set_link_xdp_fd(if_index, prog_fd, + XDP_FLAGS_UPDATE_IF_NOEXIST); + if (ret) { + AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n", + prog_fd); + return -1; + } + + AF_XDP_LOG(INFO, "Successfully loaded XDP program %s with fd %d\n", + prog_path, prog_fd); + + return 0; +} + static int xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, int ring_size) @@ -622,11 +1076,13 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, struct xsk_socket_config cfg; struct pkt_tx_queue *txq = rxq->pair; int ret = 0; - int reserve_size; + int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS; + struct rte_mbuf *fq_bufs[reserve_size]; rxq->umem = xdp_umem_configure(internals, rxq); if (rxq->umem == NULL) return -ENOMEM; + txq->umem = rxq->umem; cfg.rx_size = ring_size; cfg.tx_size = ring_size; @@ -638,16 +1094,39 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, cfg.bind_flags |= XDP_USE_NEED_WAKEUP; #endif - ret = xsk_socket__create(&rxq->xsk, internals->if_name, - rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx, - &txq->tx, &cfg); + if (strnlen(internals->prog_path, PATH_MAX) && + !internals->custom_prog_configured) { + ret = load_custom_xdp_prog(internals->prog_path, + internals->if_index); + if (ret) { + AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n", + internals->prog_path); + goto err; + } + internals->custom_prog_configured = 1; + } + + if (internals->shared_umem) + ret = create_shared_socket(&rxq->xsk, internals->if_name, + rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx, + &txq->tx, &rxq->fq, &rxq->cq, &cfg); + else + ret = xsk_socket__create(&rxq->xsk, internals->if_name, + rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx, + &txq->tx, &cfg); + if (ret) { AF_XDP_LOG(ERR, "Failed to create xsk socket.\n"); goto err; } - reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS / 2; - ret = reserve_fill_queue(rxq->umem, reserve_size); +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + if (rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size)) { + AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n"); + goto err; + } +#endif + ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq); if (ret) { xsk_socket__delete(rxq->xsk); AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n"); @@ -657,7 +1136,8 @@ 
xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, return 0; err: - xdp_umem_destroy(rxq->umem); + if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) == 0) + xdp_umem_destroy(rxq->umem); return ret; } @@ -671,7 +1151,6 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, struct rte_mempool *mb_pool) { struct pmd_internals *internals = dev->data->dev_private; - uint32_t buf_size, data_size; struct pkt_rx_queue *rxq; int ret; @@ -679,10 +1158,14 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n", rx_queue_id, rxq->xsk_queue_idx); + +#ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG + uint32_t buf_size, data_size; + /* Now get the space available for data in the mbuf */ buf_size = rte_pktmbuf_data_room_size(mb_pool) - RTE_PKTMBUF_HEADROOM; - data_size = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM; + data_size = ETH_AF_XDP_FRAME_SIZE; if (data_size > buf_size) { AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n", @@ -690,6 +1173,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, ret = -ENOMEM; goto err; } +#endif rxq->mb_pool = mb_pool; @@ -702,8 +1186,6 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, rxq->fds[0].fd = xsk_socket__fd(rxq->xsk); rxq->fds[0].events = POLLIN; - rxq->umem->pmd_zc = internals->pmd_zc; - dev->data->rx_queues[rx_queue_id] = rxq; return 0; @@ -746,41 +1228,47 @@ eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return (ret < 0) ? -errno : 0; } -static void +static int eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask) { struct ifreq ifr; + int ret = 0; int s; s = socket(PF_INET, SOCK_DGRAM, 0); if (s < 0) - return; + return -errno; strlcpy(ifr.ifr_name, if_name, IFNAMSIZ); - if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) + if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) { + ret = -errno; goto out; + } ifr.ifr_flags &= mask; ifr.ifr_flags |= flags; - if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) + if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) { + ret = -errno; goto out; + } out: close(s); + return ret; } -static void +static int eth_dev_promiscuous_enable(struct rte_eth_dev *dev) { struct pmd_internals *internals = dev->data->dev_private; - eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0); + return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0); } -static void +static int eth_dev_promiscuous_disable(struct rte_eth_dev *dev) { struct pmd_internals *internals = dev->data->dev_private; - eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC); + return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC); } static const struct eth_dev_ops ops = { @@ -836,6 +1324,30 @@ parse_name_arg(const char *key __rte_unused, return 0; } +/** parse xdp prog argument */ +static int +parse_prog_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + char *path = extra_args; + + if (strnlen(value, PATH_MAX) == PATH_MAX) { + AF_XDP_LOG(ERR, "Invalid path %s, should be less than %u bytes.\n", + value, PATH_MAX); + return -EINVAL; + } + + if (access(value, F_OK) != 0) { + AF_XDP_LOG(ERR, "Error accessing %s: %s\n", + value, strerror(errno)); + return -EINVAL; + } + + strlcpy(path, value, PATH_MAX); + + return 0; +} + static int xdp_get_channels_info(const char *if_name, int *max_queues, int *combined_queues) @@ -852,9 +1364,13 @@ xdp_get_channels_info(const char *if_name, int *max_queues, ifr.ifr_data = (void *)&channels; strncpy(ifr.ifr_name, if_name, IFNAMSIZ); ret = ioctl(fd, SIOCETHTOOL, &ifr); - if (ret && errno != EOPNOTSUPP) { - ret = -errno; - goto 
out; + if (ret) { + if (errno == EOPNOTSUPP) { + ret = 0; + } else { + ret = -errno; + goto out; + } } if (channels.max_combined == 0 || errno == EOPNOTSUPP) { @@ -875,7 +1391,7 @@ xdp_get_channels_info(const char *if_name, int *max_queues, static int parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue, - int *queue_cnt, int *pmd_zc) + int *queue_cnt, int *shared_umem, char *prog_path) { int ret; @@ -896,8 +1412,13 @@ parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue, goto free_kvlist; } - ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PMD_ZC_ARG, - &parse_integer_arg, pmd_zc); + ret = rte_kvargs_process(kvlist, ETH_AF_XDP_SHARED_UMEM_ARG, + &parse_integer_arg, shared_umem); + if (ret < 0) + goto free_kvlist; + + ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PROG_ARG, + &parse_prog_arg, prog_path); if (ret < 0) goto free_kvlist; @@ -938,7 +1459,8 @@ error: static struct rte_eth_dev * init_internals(struct rte_vdev_device *dev, const char *if_name, - int start_queue_idx, int queue_cnt, int pmd_zc) + int start_queue_idx, int queue_cnt, int shared_umem, + const char *prog_path) { const char *name = rte_vdev_device_name(dev); const unsigned int numa_node = dev->device.numa_node; @@ -953,8 +1475,18 @@ init_internals(struct rte_vdev_device *dev, const char *if_name, internals->start_queue_idx = start_queue_idx; internals->queue_cnt = queue_cnt; - internals->pmd_zc = pmd_zc; strlcpy(internals->if_name, if_name, IFNAMSIZ); + strlcpy(internals->prog_path, prog_path, PATH_MAX); + internals->custom_prog_configured = 0; + +#ifndef ETH_AF_XDP_SHARED_UMEM + if (shared_umem) { + AF_XDP_LOG(ERR, "Shared UMEM feature not available. " + "Check kernel and libbpf version\n"); + goto err_free_internals; + } +#endif + internals->shared_umem = shared_umem; if (xdp_get_channels_info(if_name, &internals->max_queue_cnt, &internals->combined_queue_cnt)) { @@ -1006,11 +1538,10 @@ init_internals(struct rte_vdev_device *dev, const char *if_name, eth_dev->dev_ops = &ops; eth_dev->rx_pkt_burst = eth_af_xdp_rx; eth_dev->tx_pkt_burst = eth_af_xdp_tx; - /* Let rte_eth_dev_close() release the port resources. 
*/ - eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; - if (internals->pmd_zc) - AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n"); +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n"); +#endif return eth_dev; @@ -1030,9 +1561,10 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev) char if_name[IFNAMSIZ] = {'\0'}; int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX; int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT; + int shared_umem = 0; + char prog_path[PATH_MAX] = {'\0'}; struct rte_eth_dev *eth_dev = NULL; const char *name; - int pmd_zc = 0; AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n", rte_vdev_device_name(dev)); @@ -1060,7 +1592,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev) dev->device.numa_node = rte_socket_id(); if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx, - &xsk_queue_cnt, &pmd_zc) < 0) { + &xsk_queue_cnt, &shared_umem, prog_path) < 0) { AF_XDP_LOG(ERR, "Invalid kvargs value\n"); return -EINVAL; } @@ -1071,7 +1603,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev) } eth_dev = init_internals(dev, if_name, xsk_start_queue_idx, - xsk_queue_cnt, pmd_zc); + xsk_queue_cnt, shared_umem, prog_path); if (eth_dev == NULL) { AF_XDP_LOG(ERR, "Failed to init internals\n"); return -1; @@ -1115,11 +1647,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp, "iface= " "start_queue= " "queue_count= " - "pmd_zero_copy=<0|1>"); - -RTE_INIT(af_xdp_init_log) -{ - af_xdp_logtype = rte_log_register("pmd.net.af_xdp"); - if (af_xdp_logtype >= 0) - rte_log_set_level(af_xdp_logtype, RTE_LOG_NOTICE); -} + "shared_umem= " + "xdp_prog= ");
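
The zero-copy Rx/Tx paths above (compiled when XDP_UMEM_UNALIGNED_CHUNK_FLAG is available) all rely on the same arithmetic to convert between an rte_mbuf sitting inside the UMEM-backing mempool and the 64-bit handle carried in an xdp_desc. A minimal sketch of that mapping follows; the helper names are illustrative only, umem_buffer stands for the page-aligned base registered with xsk_umem__create() (umem->buffer above) and mb_pool for the mempool that backs the UMEM.

#include <stdint.h>
#include <linux/if_xdp.h>
#include <bpf/xsk.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Tx direction (af_xdp_tx_zc): encode an mbuf as base | (offset << shift),
 * where base is the chunk start relative to the UMEM and offset points at
 * the packet data inside that chunk. */
static uint64_t
mbuf_to_desc_addr(const struct rte_mbuf *mbuf, void *umem_buffer,
		  struct rte_mempool *mb_pool)
{
	uint64_t base = (uint64_t)mbuf - (uint64_t)umem_buffer -
			mb_pool->header_size;
	uint64_t offset = rte_pktmbuf_mtod(mbuf, uint64_t) - (uint64_t)mbuf +
			  mb_pool->header_size;

	return base | (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

/* Rx direction (af_xdp_rx_zc): recover the mbuf that owns a filled
 * descriptor and point data_off at the received frame. */
static struct rte_mbuf *
desc_addr_to_mbuf(uint64_t desc_addr, void *umem_buffer,
		  struct rte_mempool *mb_pool)
{
	uint64_t addr = xsk_umem__extract_addr(desc_addr);
	uint64_t offset = xsk_umem__extract_offset(desc_addr);
	struct rte_mbuf *mbuf;

	mbuf = (struct rte_mbuf *)xsk_umem__get_data(umem_buffer,
			addr + mb_pool->header_size);
	mbuf->data_off = offset - sizeof(struct rte_mbuf) -
			 rte_pktmbuf_priv_size(mb_pool) -
			 mb_pool->header_size;
	return mbuf;
}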
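
The zero-copy variant of xdp_umem_configure() above lays the UMEM out as one chunk per mempool object, so mbufs themselves become UMEM buffers and no separate memzone or buf_ring is needed. A sketch of how the xsk_umem_config is derived from the Rx mempool, assuming a libbpf whose struct xsk_umem_config has the flags member (unaligned-chunk support); the helper name is illustrative:

#include <linux/if_xdp.h>
#include <bpf/xsk.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

static void
fill_umem_cfg_from_pool(struct rte_mempool *mb_pool,
			struct xsk_umem_config *cfg)
{
	cfg->fill_size = XSK_RING_CONS__DEFAULT_NUM_DESCS * 2;
	cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg->flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG;

	/* One UMEM chunk per mempool object, including the object header. */
	cfg->frame_size = rte_mempool_calc_obj_size(mb_pool->elt_size,
						    mb_pool->flags, NULL);
	/* Skip the mbuf struct, private area and headroom inside each chunk
	 * so that packet data lands in the mbuf's data room. */
	cfg->frame_headroom = mb_pool->header_size +
			      sizeof(struct rte_mbuf) +
			      rte_pktmbuf_priv_size(mb_pool) +
			      RTE_PKTMBUF_HEADROOM;
}

xsk_umem__create() is then handed the first memhdr's address rounded down to a page boundary (get_base_addr() above) and a length of populated_size * frame_size plus the rounding slack, so every object of the pool is addressable as a chunk.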
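
When shared_umem=1, several queues (possibly on different ports) bind to one UMEM, so teardown in eth_dev_close() and in the xsk_configure() error path is gated on a reference count instead of being unconditional, and max_xsks caps the number of sharers so each socket keeps a fair share of the pool's buffers. Reduced to a sketch (helper names illustrative; refcnt is the uint8_t field added to xsk_umem_info):

#include <stdint.h>

/* Take a reference on a UMEM found via get_shared_umem(). */
static inline void
umem_get(uint8_t *refcnt)
{
	__atomic_fetch_add(refcnt, 1, __ATOMIC_ACQUIRE);
}

/* Drop a reference; returns 1 when the caller held the last one and is
 * therefore responsible for xsk_umem__delete()/xdp_umem_destroy(). */
static inline int
umem_put(uint8_t *refcnt)
{
	return __atomic_sub_fetch(refcnt, 1, __ATOMIC_ACQUIRE) == 0;
}

create_shared_socket() comes from the new compat.h; it presumably wraps libbpf's xsk_socket__create_shared(), which takes per-socket fill and completion rings, when the installed libbpf provides it. That is also why the fq and cq rings moved from xsk_umem_info into pkt_rx_queue.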
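
load_custom_xdp_prog() only checks that the object file passed via xdp_prog= contains a map named "xsks_map"; as the comment above notes, libbpf inserts each created socket into that map, keyed by queue id, once xsk_socket__create() runs against the already-attached program. A minimal program of the expected shape might look as follows; this is an illustrative sketch using the legacy bpf_map_def style of that libbpf era, and the XDP_PASS fallback argument to bpf_redirect_map() needs a reasonably recent kernel:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Socket map the driver looks up by name. */
struct bpf_map_def SEC("maps") xsks_map = {
	.type = BPF_MAP_TYPE_XSKMAP,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 64,
};

SEC("xdp")
int xdp_sock_prog(struct xdp_md *ctx)
{
	/* Redirect to the socket bound to this Rx queue; fall back to the
	 * kernel stack when no socket is attached to the queue. */
	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
}

char _license[] SEC("license") = "GPL";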
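
With this revision the pmd_zero_copy devarg is gone: zero copy is selected automatically whenever the kernel and libbpf headers provide XDP_UMEM_UNALIGNED_CHUNK_FLAG, and the copy-mode paths are compiled otherwise. Per the parsers above, the vdev now accepts iface=<string>, start_queue=<int>, queue_count=<int>, shared_umem=<int> and xdp_prog=<string>. A possible invocation, e.g. with testpmd (interface name and program path are placeholders):

    dpdk-testpmd --vdev=net_af_xdp0,iface=eth0,shared_umem=1,xdp_prog=/path/to/prog.o \
        --vdev=net_af_xdp1,iface=eth0,start_queue=1,shared_umem=1,xdp_prog=/path/to/prog.o -- -i

Sharing a UMEM across vdevs additionally requires that the Rx queues are configured with the same mempool, since get_shared_umem() matches candidates on the mb_pool pointer.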