int if_index;
char if_name[IFNAMSIZ];
uint16_t queue_idx;
- struct ether_addr eth_addr;
+ struct rte_ether_addr eth_addr;
struct xsk_umem_info *umem;
struct rte_mempool *mb_pool_share;
};
static inline int
-reserve_fill_queue(struct xsk_umem_info *umem, int reserve_size)
+reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size)
{
struct xsk_ring_prod *fq = &umem->fq;
+ void *addrs[reserve_size];
uint32_t idx;
- int i, ret;
+ uint16_t i;
- ret = xsk_ring_prod__reserve(fq, reserve_size, &idx);
- if (unlikely(!ret)) {
- AF_XDP_LOG(ERR, "Failed to reserve enough fq descs.\n");
- return ret;
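+ /*
+ * Dequeue the buffers in bulk up front so that a failed fq
+ * reservation below can simply return them to the ring.
+ */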
+ if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
+ != reserve_size) {
+ AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
+ return -1;
+ }
+
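+ /* Reserve fq slots; on failure the buffers go back to the ring. */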
+ if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
+ AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
+ rte_ring_enqueue_bulk(umem->buf_ring, addrs,
+ reserve_size, NULL);
+ return -1;
}
for (i = 0; i < reserve_size; i++) {
__u64 *fq_addr;
- void *addr = NULL;
- if (rte_ring_dequeue(umem->buf_ring, &addr)) {
- i--;
- break;
- }
+
fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
- *fq_addr = (uint64_t)addr;
+ *fq_addr = (uint64_t)addrs[i];
}
- xsk_ring_prod__submit(fq, i);
+ xsk_ring_prod__submit(fq, reserve_size);
return 0;
}
struct xsk_ring_prod *fq = &umem->fq;
uint32_t idx_rx = 0;
uint32_t free_thresh = fq->size >> 1;
- struct rte_mbuf *mbufs[ETH_AF_XDP_TX_BATCH_SIZE];
+ struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
unsigned long dropped = 0;
unsigned long rx_bytes = 0;
- uint16_t count = 0;
int rcvd, i;
- nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
+ nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
+
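+ /*
+ * Allocate the full burst of mbufs up front; those not matched by a
+ * received packet are handed back to the pool at the out: label.
+ */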
+ if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
+ return 0;
rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
if (rcvd == 0)
- return 0;
+ goto out;
if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE);
- if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, rcvd) != 0))
- return 0;
-
for (i = 0; i < rcvd; i++) {
const struct xdp_desc *desc;
uint64_t addr;
rte_pktmbuf_pkt_len(mbufs[i]) = len;
rte_pktmbuf_data_len(mbufs[i]) = len;
rx_bytes += len;
- bufs[count++] = mbufs[i];
+ bufs[i] = mbufs[i];
rte_ring_enqueue(umem->buf_ring, (void *)addr);
}
rxq->stats.rx_pkts += (rcvd - dropped);
rxq->stats.rx_bytes += rx_bytes;
- return count;
+out:
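+ /* Hand back the mbufs allocated for packets that never arrived. */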
+ if (rcvd != nb_pkts)
+ rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
+ nb_pkts - rcvd);
+
+ return rcvd;
}
static void
struct rte_mbuf *mbuf;
void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
unsigned long tx_bytes = 0;
- int i, valid = 0;
+ int i;
uint32_t idx_tx;
nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
kick_tx(txq);
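+ /* Return the UMEM addresses dequeued for this burst to the ring. */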
+ rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
return 0;
}
for (i = 0; i < nb_pkts; i++) {
struct xdp_desc *desc;
void *pkt;
- uint32_t buf_len = ETH_AF_XDP_FRAME_SIZE
- - ETH_AF_XDP_DATA_HEADROOM;
+
desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
mbuf = bufs[i];
- if (mbuf->pkt_len <= buf_len) {
- desc->addr = (uint64_t)addrs[valid];
- desc->len = mbuf->pkt_len;
- pkt = xsk_umem__get_data(umem->mz->addr,
- desc->addr);
- rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
- desc->len);
- valid++;
- tx_bytes += mbuf->pkt_len;
- }
+
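+ /* Copy the packet into the UMEM frame backing this Tx descriptor. */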
+ desc->addr = (uint64_t)addrs[i];
+ desc->len = mbuf->pkt_len;
+ pkt = xsk_umem__get_data(umem->mz->addr,
+ desc->addr);
+ rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
+ desc->len);
+ tx_bytes += mbuf->pkt_len;
+
rte_pktmbuf_free(mbuf);
}
kick_tx(txq);
- if (valid < nb_pkts)
- rte_ring_enqueue_bulk(umem->buf_ring, &addrs[valid],
- nb_pkts - valid, NULL);
-
- txq->stats.err_pkts += nb_pkts - valid;
- txq->stats.tx_pkts += valid;
+ txq->stats.tx_pkts += nb_pkts;
txq->stats.tx_bytes += tx_bytes;
return nb_pkts;
dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
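+ /* The largest MTU is limited by the UMEM frame size less headroom. */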
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;
+
dev_info->default_rxportconf.nb_queues = 1;
dev_info->default_txportconf.nb_queues = 1;
dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
XDP_FLAGS_UPDATE_IF_NOEXIST);
}
+static void
+xdp_umem_destroy(struct xsk_umem_info *umem)
+{
+ rte_memzone_free(umem->mz);
+ umem->mz = NULL;
+
+ rte_ring_free(umem->buf_ring);
+ umem->buf_ring = NULL;
+
+ rte_free(umem);
+ umem = NULL;
+}
+
static void
eth_dev_close(struct rte_eth_dev *dev)
{
}
(void)xsk_umem__delete(internals->umem->umem);
+
+ /*
+ * MAC is not allocated dynamically; set it to NULL so that
+ * rte_eth_dev_release_port() does not try to free it.
+ */
+ dev->data->mac_addrs = NULL;
+
+ xdp_umem_destroy(internals->umem);
+
remove_xdp_program(internals);
}
return 0;
}
-static void
-xdp_umem_destroy(struct xsk_umem_info *umem)
-{
- rte_memzone_free(umem->mz);
- umem->mz = NULL;
-
- rte_ring_free(umem->buf_ring);
- umem->buf_ring = NULL;
-
- rte_free(umem);
- umem = NULL;
-}
-
static struct
-xsk_umem_info *xdp_umem_configure(void)
+xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals)
{
struct xsk_umem_info *umem;
const struct rte_memzone *mz;
.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
.frame_size = ETH_AF_XDP_FRAME_SIZE,
.frame_headroom = ETH_AF_XDP_DATA_HEADROOM };
+ char ring_name[RTE_RING_NAMESIZE];
+ char mz_name[RTE_MEMZONE_NAMESIZE];
int ret;
uint64_t i;
return NULL;
}
- umem->buf_ring = rte_ring_create("af_xdp_ring",
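+ /* Name the ring per interface and queue so instances don't collide. */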
+ snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
+ internals->if_name, internals->queue_idx);
+ umem->buf_ring = rte_ring_create(ring_name,
ETH_AF_XDP_NUM_BUFFERS,
rte_socket_id(),
0x0);
(void *)(i * ETH_AF_XDP_FRAME_SIZE +
ETH_AF_XDP_DATA_HEADROOM));
- mz = rte_memzone_reserve_aligned("af_xdp uemem",
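+ /* Give the UMEM memzone a unique per-interface/queue name as well. */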
+ snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
+ internals->if_name, internals->queue_idx);
+ mz = rte_memzone_reserve_aligned(mz_name,
ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
getpagesize());
int ret = 0;
int reserve_size;
- rxq->umem = xdp_umem_configure();
+ rxq->umem = xdp_umem_configure(internals);
if (rxq->umem == NULL)
return -ENOMEM;
static int
get_iface_info(const char *if_name,
- struct ether_addr *eth_addr,
+ struct rte_ether_addr *eth_addr,
int *if_index)
{
struct ifreq ifr;
if (ioctl(sock, SIOCGIFHWADDR, &ifr))
goto error;
- rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
+ rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
close(sock);
return 0;
eth_dev->dev_ops = &ops;
eth_dev->rx_pkt_burst = eth_af_xdp_rx;
eth_dev->tx_pkt_burst = eth_af_xdp_tx;
+ /* Let rte_eth_dev_close() release the port resources. */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
return eth_dev;
rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
{
struct rte_eth_dev *eth_dev = NULL;
- struct pmd_internals *internals;
AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
rte_socket_id());
if (eth_dev == NULL)
return -1;
- internals = eth_dev->data->dev_private;
-
- rte_ring_free(internals->umem->buf_ring);
- rte_memzone_free(internals->umem->mz);
- rte_free(internals->umem);
-
+ eth_dev_close(eth_dev);
rte_eth_dev_release_port(eth_dev);