#include <linux/ethtool.h>
#include <linux/sockios.h>
#include "af_xdp_deps.h"
-#include <bpf/xsk.h>
+#include <bpf/bpf.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>
#define ETH_AF_XDP_RX_BATCH_SIZE XSK_RING_CONS__DEFAULT_NUM_DESCS
#define ETH_AF_XDP_TX_BATCH_SIZE XSK_RING_CONS__DEFAULT_NUM_DESCS
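+/* Ethernet L2 overhead (header plus CRC) that sits outside the MTU; used
+ * below to derive max_mtu from max_rx_pktlen.
+ */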
+#define ETH_AF_XDP_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
struct pmd_internals {
bool shared_umem;
char prog_path[PATH_MAX];
bool custom_prog_configured;
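+ /* xsks_map of the custom XDP program; each new xsk fd is inserted here */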
+ struct bpf_map *map;
struct rte_ether_addr eth_addr;
};
static const struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_AUTONEG
};
/* List which tracks PMDs to facilitate sharing UMEMs across them. */
if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
rte_pktmbuf_free(local_mbuf);
- kick_tx(txq, cq);
goto out;
}
tx_bytes += mbuf->pkt_len;
}
- kick_tx(txq, cq);
-
out:
xsk_ring_prod__submit(&txq->tx, count);
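+ /* kick the kernel only after the descriptors have been submitted to the
+ * Tx ring, so that the wakeup syscall sees all of them
+ */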
+ kick_tx(txq, cq);
txq->stats.tx_pkts += count;
txq->stats.tx_bytes += tx_bytes;
static int
eth_dev_start(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
static int
eth_dev_stop(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
return list;
}
-/* Check if the netdev,qid context already exists */
-static inline bool
-ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
- struct pkt_rx_queue *list_rxq, const char *list_ifname)
-{
- bool exists = false;
-
- if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
- !strncmp(ifname, list_ifname, IFNAMSIZ)) {
- AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
- ifname, rxq->xsk_queue_idx);
- exists = true;
- }
-
- return exists;
-}
-
-/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
-static inline int
-get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
- struct xsk_umem_info **umem)
-{
- struct internal_list *list;
- struct pmd_internals *internals;
- int i = 0, ret = 0;
- struct rte_mempool *mb_pool = rxq->mb_pool;
-
- if (mb_pool == NULL)
- return ret;
-
- pthread_mutex_lock(&internal_list_lock);
-
- TAILQ_FOREACH(list, &internal_list, next) {
- internals = list->eth_dev->data->dev_private;
- for (i = 0; i < internals->queue_cnt; i++) {
- struct pkt_rx_queue *list_rxq =
- &internals->rx_queues[i];
- if (rxq == list_rxq)
- continue;
- if (mb_pool == internals->rx_queues[i].mb_pool) {
- if (ctx_exists(rxq, ifname, list_rxq,
- internals->if_name)) {
- ret = -1;
- goto out;
- }
- if (__atomic_load_n(
- &internals->rx_queues[i].umem->refcnt,
- __ATOMIC_ACQUIRE)) {
- *umem = internals->rx_queues[i].umem;
- goto out;
- }
- }
- }
- }
-
-out:
- pthread_mutex_unlock(&internal_list_lock);
-
- return ret;
-}
-
static int
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
dev_info->if_index = internals->if_index;
dev_info->max_mac_addrs = 1;
- dev_info->max_rx_pktlen = ETH_FRAME_LEN;
dev_info->max_rx_queues = internals->queue_cnt;
dev_info->max_tx_queues = internals->queue_cnt;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
- dev_info->max_mtu = getpagesize() -
- sizeof(struct rte_mempool_objhdr) -
- sizeof(struct rte_mbuf) -
- RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM;
+ dev_info->max_rx_pktlen = getpagesize() -
+ sizeof(struct rte_mempool_objhdr) -
+ sizeof(struct rte_mbuf) -
+ RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM;
#else
- dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
+ dev_info->max_rx_pktlen = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
#endif
+ dev_info->max_mtu = dev_info->max_rx_pktlen - ETH_AF_XDP_ETH_OVERHEAD;
dev_info->default_rxportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
dev_info->default_txportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
return 0;
}
-static void
-eth_queue_release(void *q __rte_unused)
-{
-}
-
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
int wait_to_complete __rte_unused)
return aligned_addr;
}
+/* Check if the netdev,qid context already exists */
+static inline bool
+ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
+ struct pkt_rx_queue *list_rxq, const char *list_ifname)
+{
+ bool exists = false;
+
+ if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
+ !strncmp(ifname, list_ifname, IFNAMSIZ)) {
+ AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
+ ifname, rxq->xsk_queue_idx);
+ exists = true;
+ }
+
+ return exists;
+}
+
+/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
+static inline int
+get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
+ struct xsk_umem_info **umem)
+{
+ struct internal_list *list;
+ struct pmd_internals *internals;
+ int i = 0, ret = 0;
+ struct rte_mempool *mb_pool = rxq->mb_pool;
+
+ if (mb_pool == NULL)
+ return ret;
+
+ pthread_mutex_lock(&internal_list_lock);
+
+ TAILQ_FOREACH(list, &internal_list, next) {
+ internals = list->eth_dev->data->dev_private;
+ for (i = 0; i < internals->queue_cnt; i++) {
+ struct pkt_rx_queue *list_rxq =
+ &internals->rx_queues[i];
+ if (rxq == list_rxq)
+ continue;
+ if (mb_pool == internals->rx_queues[i].mb_pool) {
+ if (ctx_exists(rxq, ifname, list_rxq,
+ internals->if_name)) {
+ ret = -1;
+ goto out;
+ }
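+ /* a non-zero refcnt means the umem is initialised and shareable; the
+ * acquire load pairs with the release store done at configure time
+ */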
+ if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
+ __ATOMIC_ACQUIRE)) {
+ *umem = internals->rx_queues[i].umem;
+ goto out;
+ }
+ }
+ }
+ }
+
+out:
+ pthread_mutex_unlock(&internal_list_lock);
+
+ return ret;
+}
+
static struct
xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
struct pkt_rx_queue *rxq)
__atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
}
+ return umem;
+
+err:
+ xdp_umem_destroy(umem);
+ return NULL;
+}
#else
static struct
xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
}
umem->mz = mz;
-#endif
return umem;
err:
xdp_umem_destroy(umem);
return NULL;
}
+#endif
static int
-load_custom_xdp_prog(const char *prog_path, int if_index)
+load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map)
{
int ret, prog_fd = -1;
struct bpf_object *obj;
- struct bpf_map *map;
ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (ret) {
/*
* The loaded program must provision for a map of xsks, such that some
- * traffic can be redirected to userspace. When the xsk is created,
- * libbpf inserts it into the map.
+ * traffic can be redirected to userspace.
*/
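+	/*
+	 * A minimal sketch of such a map, assuming the legacy bpf_map_def
+	 * definition style accepted by libbpf at this time (the max_entries
+	 * value is illustrative):
+	 *
+	 *	struct bpf_map_def SEC("maps") xsks_map = {
+	 *		.type = BPF_MAP_TYPE_XSKMAP,
+	 *		.key_size = sizeof(int),
+	 *		.value_size = sizeof(int),
+	 *		.max_entries = 64,
+	 *	};
+	 */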
- map = bpf_object__find_map_by_name(obj, "xsks_map");
- if (!map) {
+ *map = bpf_object__find_map_by_name(obj, "xsks_map");
+ if (!*map) {
AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path);
return -1;
}
if (strnlen(internals->prog_path, PATH_MAX) &&
!internals->custom_prog_configured) {
ret = load_custom_xdp_prog(internals->prog_path,
- internals->if_index);
+ internals->if_index,
+ &internals->map);
if (ret) {
AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
internals->prog_path);
goto err;
}
internals->custom_prog_configured = 1;
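+ /* inhibit libbpf from loading its default XDP program and from updating
+ * the xsks_map itself; the PMD performs the map update below instead
+ */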
+ cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
}
if (internals->shared_umem)
goto err;
}
+ /* insert the xsk into the xsks_map; since the default program load was
+ * inhibited above, libbpf no longer does this when the socket is created
+ */
+ if (internals->custom_prog_configured) {
+ int err, fd;
+
+ fd = xsk_socket__fd(rxq->xsk);
+ err = bpf_map_update_elem(bpf_map__fd(internals->map),
+ &rxq->xsk_queue_idx, &fd, 0);
+ if (err) {
+ AF_XDP_LOG(ERR, "Failed to insert xsk in map.\n");
+ goto err;
+ }
+ }
+
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
if (ret) {
.promiscuous_disable = eth_dev_promiscuous_disable,
.rx_queue_setup = eth_rx_queue_setup,
.tx_queue_setup = eth_tx_queue_setup,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
rte_vdev_device_name(dev));
name = rte_vdev_device_name(dev);
- if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
- strlen(rte_vdev_device_args(dev)) == 0) {
- eth_dev = rte_eth_dev_attach_secondary(name);
- if (eth_dev == NULL) {
- AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
- return -EINVAL;
- }
- eth_dev->dev_ops = &ops;
- rte_eth_dev_probing_finish(eth_dev);
- return 0;
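+	/* xsk fds and UMEM mappings are local to the process that created
+	 * them and cannot be shared, hence no secondary process support
+	 */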
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ AF_XDP_LOG(ERR, "Failed to probe %s. "
+ "AF_XDP PMD does not support secondary processes.\n",
+ name);
+ return -ENOTSUP;
}
kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);