#include <sys/param.h>
#include <unistd.h>
-#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
+#include <rte_net.h>
#include <rte_vhost.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_pause.h>
+#include <rte_dmadev.h>
+#include <rte_vhost_async.h>
#include "main.h"
#define MAX_QUEUES 128
#endif
+#define NUM_MBUFS_DEFAULT 0x24000
+
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1
#define BURST_RX_RETRIES 4 /* Number of retries on RX. */
#define JUMBO_FRAME_MAX_SIZE 0x2600
+#define MAX_MTU (JUMBO_FRAME_MAX_SIZE - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN))
/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define RTE_TEST_TX_DESC_DEFAULT 512
#define INVALID_PORT_ID 0xFF
+#define INVALID_DMA_ID -1
+
+#define DMA_RING_SIZE 4096
-/* Max number of devices. Limited by vmdq. */
-#define MAX_DEVICES 64
+#define ASYNC_ENQUEUE_VHOST 1
+#define ASYNC_DEQUEUE_VHOST 2
-/* Size of buffers used for snprintfs. */
-#define MAX_PRINT_BUFF 6072
+/* number of mbufs in all pools - if specified on command-line. */
+static int total_num_mbufs = NUM_MBUFS_DEFAULT;
-/* Maximum long option length for option parsing. */
-#define MAX_LONG_OPT_SZ 64
+struct dma_for_vhost dma_bind[RTE_MAX_VHOST_DEVICE];
+int16_t dmas_id[RTE_DMADEV_DEFAULT_MAX];
+static int dma_count;
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
static uint32_t enable_tso;
static int client_mode;
-static int dequeue_zero_copy;
static int builtin_net_driver;
static char *socket_files;
static int nb_sockets;
-/* empty vmdq configuration structure. Filled in programatically */
+static struct vhost_queue_ops vdev_queue_ops[RTE_MAX_VHOST_DEVICE];
+
+/* empty VMDq configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_VMDQ_ONLY,
+ .mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
/*
* VLAN strip is necessary for 1G NIC such as I350,
* this fixes bug of ipv4 forwarding in guest can't
- * forward pakets from one virtio dev to another virtio dev.
+ * forward packets from one virtio dev to another virtio dev.
*/
- .offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
- DEV_RX_OFFLOAD_VLAN_STRIP),
+ .offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
- .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_TCP_TSO),
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
+ .offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO),
},
.rx_adv_conf = {
/*
* appropriate values
*/
.vmdq_rx_conf = {
- .nb_queue_pools = ETH_8_POOLS,
+ .nb_queue_pools = RTE_ETH_8_POOLS,
.enable_default_pool = 0,
.default_pool = 0,
.nb_pool_maps = 0,
};
/* ethernet addresses of ports */
-static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
+static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
static struct vhost_dev_tailq_list vhost_dev_list =
TAILQ_HEAD_INITIALIZER(vhost_dev_list);
struct rte_mbuf *m_table[MAX_PKT_BURST];
};
+struct vhost_bufftable {
+ uint32_t len;
+ uint64_t pre_tsc;
+ struct rte_mbuf *m_table[MAX_PKT_BURST];
+};
+
/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
+/*
+ * Vhost TX buffer for each data core.
+ * Every data core maintains a TX buffer for every vhost device,
+ * which is used to batch packet enqueues for higher performance.
+ */
+struct vhost_bufftable *vhost_txbuff[RTE_MAX_LCORE * RTE_MAX_VHOST_DEVICE];
+
#define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
/ US_PER_S * BURST_TX_DRAIN_US)
-#define VLAN_HLEN 4
+
+static int vid2socketid[RTE_MAX_VHOST_DEVICE];
+
+static inline uint32_t
+get_async_flag_by_socketid(int socketid)
+{
+ return dma_bind[socketid].async_flag;
+}
+
+static inline void
+init_vid2socketid_array(int vid, int socketid)
+{
+ vid2socketid[vid] = socketid;
+}
+
+static inline bool
+is_dma_configured(int16_t dev_id)
+{
+ int i;
+
+ for (i = 0; i < dma_count; i++)
+ if (dmas_id[i] == dev_id)
+ return true;
+ return false;
+}
+
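+/*
+ * Parse the --dmas argument, e.g. [txd0@0000:00:04.0,rxd1@0000:00:04.1]:
+ * "txdN" binds a DMA device to the enqueue path and "rxdN" to the dequeue
+ * path of the vhost device created from socket file N. Each DMA device is
+ * configured and started only once.
+ */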
+static inline int
+open_dma(const char *value)
+{
+ struct dma_for_vhost *dma_info = dma_bind;
+ char *input = strndup(value, strlen(value) + 1);
+ char *addrs = input;
+ char *ptrs[2];
+ char *start, *end, *substr;
+ int64_t socketid, vring_id;
+
+ struct rte_dma_info info;
+ struct rte_dma_conf dev_config = { .nb_vchans = 1 };
+ struct rte_dma_vchan_conf qconf = {
+ .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ .nb_desc = DMA_RING_SIZE
+ };
+
+ int dev_id;
+ int ret = 0;
+ uint16_t i = 0;
+ char *dma_arg[RTE_MAX_VHOST_DEVICE];
+ int args_nr;
+
+ while (isblank(*addrs))
+ addrs++;
+ if (*addrs == '\0') {
+ ret = -1;
+ goto out;
+ }
+
+ /* process DMA devices within the brackets. */
+ addrs++;
+ substr = strtok(addrs, ";]");
+ if (!substr) {
+ ret = -1;
+ goto out;
+ }
+
+ args_nr = rte_strsplit(substr, strlen(substr), dma_arg, RTE_MAX_VHOST_DEVICE, ',');
+ if (args_nr <= 0) {
+ ret = -1;
+ goto out;
+ }
+
+ while (i < args_nr) {
+ char *arg_temp = dma_arg[i];
+ char *txd, *rxd;
+ uint8_t sub_nr;
+ int async_flag;
+
+ sub_nr = rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
+ if (sub_nr != 2) {
+ ret = -1;
+ goto out;
+ }
+
+ txd = strstr(ptrs[0], "txd");
+ rxd = strstr(ptrs[0], "rxd");
+ if (txd) {
+ start = txd;
+ vring_id = VIRTIO_RXQ;
+ async_flag = ASYNC_ENQUEUE_VHOST;
+ } else if (rxd) {
+ start = rxd;
+ vring_id = VIRTIO_TXQ;
+ async_flag = ASYNC_DEQUEUE_VHOST;
+ } else {
+ ret = -1;
+ goto out;
+ }
+
+ start += 3;
+ socketid = strtol(start, &end, 0);
+ if (end == start) {
+ ret = -1;
+ goto out;
+ }
+
+ dev_id = rte_dma_get_dev_id_by_name(ptrs[1]);
+ if (dev_id < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG, "Fail to find DMA %s.\n", ptrs[1]);
+ ret = -1;
+ goto out;
+ }
+
+ /* DMA device is already configured, so skip */
+ if (is_dma_configured(dev_id))
+ goto done;
+
+ if (rte_dma_info_get(dev_id, &info) != 0) {
+ RTE_LOG(ERR, VHOST_CONFIG, "Error with rte_dma_info_get()\n");
+ ret = -1;
+ goto out;
+ }
+
+ if (info.max_vchans < 1) {
+ RTE_LOG(ERR, VHOST_CONFIG, "No channels available on device %d\n", dev_id);
+ ret = -1;
+ goto out;
+ }
+
+ if (rte_dma_configure(dev_id, &dev_config) != 0) {
+ RTE_LOG(ERR, VHOST_CONFIG, "Fail to configure DMA %d.\n", dev_id);
+ ret = -1;
+ goto out;
+ }
+
+ /* Check the max desc supported by DMA device */
+ rte_dma_info_get(dev_id, &info);
+ if (info.nb_vchans != 1) {
+ RTE_LOG(ERR, VHOST_CONFIG, "No configured queues reported by DMA %d.\n",
+ dev_id);
+ ret = -1;
+ goto out;
+ }
+
+ qconf.nb_desc = RTE_MIN(DMA_RING_SIZE, info.max_desc);
+
+ if (rte_dma_vchan_setup(dev_id, 0, &qconf) != 0) {
+ RTE_LOG(ERR, VHOST_CONFIG, "Fail to set up DMA %d.\n", dev_id);
+ ret = -1;
+ goto out;
+ }
+
+ if (rte_dma_start(dev_id) != 0) {
+ RTE_LOG(ERR, VHOST_CONFIG, "Fail to start DMA %u.\n", dev_id);
+ ret = -1;
+ goto out;
+ }
+
+ dmas_id[dma_count++] = dev_id;
+
+done:
+ (dma_info + socketid)->dmas[vring_id].dev_id = dev_id;
+ (dma_info + socketid)->async_flag |= async_flag;
+ i++;
+ }
+out:
+ free(input);
+ return ret;
+}
/*
* Builds up the correct configuration for VMDQ VLAN pool map
return 0;
}
-/*
- * Validate the device number according to the max pool number gotten form
- * dev_info. If the device number is invalid, give the error message and
- * return -1. Each device must have its own pool.
- */
-static inline int
-validate_num_devices(uint32_t max_nb_devices)
-{
- if (num_devices > max_nb_devices) {
- RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
- return -1;
- }
- return 0;
-}
-
/*
* Initialises a given port using global settings and with the rx buffers
* coming from the mbuf_pool passed as parameter
uint16_t q;
/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
- rte_eth_dev_info_get (port, &dev_info);
+ retval = rte_eth_dev_info_get(port, &dev_info);
+ if (retval != 0) {
+ RTE_LOG(ERR, VHOST_PORT,
+ "Error during getting device (port %u) info: %s\n",
+ port, strerror(-retval));
+
+ return retval;
+ }
+ if (dev_info.max_vmdq_pools == 0) {
+ RTE_LOG(ERR, VHOST_PORT, "Failed to get VMDq info.\n");
+ return -1;
+ }
rxconf = &dev_info.default_rxconf;
txconf = &dev_info.default_txconf;
rxconf->rx_drop_en = 1;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
/*configure the number of supported virtio devices based on VMDQ limits */
num_devices = dev_info.max_vmdq_pools;
rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
- /*
- * When dequeue zero copy is enabled, guest Tx used vring will be
- * updated only when corresponding mbuf is freed. Thus, the nb_tx_desc
- * (tx_ring_size here) must be small enough so that the driver will
- * hit the free threshold easily and free mbufs timely. Otherwise,
- * guest Tx vring would be starved.
- */
- if (dequeue_zero_copy)
- tx_ring_size = 64;
-
tx_rings = (uint16_t)rte_lcore_count();
- retval = validate_num_devices(MAX_DEVICES);
- if (retval < 0)
- return retval;
+ if (mergeable) {
+ if (dev_info.max_mtu != UINT16_MAX && dev_info.max_rx_pktlen > dev_info.max_mtu)
+ vmdq_conf_default.rxmode.mtu = dev_info.max_mtu;
+ else
+ vmdq_conf_default.rxmode.mtu = MAX_MTU;
+ }
/* Get port configuration. */
retval = get_eth_conf(&port_conf, num_devices);
return -1;
rx_rings = (uint16_t)dev_info.max_rx_queues;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Configure ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
if (retval != 0) {
return retval;
}
- if (promiscuous)
- rte_eth_promiscuous_enable(port);
+ if (promiscuous) {
+ retval = rte_eth_promiscuous_enable(port);
+ if (retval != 0) {
+ RTE_LOG(ERR, VHOST_PORT,
+ "Failed to enable promiscuous mode on port %u: %s\n",
+ port, rte_strerror(-retval));
+ return retval;
+ }
+ }
+
+ retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
+ if (retval < 0) {
+ RTE_LOG(ERR, VHOST_PORT,
+ "Failed to get MAC address on port %u: %s\n",
+ port, rte_strerror(-retval));
+ return retval;
+ }
- rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
- " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- port,
- vmdq_ports_eth_addr[port].addr_bytes[0],
- vmdq_ports_eth_addr[port].addr_bytes[1],
- vmdq_ports_eth_addr[port].addr_bytes[2],
- vmdq_ports_eth_addr[port].addr_bytes[3],
- vmdq_ports_eth_addr[port].addr_bytes[4],
- vmdq_ports_eth_addr[port].addr_bytes[5]);
+ " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
+ port, RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));
return 0;
}
static int
us_vhost_parse_socket_path(const char *q_arg)
{
+ char *old;
+
/* parse number string */
if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
return -1;
+ old = socket_files;
socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
- snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
+ if (socket_files == NULL) {
+ free(old);
+ return -1;
+ }
+
+ strlcpy(socket_files + nb_sockets * PATH_MAX, q_arg, PATH_MAX);
nb_sockets++;
return 0;
/* parse hexadecimal string */
pm = strtoul(portmask, &end, 16);
if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
- return -1;
-
- if (pm == 0)
- return -1;
+ return 0;
return pm;
" --nb-devices ND\n"
" -p PORTMASK: Set mask for ports to be used by application\n"
" --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
- " --rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destintation queue is full\n"
+ " --rx-retry [0|1]: disable/enable(default) retries on Rx. Enable retry if destination queue is full\n"
" --rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
" --rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
" --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
" --tx-csum [0|1] disable/enable TX checksum offload.\n"
" --tso [0|1] disable/enable TCP segment offload.\n"
" --client register a vhost-user socket as client mode.\n"
- " --dequeue-zero-copy enables dequeue zero copy\n",
+ " --dmas register a DMA channel for a specific vhost device.\n"
+ " --total-num-mbufs [0-N] set the number of mbufs to be allocated in mbuf pools; the default is 147456.\n",
prgname);
}
+enum {
+#define OPT_VM2VM "vm2vm"
+ OPT_VM2VM_NUM = 256,
+#define OPT_RX_RETRY "rx-retry"
+ OPT_RX_RETRY_NUM,
+#define OPT_RX_RETRY_DELAY "rx-retry-delay"
+ OPT_RX_RETRY_DELAY_NUM,
+#define OPT_RX_RETRY_NUMB "rx-retry-num"
+ OPT_RX_RETRY_NUMB_NUM,
+#define OPT_MERGEABLE "mergeable"
+ OPT_MERGEABLE_NUM,
+#define OPT_STATS "stats"
+ OPT_STATS_NUM,
+#define OPT_SOCKET_FILE "socket-file"
+ OPT_SOCKET_FILE_NUM,
+#define OPT_TX_CSUM "tx-csum"
+ OPT_TX_CSUM_NUM,
+#define OPT_TSO "tso"
+ OPT_TSO_NUM,
+#define OPT_CLIENT "client"
+ OPT_CLIENT_NUM,
+#define OPT_BUILTIN_NET_DRIVER "builtin-net-driver"
+ OPT_BUILTIN_NET_DRIVER_NUM,
+#define OPT_DMAS "dmas"
+ OPT_DMAS_NUM,
+#define OPT_NUM_MBUFS "total-num-mbufs"
+ OPT_NUM_MBUFS_NUM,
+};
+
/*
* Parse the arguments given in the command line of the application.
*/
unsigned i;
const char *prgname = argv[0];
static struct option long_option[] = {
- {"vm2vm", required_argument, NULL, 0},
- {"rx-retry", required_argument, NULL, 0},
- {"rx-retry-delay", required_argument, NULL, 0},
- {"rx-retry-num", required_argument, NULL, 0},
- {"mergeable", required_argument, NULL, 0},
- {"stats", required_argument, NULL, 0},
- {"socket-file", required_argument, NULL, 0},
- {"tx-csum", required_argument, NULL, 0},
- {"tso", required_argument, NULL, 0},
- {"client", no_argument, &client_mode, 1},
- {"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
- {"builtin-net-driver", no_argument, &builtin_net_driver, 1},
+ {OPT_VM2VM, required_argument,
+ NULL, OPT_VM2VM_NUM},
+ {OPT_RX_RETRY, required_argument,
+ NULL, OPT_RX_RETRY_NUM},
+ {OPT_RX_RETRY_DELAY, required_argument,
+ NULL, OPT_RX_RETRY_DELAY_NUM},
+ {OPT_RX_RETRY_NUMB, required_argument,
+ NULL, OPT_RX_RETRY_NUMB_NUM},
+ {OPT_MERGEABLE, required_argument,
+ NULL, OPT_MERGEABLE_NUM},
+ {OPT_STATS, required_argument,
+ NULL, OPT_STATS_NUM},
+ {OPT_SOCKET_FILE, required_argument,
+ NULL, OPT_SOCKET_FILE_NUM},
+ {OPT_TX_CSUM, required_argument,
+ NULL, OPT_TX_CSUM_NUM},
+ {OPT_TSO, required_argument,
+ NULL, OPT_TSO_NUM},
+ {OPT_CLIENT, no_argument,
+ NULL, OPT_CLIENT_NUM},
+ {OPT_BUILTIN_NET_DRIVER, no_argument,
+ NULL, OPT_BUILTIN_NET_DRIVER_NUM},
+ {OPT_DMAS, required_argument,
+ NULL, OPT_DMAS_NUM},
+ {OPT_NUM_MBUFS, required_argument,
+ NULL, OPT_NUM_MBUFS_NUM},
{NULL, 0, 0, 0},
};
case 'P':
promiscuous = 1;
vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
- ETH_VMDQ_ACCEPT_BROADCAST |
- ETH_VMDQ_ACCEPT_MULTICAST;
+ RTE_ETH_VMDQ_ACCEPT_BROADCAST |
+ RTE_ETH_VMDQ_ACCEPT_MULTICAST;
+ break;
+ case OPT_VM2VM_NUM:
+ ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Invalid argument for "
+ "vm2vm [0|1|2]\n");
+ us_vhost_usage(prgname);
+ return -1;
+ }
+ vm2vm_mode = (vm2vm_type)ret;
break;
- case 0:
- /* Enable/disable vm2vm comms. */
- if (!strncmp(long_option[option_index].name, "vm2vm",
- MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "Invalid argument for "
- "vm2vm [0|1|2]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- vm2vm_mode = (vm2vm_type)ret;
- }
+ case OPT_RX_RETRY_NUM:
+ ret = parse_num_opt(optarg, 1);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ enable_retry = ret;
+ break;
- /* Enable/disable retries on RX. */
- if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, 1);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- enable_retry = ret;
- }
+ case OPT_TX_CSUM_NUM:
+ ret = parse_num_opt(optarg, 1);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ enable_tx_csum = ret;
+ break;
- /* Enable/disable TX checksum offload. */
- if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, 1);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
- us_vhost_usage(prgname);
- return -1;
- } else
- enable_tx_csum = ret;
+ case OPT_TSO_NUM:
+ ret = parse_num_opt(optarg, 1);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ enable_tso = ret;
+ break;
- /* Enable/disable TSO offload. */
- if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, 1);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
- us_vhost_usage(prgname);
- return -1;
- } else
- enable_tso = ret;
+ case OPT_RX_RETRY_DELAY_NUM:
+ ret = parse_num_opt(optarg, INT32_MAX);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ burst_rx_delay_time = ret;
+ break;
- /* Specify the retries delay time (in useconds) on RX. */
- if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, INT32_MAX);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- burst_rx_delay_time = ret;
- }
+ case OPT_RX_RETRY_NUMB_NUM:
+ ret = parse_num_opt(optarg, INT32_MAX);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ burst_rx_retry_num = ret;
+ break;
- /* Specify the retries number on RX. */
- if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, INT32_MAX);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- burst_rx_retry_num = ret;
- }
+ case OPT_MERGEABLE_NUM:
+ ret = parse_num_opt(optarg, 1);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ mergeable = !!ret;
+ break;
- /* Enable/disable RX mergeable buffers. */
- if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, 1);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- mergeable = !!ret;
- if (ret) {
- vmdq_conf_default.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- vmdq_conf_default.rxmode.max_rx_pkt_len
- = JUMBO_FRAME_MAX_SIZE;
- }
- }
+ case OPT_STATS_NUM:
+ ret = parse_num_opt(optarg, INT32_MAX);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Invalid argument for stats [0..N]\n");
+ us_vhost_usage(prgname);
+ return -1;
+ }
+ enable_stats = ret;
+ break;
+
+ /* Set socket file path. */
+ case OPT_SOCKET_FILE_NUM:
+ if (us_vhost_parse_socket_path(optarg) == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Invalid argument for socket name (Max %d characters)\n",
+ PATH_MAX);
+ us_vhost_usage(prgname);
+ return -1;
}
+ break;
- /* Enable/disable stats. */
- if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, INT32_MAX);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "Invalid argument for stats [0..N]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- enable_stats = ret;
- }
+ case OPT_DMAS_NUM:
+ if (open_dma(optarg) == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Wrong DMA args\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ break;
- /* Set socket file path. */
- if (!strncmp(long_option[option_index].name,
- "socket-file", MAX_LONG_OPT_SZ)) {
- if (us_vhost_parse_socket_path(optarg) == -1) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "Invalid argument for socket name (Max %d characters)\n",
- PATH_MAX);
- us_vhost_usage(prgname);
- return -1;
- }
+ case OPT_NUM_MBUFS_NUM:
+ ret = parse_num_opt(optarg, INT32_MAX);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Invalid argument for total-num-mbufs [0..N]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ if (total_num_mbufs < ret)
+ total_num_mbufs = ret;
+ break;
+
+ case OPT_CLIENT_NUM:
+ client_mode = 1;
break;
- /* Invalid option - print options. */
+ case OPT_BUILTIN_NET_DRIVER_NUM:
+ builtin_net_driver = 1;
+ break;
+
+ /* Invalid option - print options. */
default:
us_vhost_usage(prgname);
return -1;
}
static __rte_always_inline struct vhost_dev *
-find_vhost_dev(struct ether_addr *mac)
+find_vhost_dev(struct rte_ether_addr *mac)
{
struct vhost_dev *vdev;
TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
if (vdev->ready == DEVICE_RX &&
- is_same_ether_addr(mac, &vdev->mac_address))
+ rte_is_same_ether_addr(mac, &vdev->mac_address))
return vdev;
}
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
- struct ether_hdr *pkt_hdr;
+ struct rte_ether_hdr *pkt_hdr;
int i, ret;
/* Learn MAC address of guest device from packet */
- pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
- if (find_vhost_dev(&pkt_hdr->s_addr)) {
+ if (find_vhost_dev(&pkt_hdr->src_addr)) {
RTE_LOG(ERR, VHOST_DATA,
"(%d) device is using a registered MAC!\n",
vdev->vid);
return -1;
}
- for (i = 0; i < ETHER_ADDR_LEN; i++)
- vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
+ vdev->mac_address.addr_bytes[i] =
+ pkt_hdr->src_addr.addr_bytes[i];
/* vlan_tag currently uses the device_id. */
vdev->vlan_tag = vlan_tags[vdev->vid];
/* Print out VMDQ registration info. */
RTE_LOG(INFO, VHOST_DATA,
- "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
- vdev->vid,
- vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
- vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
- vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
+ "(%d) mac " RTE_ETHER_ADDR_PRT_FMT " and vlan %d registered\n",
+ vdev->vid, RTE_ETHER_ADDR_BYTES(&vdev->mac_address),
vdev->vlan_tag);
/* Register the MAC address. */
}
}
+static inline void
+free_pkts(struct rte_mbuf **pkts, uint16_t n)
+{
+ while (n--)
+ rte_pktmbuf_free(pkts[n]);
+}
+
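+/* Poll the enqueue path for completed async copies and free the copied mbufs. */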
+static __rte_always_inline void
+complete_async_pkts(struct vhost_dev *vdev)
+{
+ struct rte_mbuf *p_cpl[MAX_PKT_BURST];
+ uint16_t complete_count;
+ int16_t dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].dev_id;
+
+ complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
+ VIRTIO_RXQ, p_cpl, MAX_PKT_BURST, dma_id, 0);
+ if (complete_count)
+ free_pkts(p_cpl, complete_count);
+}
+
static __rte_always_inline void
-virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
+sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
struct rte_mbuf *m)
{
uint16_t ret;
}
if (enable_stats) {
- rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
- rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
+ __atomic_add_fetch(&dst_vdev->stats.rx_total_atomic, 1,
+ __ATOMIC_SEQ_CST);
+ __atomic_add_fetch(&dst_vdev->stats.rx_atomic, ret,
+ __ATOMIC_SEQ_CST);
src_vdev->stats.tx_total++;
src_vdev->stats.tx += ret;
}
}
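+/* Flush this lcore's buffered packets for a vhost device into its Rx virtqueue. */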
+static __rte_always_inline void
+drain_vhost(struct vhost_dev *vdev)
+{
+ uint16_t ret;
+ uint32_t buff_idx = rte_lcore_id() * RTE_MAX_VHOST_DEVICE + vdev->vid;
+ uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
+ struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
+
+ ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev, VIRTIO_RXQ, m, nr_xmit);
+
+ if (enable_stats) {
+ __atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
+ __ATOMIC_SEQ_CST);
+ __atomic_add_fetch(&vdev->stats.rx_atomic, ret,
+ __ATOMIC_SEQ_CST);
+ }
+
+ if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled)
+ free_pkts(m, nr_xmit);
+}
+
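+/*
+ * Flush any vhost TX buffer on this lcore that has not been drained
+ * within MBUF_TABLE_DRAIN_TSC cycles.
+ */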
+static __rte_always_inline void
+drain_vhost_table(void)
+{
+ uint16_t lcore_id = rte_lcore_id();
+ struct vhost_bufftable *vhost_txq;
+ struct vhost_dev *vdev;
+ uint64_t cur_tsc;
+
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ if (unlikely(vdev->remove == 1))
+ continue;
+
+ vhost_txq = vhost_txbuff[lcore_id * RTE_MAX_VHOST_DEVICE + vdev->vid];
+
+ cur_tsc = rte_rdtsc();
+ if (unlikely(cur_tsc - vhost_txq->pre_tsc
+ > MBUF_TABLE_DRAIN_TSC)) {
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
+ "Vhost TX queue drained after timeout with burst size %u\n",
+ vhost_txq->len);
+ drain_vhost(vdev);
+ vhost_txq->len = 0;
+ vhost_txq->pre_tsc = cur_tsc;
+ }
+ }
+}
+
/*
* Check if the packet destination MAC address is for a local device. If so then put
* the packet on that devices RX queue. If not then return.
static __rte_always_inline int
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
- struct ether_hdr *pkt_hdr;
+ struct rte_ether_hdr *pkt_hdr;
struct vhost_dev *dst_vdev;
+ struct vhost_bufftable *vhost_txq;
+ uint16_t lcore_id = rte_lcore_id();
+ pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
- pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
-
- dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
+ dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
if (!dst_vdev)
return -1;
return 0;
}
- virtio_xmit(dst_vdev, vdev, m);
+ vhost_txq = vhost_txbuff[lcore_id * RTE_MAX_VHOST_DEVICE + dst_vdev->vid];
+ vhost_txq->m_table[vhost_txq->len++] = m;
+
+ if (enable_stats) {
+ vdev->stats.tx_total++;
+ vdev->stats.tx++;
+ }
+
+ if (unlikely(vhost_txq->len == MAX_PKT_BURST)) {
+ drain_vhost(dst_vdev);
+ vhost_txq->len = 0;
+ vhost_txq->pre_tsc = rte_rdtsc();
+ }
return 0;
}
uint32_t *offset, uint16_t *vlan_tag)
{
struct vhost_dev *dst_vdev;
- struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ struct rte_ether_hdr *pkt_hdr =
+ rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
- dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
+ dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
if (!dst_vdev)
return 0;
* by minus length of vlan tag, so need restore
* the packet length by plus it.
*/
- *offset = VLAN_HLEN;
+ *offset = RTE_VLAN_HLEN;
*vlan_tag = vlan_tags[vdev->vid];
RTE_LOG_DP(DEBUG, VHOST_DATA,
return 0;
}
-static uint16_t
-get_psd_sum(void *l3_hdr, uint64_t ol_flags)
-{
- if (ol_flags & PKT_TX_IPV4)
- return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
- else /* assume ethertype == ETHER_TYPE_IPv6 */
- return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
-}
-
static void virtio_tx_offload(struct rte_mbuf *m)
{
+ struct rte_net_hdr_lens hdr_lens;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
+ uint32_t ptype;
void *l3_hdr;
- struct ipv4_hdr *ipv4_hdr = NULL;
- struct tcp_hdr *tcp_hdr = NULL;
- struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
- l3_hdr = (char *)eth_hdr + m->l2_len;
+ ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
+ m->l2_len = hdr_lens.l2_len;
+ m->l3_len = hdr_lens.l3_len;
+ m->l4_len = hdr_lens.l4_len;
+
+ l3_hdr = rte_pktmbuf_mtod_offset(m, void *, m->l2_len);
+ tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
+ m->l2_len + m->l3_len);
- if (m->ol_flags & PKT_TX_IPV4) {
+ m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
+ if ((ptype & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) {
+ m->ol_flags |= RTE_MBUF_F_TX_IPV4;
+ m->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
ipv4_hdr = l3_hdr;
ipv4_hdr->hdr_checksum = 0;
- m->ol_flags |= PKT_TX_IP_CKSUM;
+ tcp_hdr->cksum = rte_ipv4_phdr_cksum(l3_hdr, m->ol_flags);
+ } else { /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
+ m->ol_flags |= RTE_MBUF_F_TX_IPV6;
+ tcp_hdr->cksum = rte_ipv6_phdr_cksum(l3_hdr, m->ol_flags);
}
-
- tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
- tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
-}
-
-static inline void
-free_pkts(struct rte_mbuf **pkts, uint16_t n)
-{
- while (n--)
- rte_pktmbuf_free(pkts[n]);
}
static __rte_always_inline void
struct mbuf_table *tx_q;
unsigned offset = 0;
const uint16_t lcore_id = rte_lcore_id();
- struct ether_hdr *nh;
+ struct rte_ether_hdr *nh;
- nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
- if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
+ nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+ if (unlikely(rte_is_broadcast_ether_addr(&nh->dst_addr))) {
struct vhost_dev *vdev2;
TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
if (vdev2 != vdev)
- virtio_xmit(vdev2, vdev, m);
+ sync_virtio_xmit(vdev2, vdev, m);
}
goto queue2nic;
}
/*check if destination is local VM*/
- if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
- rte_pktmbuf_free(m);
+ if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0))
return;
- }
if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
if (unlikely(find_local_dest(vdev, m, &offset,
/*Add packet to the port tx queue*/
tx_q = &lcore_tx_queue[lcore_id];
- nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
- if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
+ nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+ if (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {
/* Guest has inserted the vlan tag. */
- struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
+ struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1);
uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
if ((vm2vm_mode == VM2VM_HARDWARE) &&
(vh->vlan_tci != vlan_tag_be))
vh->vlan_tci = vlan_tag_be;
} else {
- m->ol_flags |= PKT_TX_VLAN_PKT;
+ m->ol_flags |= RTE_MBUF_F_TX_VLAN;
/*
* Find the right seg to adjust the data len when offset is
m->vlan_tci = vlan_tag;
}
- if (m->ol_flags & PKT_TX_TCP_SEG)
+ if (m->ol_flags & RTE_MBUF_F_RX_LRO)
virtio_tx_offload(m);
tx_q->m_table[tx_q->len++] = m;
}
}
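+/* Enqueue a burst to the guest Rx ring via the DMA-accelerated async data path. */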
+uint16_t
+async_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t rx_count)
+{
+ uint16_t enqueue_count;
+ uint16_t enqueue_fail = 0;
+ uint16_t dma_id = dma_bind[vid2socketid[dev->vid]].dmas[VIRTIO_RXQ].dev_id;
+
+ complete_async_pkts(dev);
+ enqueue_count = rte_vhost_submit_enqueue_burst(dev->vid, queue_id,
+ pkts, rx_count, dma_id, 0);
+
+ enqueue_fail = rx_count - enqueue_count;
+ if (enqueue_fail)
+ free_pkts(&pkts[enqueue_count], enqueue_fail);
+
+ return enqueue_count;
+}
+
+uint16_t
+sync_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t rx_count)
+{
+ return rte_vhost_enqueue_burst(dev->vid, queue_id, pkts, rx_count);
+}
+
static __rte_always_inline void
drain_eth_rx(struct vhost_dev *vdev)
{
rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
pkts, MAX_PKT_BURST);
+
if (!rx_count)
return;
}
}
- if (builtin_net_driver) {
- enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
- pkts, rx_count);
- } else {
- enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
- pkts, rx_count);
- }
+ enqueue_count = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
+ VIRTIO_RXQ, pkts, rx_count);
+
if (enable_stats) {
- rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
- rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
+ __atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
+ __ATOMIC_SEQ_CST);
+ __atomic_add_fetch(&vdev->stats.rx_atomic, enqueue_count,
+ __ATOMIC_SEQ_CST);
}
- free_pkts(pkts, rx_count);
+ if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled)
+ free_pkts(pkts, rx_count);
+}
+
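+/* Dequeue a burst from the guest Tx ring via the DMA-accelerated async data path. */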
+uint16_t async_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ int nr_inflight;
+ uint16_t dequeue_count;
+ int16_t dma_id = dma_bind[vid2socketid[dev->vid]].dmas[VIRTIO_TXQ].dev_id;
+
+ dequeue_count = rte_vhost_async_try_dequeue_burst(dev->vid, queue_id,
+ mbuf_pool, pkts, count, &nr_inflight, dma_id, 0);
+
+ return dequeue_count;
+}
+
+uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ return rte_vhost_dequeue_burst(dev->vid, queue_id, mbuf_pool, pkts, count);
}
static __rte_always_inline void
uint16_t count;
uint16_t i;
- if (builtin_net_driver) {
- count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
- pkts, MAX_PKT_BURST);
- } else {
- count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
- mbuf_pool, pkts, MAX_PKT_BURST);
- }
+ count = vdev_queue_ops[vdev->vid].dequeue_pkt_burst(vdev,
+ VIRTIO_TXQ, mbuf_pool, pkts, MAX_PKT_BURST);
/* setup VMDq for the first packet */
if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
struct vhost_dev *vdev;
struct mbuf_table *tx_q;
- RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started\n", lcore_id);
+ RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
tx_q = &lcore_tx_queue[lcore_id];
for (i = 0; i < rte_lcore_count(); i++) {
while(1) {
drain_mbuf_table(tx_q);
-
+ drain_vhost_table();
/*
* Inform the configuration core that we have exited the
* linked list and that no devices are in use if requested.
return 0;
}
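+/*
+ * Drain all in-flight async copies of a virtqueue and free the packets.
+ * The caller must ensure the queue is not accessed concurrently.
+ */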
+static void
+vhost_clear_queue_thread_unsafe(struct vhost_dev *vdev, uint16_t queue_id)
+{
+ uint16_t n_pkt = 0;
+ int pkts_inflight;
+
+ int16_t dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dev_id;
+ pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vdev->vid, queue_id);
+
+ struct rte_mbuf *m_cpl[pkts_inflight];
+
+ while (pkts_inflight) {
+ n_pkt = rte_vhost_clear_queue_thread_unsafe(vdev->vid, queue_id, m_cpl,
+ pkts_inflight, dma_id, 0);
+ free_pkts(m_cpl, n_pkt);
+ pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vdev->vid,
+ queue_id);
+ }
+}
+
/*
* Remove a device from the specific data core linked list and from the
- * main linked list. Synchonization occurs through the use of the
+ * main linked list. Synchronization occurs through the use of the
* lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
* of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
*/
{
struct vhost_dev *vdev = NULL;
int lcore;
+ uint16_t i;
TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
if (vdev->vid == vid)
rte_pause();
}
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ rte_free(vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid]);
+
if (builtin_net_driver)
vs_vhost_net_remove(vdev);
/* Set the dev_removal_flag on each lcore. */
- RTE_LCORE_FOREACH_SLAVE(lcore)
+ RTE_LCORE_FOREACH_WORKER(lcore)
lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
/*
* we can be sure that they can no longer access the device removed
* from the linked lists and that the devices are no longer in use.
*/
- RTE_LCORE_FOREACH_SLAVE(lcore) {
+ RTE_LCORE_FOREACH_WORKER(lcore) {
while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
rte_pause();
}
"(%d) device has been removed from data core\n",
vdev->vid);
+ if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
+ vhost_clear_queue_thread_unsafe(vdev, VIRTIO_RXQ);
+ rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
+ dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
+ }
+
+ if (dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled) {
+ vhost_clear_queue_thread_unsafe(vdev, VIRTIO_TXQ);
+ rte_vhost_async_channel_unregister(vid, VIRTIO_TXQ);
+ dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled = false;
+ }
+
rte_free(vdev);
}
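+/* Look up the index of the socket file backing the given vhost vid. */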
+static inline int
+get_socketid_by_vid(int vid)
+{
+ int i;
+ char ifname[PATH_MAX];
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+ for (i = 0; i < nb_sockets; i++) {
+ char *file = socket_files + i * PATH_MAX;
+ if (strcmp(file, ifname) == 0)
+ return i;
+ }
+
+ return -1;
+}
+
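+/* Select built-in, async or sync enqueue/dequeue callbacks for this device. */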
+static int
+init_vhost_queue_ops(int vid)
+{
+ if (builtin_net_driver) {
+ vdev_queue_ops[vid].enqueue_pkt_burst = builtin_enqueue_pkts;
+ vdev_queue_ops[vid].dequeue_pkt_burst = builtin_dequeue_pkts;
+ } else {
+ if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].async_enabled)
+ vdev_queue_ops[vid].enqueue_pkt_burst = async_enqueue_pkts;
+ else
+ vdev_queue_ops[vid].enqueue_pkt_burst = sync_enqueue_pkts;
+
+ if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].async_enabled)
+ vdev_queue_ops[vid].dequeue_pkt_burst = async_dequeue_pkts;
+ else
+ vdev_queue_ops[vid].dequeue_pkt_burst = sync_dequeue_pkts;
+ }
+
+ return 0;
+}
+
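+/* Register vhost async channels for the queues that have a DMA device bound. */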
+static inline int
+vhost_async_channel_register(int vid)
+{
+ int rx_ret = 0, tx_ret = 0;
+
+ if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].dev_id != INVALID_DMA_ID) {
+ rx_ret = rte_vhost_async_channel_register(vid, VIRTIO_RXQ);
+ if (rx_ret == 0)
+ dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].async_enabled = true;
+ }
+
+ if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].dev_id != INVALID_DMA_ID) {
+ tx_ret = rte_vhost_async_channel_register(vid, VIRTIO_TXQ);
+ if (tx_ret == 0)
+ dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].async_enabled = true;
+ }
+
+ return rx_ret | tx_ret;
+}
+
/*
* A new device is added to a data core. First the device is added to the main linked list
- * and the allocated to a specific data core.
+ * and then allocated to a specific data core.
*/
static int
new_device(int vid)
{
int lcore, core_add = 0;
+ uint16_t i;
uint32_t device_num_min = num_devices;
struct vhost_dev *vdev;
+ int ret;
vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
if (vdev == NULL) {
}
vdev->vid = vid;
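+ /* Allocate a TX buffer for this device on every lcore. */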
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid]
+ = rte_zmalloc("vhost bufftable",
+ sizeof(struct vhost_bufftable),
+ RTE_CACHE_LINE_SIZE);
+
+ if (vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid] == NULL) {
+ RTE_LOG(INFO, VHOST_DATA,
+ "(%d) couldn't allocate memory for vhost TX\n", vid);
+ return -1;
+ }
+ }
+
+ int socketid = get_socketid_by_vid(vid);
+ if (socketid == -1)
+ return -1;
+
+ init_vid2socketid_array(vid, socketid);
+
+ ret = vhost_async_channel_register(vid);
+
+ if (init_vhost_queue_ops(vid) != 0)
+ return -1;
+
if (builtin_net_driver)
vs_vhost_net_setup(vdev);
vdev->remove = 0;
/* Find a suitable lcore to add the device. */
- RTE_LCORE_FOREACH_SLAVE(lcore) {
+ RTE_LCORE_FOREACH_WORKER(lcore) {
if (lcore_info[lcore].device_num < device_num_min) {
device_num_min = lcore_info[lcore].device_num;
core_add = lcore;
"(%d) device has been added to data core %d\n",
vid, vdev->coreid);
+ return ret;
+}
+
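+/*
+ * When a guest Rx vring is disabled, drain its in-flight async copies
+ * before the state change takes effect.
+ */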
+static int
+vring_state_changed(int vid, uint16_t queue_id, int enable)
+{
+ struct vhost_dev *vdev = NULL;
+
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ if (vdev->vid == vid)
+ break;
+ }
+ if (!vdev)
+ return -1;
+
+ if (queue_id != VIRTIO_RXQ)
+ return 0;
+
+ if (dma_bind[vid2socketid[vid]].dmas[queue_id].async_enabled) {
+ if (!enable)
+ vhost_clear_queue_thread_unsafe(vdev, queue_id);
+ }
+
return 0;
}
* These callback allow devices to be added to the data core when configuration
* has been fully complete.
*/
-static const struct vhost_device_ops virtio_net_device_ops =
+static const struct rte_vhost_device_ops virtio_net_device_ops =
{
.new_device = new_device,
.destroy_device = destroy_device,
+ .vring_state_changed = vring_state_changed,
};
/*
tx = vdev->stats.tx;
tx_dropped = tx_total - tx;
- rx_total = rte_atomic64_read(&vdev->stats.rx_total_atomic);
- rx = rte_atomic64_read(&vdev->stats.rx_atomic);
+ rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
+ __ATOMIC_SEQ_CST);
+ rx = __atomic_load_n(&vdev->stats.rx_atomic,
+ __ATOMIC_SEQ_CST);
rx_dropped = rx_total - rx;
printf("Statistics for device %d\n"
}
printf("===================================================\n");
+
+ fflush(stdout);
}
return NULL;
exit(0);
}
-/*
- * While creating an mbuf pool, one key thing is to figure out how
- * many mbuf entries is enough for our use. FYI, here are some
- * guidelines:
- *
- * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
- *
- * - For each switch core (A CPU core does the packet switch), we need
- * also make some reservation for receiving the packets from virtio
- * Tx queue. How many is enough depends on the usage. It's normally
- * a simple calculation like following:
- *
- * MAX_PKT_BURST * max packet size / mbuf size
- *
- * So, we definitely need allocate more mbufs when TSO is enabled.
- *
- * - Similarly, for each switching core, we should serve @nr_rx_desc
- * mbufs for receiving the packets from physical NIC device.
- *
- * - We also need make sure, for each switch core, we have allocated
- * enough mbufs to fill up the mbuf cache.
- */
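+/* Mark every vhost queue as having no DMA device bound. */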
static void
-create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
- uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
+reset_dma(void)
{
- uint32_t nr_mbufs;
- uint32_t nr_mbufs_per_core;
- uint32_t mtu = 1500;
-
- if (mergeable)
- mtu = 9000;
- if (enable_tso)
- mtu = 64 * 1024;
-
- nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
- (mbuf_size - RTE_PKTMBUF_HEADROOM);
- nr_mbufs_per_core += nr_rx_desc;
- nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
-
- nr_mbufs = nr_queues * nr_rx_desc;
- nr_mbufs += nr_mbufs_per_core * nr_switch_core;
- nr_mbufs *= nr_port;
-
- mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
- nr_mbuf_cache, 0, mbuf_size,
- rte_socket_id());
- if (mbuf_pool == NULL)
- rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+ int i;
+
+ for (i = 0; i < RTE_MAX_VHOST_DEVICE; i++) {
+ int j;
+
+ for (j = 0; j < RTE_MAX_QUEUES_PER_PORT * 2; j++) {
+ dma_bind[i].dmas[j].dev_id = INVALID_DMA_ID;
+ dma_bind[i].dmas[j].async_enabled = false;
+ }
+ }
+
+ for (i = 0; i < RTE_DMADEV_DEFAULT_MAX; i++)
+ dmas_id[i] = INVALID_DMA_ID;
}
/*
int ret, i;
uint16_t portid;
static pthread_t tid;
- uint64_t flags = 0;
+ uint64_t flags = RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS;
signal(SIGINT, sigint_handler);
argc -= ret;
argv += ret;
+ /* initialize dma structures */
+ reset_dma();
+
/* parse app arguments */
ret = us_vhost_parse_args(argc, argv);
if (ret < 0)
* many queues here. We probably should only do allocation for
* those queues we are going to use.
*/
- create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
- MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
+ mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", total_num_mbufs,
+ MBUF_CACHE_SIZE, 0, MBUF_DATA_SIZE,
+ rte_socket_id());
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
if (vm2vm_mode == VM2VM_HARDWARE) {
/* Enable VT loop back to let L2 switch to do it. */
}
/* Launch all data cores. */
- RTE_LCORE_FOREACH_SLAVE(lcore_id)
+ RTE_LCORE_FOREACH_WORKER(lcore_id)
rte_eal_remote_launch(switch_worker, NULL, lcore_id);
if (client_mode)
flags |= RTE_VHOST_USER_CLIENT;
- if (dequeue_zero_copy)
- flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
+ for (i = 0; i < dma_count; i++) {
+ if (rte_vhost_async_dma_configure(dmas_id[i], 0) < 0) {
+ RTE_LOG(ERR, VHOST_PORT, "Failed to configure DMA in vhost.\n");
+ rte_exit(EXIT_FAILURE, "Cannot use given DMA device\n");
+ }
+ }
/* Register vhost user driver to handle vhost messages. */
for (i = 0; i < nb_sockets; i++) {
char *file = socket_files + i * PATH_MAX;
+
+ if (dma_count && get_async_flag_by_socketid(i) != 0)
+ flags = flags | RTE_VHOST_USER_ASYNC_COPY;
+
ret = rte_vhost_driver_register(file, flags);
if (ret != 0) {
unregister_drivers(i);
}
}
- RTE_LCORE_FOREACH_SLAVE(lcore_id)
+ RTE_LCORE_FOREACH_WORKER(lcore_id)
rte_eal_wait_lcore(lcore_id);
- return 0;
+ /* clean up the EAL */
+ rte_eal_cleanup();
+ return 0;
}