A very simple vhost-user net driver which demonstrates how to use the generic
vhost APIs will be used when this option is given. It is disabled by default.
+**--dma-type**
+This parameter is used to specify the DMA type used by the async vhost-user
+net driver, which demonstrates how to use the async vhost APIs. It is used in
+combination with --dmas.
+
+**--dmas**
+This parameter is used to specify the DMA device assigned to a vhost device.
+The async vhost-user net driver is used if --dmas is set. For example,
+--dmas [txd0@00:04.0,txd1@00:04.1] means that DMA channel 00:04.0 is used for
+the enqueue operations of vhost device 0 and DMA channel 00:04.1 for the
+enqueue operations of vhost device 1.
+
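+For example, the application can be launched as follows (the binary name, EAL
+arguments and socket path are illustrative placeholders, not mandated by this
+patch):
+
+.. code-block:: console
+
+    ./dpdk-vhost -l 0-3 -n 4 -- --socket-file /tmp/sock0 \
+        --dma-type ioat --dmas [txd0@00:04.0,txd1@00:04.1]
+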
Common Issues
-------------
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2020 Intel Corporation
+ */
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_rawdev.h>
+#include <rte_ioat_rawdev.h>
+#include <rte_string_fns.h>
+
+#include "ioat.h"
+#include "main.h"
+
+struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];
+
+int
+open_ioat(const char *value)
+{
+ struct dma_for_vhost *dma_info = dma_bind;
+ char *input = strndup(value, strlen(value) + 1);
+ char *addrs = input;
+ char *ptrs[2];
+ char *start, *end, *substr;
+ int64_t vid, vring_id;
+ struct rte_ioat_rawdev_config config;
+ struct rte_rawdev_info info = { .dev_private = &config };
+ char name[32];
+ int dev_id;
+ int ret = 0;
+ uint16_t i = 0;
+ char *dma_arg[MAX_VHOST_DEVICE];
+ uint8_t args_nr;
+
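+ /* expected format: [txd<vid>@<pci address>,txd<vid>@<pci address>,...] */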
+ while (isblank(*addrs))
+ addrs++;
+ if (*addrs == '\0') {
+ ret = -1;
+ goto out;
+ }
+
+ /* process DMA devices within bracket. */
+ addrs++;
+ substr = strtok(addrs, ";]");
+ if (!substr) {
+ ret = -1;
+ goto out;
+ }
+ args_nr = rte_strsplit(substr, strlen(substr),
+ dma_arg, MAX_VHOST_DEVICE, ',');
+ do {
+ char *arg_temp = dma_arg[i];
+ rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
+
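+ /* ptrs[0] holds "txd<vid>", ptrs[1] the PCI address of the DMA channel */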
+ start = strstr(ptrs[0], "txd");
+ if (start == NULL) {
+ ret = -1;
+ goto out;
+ }
+
+ start += 3;
+ vid = strtol(start, &end, 0);
+ if (end == start) {
+ ret = -1;
+ goto out;
+ }
+
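+ /* "txd" offloads the enqueue path, i.e. the device's RX virtqueue */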
+ vring_id = 0 + VIRTIO_RXQ;
+ if (rte_pci_addr_parse(ptrs[1],
+ &(dma_info + vid)->dmas[vring_id].addr) < 0) {
+ ret = -1;
+ goto out;
+ }
+
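+ /* map the PCI address to a rawdev and verify it is an IOAT device */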
+ rte_pci_device_name(&(dma_info + vid)->dmas[vring_id].addr,
+ name, sizeof(name));
+ dev_id = rte_rawdev_get_dev_id(name);
+ if (dev_id == (uint16_t)(-ENODEV) ||
+ dev_id == (uint16_t)(-EINVAL)) {
+ ret = -1;
+ goto out;
+ }
+
+ if (rte_rawdev_info_get(dev_id, &info, sizeof(config)) < 0 ||
+ strstr(info.driver_name, "ioat") == NULL) {
+ ret = -1;
+ goto out;
+ }
+
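+ /* hdls_disable: the rawdev does not track per-operation handles */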
+ (dma_info + vid)->dmas[vring_id].dev_id = dev_id;
+ (dma_info + vid)->dmas[vring_id].is_valid = true;
+ config.ring_size = IOAT_RING_SIZE;
+ config.hdls_disable = true;
+ if (rte_rawdev_configure(dev_id, &info, sizeof(config)) < 0) {
+ ret = -1;
+ goto out;
+ }
+ if (rte_rawdev_start(dev_id) < 0) {
+ ret = -1;
+ goto out;
+ }
+
+ (dma_info + vid)->nr++;
+ i++;
+ } while (i < args_nr);
+out:
+ free(input);
+ return ret;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2020 Intel Corporation
+ */
+
+#ifndef _IOAT_H_
+#define _IOAT_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_vhost.h>
+#include <rte_pci.h>
+
+#define MAX_VHOST_DEVICE 1024
+#define IOAT_RING_SIZE 4096
+
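+/* a DMA channel bound to one virtqueue of a vhost device */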
+struct dma_info {
+ struct rte_pci_addr addr;
+ uint16_t dev_id;
+ bool is_valid;
+};
+
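+/* per vhost device: one DMA channel slot per virtqueue (RX and TX) */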
+struct dma_for_vhost {
+ struct dma_info dmas[RTE_MAX_QUEUES_PER_PORT * 2];
+ uint16_t nr;
+};
+
+#ifdef RTE_ARCH_X86
+int open_ioat(const char *value);
+#else
+static inline int
+open_ioat(const char *value __rte_unused)
+{
+ return -1;
+}
+#endif
+#endif /* _IOAT_H_ */
#include <rte_tcp.h>
#include <rte_pause.h>
+#include "ioat.h"
#include "main.h"
#ifndef MAX_QUEUES
static int builtin_net_driver;
+static int async_vhost_driver;
+
+static char dma_type[MAX_LONG_OPT_SZ];
+
/* Specify timeout (in useconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
/ US_PER_S * BURST_TX_DRAIN_US)
#define VLAN_HLEN 4
+static inline int
+open_dma(const char *value)
+{
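+ /* "ioat" is the only DMA type supported so far */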
+ if (strncmp(dma_type, "ioat", 4) == 0)
+ return open_ioat(value);
+
+ return -1;
+}
+
/*
* Builds up the correct configuration for VMDQ VLAN pool map
* according to the pool & queue limits.
" --socket-file: The path of the socket file.\n"
" --tx-csum [0|1] disable/enable TX checksum offload.\n"
" --tso [0|1] disable/enable TCP segment offload.\n"
- " --client register a vhost-user socket as client mode.\n",
+ " --client register a vhost-user socket as client mode.\n"
+ " --dma-type register dma type for your vhost async driver. For example \"ioat\" for now.\n"
+ " --dmas register dma channel for specific vhost device.\n",
prgname);
}
{"tso", required_argument, NULL, 0},
{"client", no_argument, &client_mode, 1},
{"builtin-net-driver", no_argument, &builtin_net_driver, 1},
+ {"dma-type", required_argument, NULL, 0},
+ {"dmas", required_argument, NULL, 0},
{NULL, 0, 0, 0},
};
}
}
+ if (!strncmp(long_option[option_index].name,
+ "dma-type", MAX_LONG_OPT_SZ)) {
+ strlcpy(dma_type, optarg, MAX_LONG_OPT_SZ);
+ }
+
+ if (!strncmp(long_option[option_index].name,
+ "dmas", MAX_LONG_OPT_SZ)) {
+ if (open_dma(optarg) == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Wrong DMA args\n");
+ us_vhost_usage(prgname);
+ return -1;
+ }
+ async_vhost_driver = 1;
+ }
+
break;
/* Invalid option - print options. */
sources = files(
'main.c', 'virtio_net.c'
)
+
+if dpdk_conf.has('RTE_ARCH_X86')
+ deps += 'raw_ioat'
+ sources += files('ioat.c')
+endif