#include <sys/param.h>
#include <unistd.h>
-#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
struct rte_mbuf *m_table[MAX_PKT_BURST];
};
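+/*
+ * Per-device TX buffer: packets staged for one vhost device together
+ * with the TSC timestamp of the last flush.
+ */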
+struct vhost_bufftable {
+ uint32_t len;
+ uint64_t pre_tsc;
+ struct rte_mbuf *m_table[MAX_PKT_BURST];
+};
+
/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
+/*
+ * Vhost TX buffer for each data core.
+ * Every data core maintains a TX buffer for every vhost device,
+ * which is used to batch packets before enqueue, improving performance.
+ */
+struct vhost_bufftable *vhost_txbuff[RTE_MAX_LCORE * MAX_VHOST_DEVICE];
+
#define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
/ US_PER_S * BURST_TX_DRAIN_US)
#define VLAN_HLEN 4
prgname);
}
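+/*
+ * Identifiers returned by getopt_long() for the long options; they
+ * start at 256 so they cannot collide with any short-option character.
+ */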
+enum {
+#define OPT_VM2VM "vm2vm"
+ OPT_VM2VM_NUM = 256,
+#define OPT_RX_RETRY "rx-retry"
+ OPT_RX_RETRY_NUM,
+#define OPT_RX_RETRY_DELAY "rx-retry-delay"
+ OPT_RX_RETRY_DELAY_NUM,
+#define OPT_RX_RETRY_NUMB "rx-retry-num"
+ OPT_RX_RETRY_NUMB_NUM,
+#define OPT_MERGEABLE "mergeable"
+ OPT_MERGEABLE_NUM,
+#define OPT_STATS "stats"
+ OPT_STATS_NUM,
+#define OPT_SOCKET_FILE "socket-file"
+ OPT_SOCKET_FILE_NUM,
+#define OPT_TX_CSUM "tx-csum"
+ OPT_TX_CSUM_NUM,
+#define OPT_TSO "tso"
+ OPT_TSO_NUM,
+#define OPT_CLIENT "client"
+ OPT_CLIENT_NUM,
+#define OPT_BUILTIN_NET_DRIVER "builtin-net-driver"
+ OPT_BUILTIN_NET_DRIVER_NUM,
+#define OPT_DMA_TYPE "dma-type"
+ OPT_DMA_TYPE_NUM,
+#define OPT_DMAS "dmas"
+ OPT_DMAS_NUM,
+};
+
/*
* Parse the arguments given in the command line of the application.
*/
unsigned i;
const char *prgname = argv[0];
static struct option long_option[] = {
- {"vm2vm", required_argument, NULL, 0},
- {"rx-retry", required_argument, NULL, 0},
- {"rx-retry-delay", required_argument, NULL, 0},
- {"rx-retry-num", required_argument, NULL, 0},
- {"mergeable", required_argument, NULL, 0},
- {"stats", required_argument, NULL, 0},
- {"socket-file", required_argument, NULL, 0},
- {"tx-csum", required_argument, NULL, 0},
- {"tso", required_argument, NULL, 0},
- {"client", no_argument, &client_mode, 1},
- {"builtin-net-driver", no_argument, &builtin_net_driver, 1},
- {"dma-type", required_argument, NULL, 0},
- {"dmas", required_argument, NULL, 0},
+ {OPT_VM2VM, required_argument,
+ NULL, OPT_VM2VM_NUM},
+ {OPT_RX_RETRY, required_argument,
+ NULL, OPT_RX_RETRY_NUM},
+ {OPT_RX_RETRY_DELAY, required_argument,
+ NULL, OPT_RX_RETRY_DELAY_NUM},
+ {OPT_RX_RETRY_NUMB, required_argument,
+ NULL, OPT_RX_RETRY_NUMB_NUM},
+ {OPT_MERGEABLE, required_argument,
+ NULL, OPT_MERGEABLE_NUM},
+ {OPT_STATS, required_argument,
+ NULL, OPT_STATS_NUM},
+ {OPT_SOCKET_FILE, required_argument,
+ NULL, OPT_SOCKET_FILE_NUM},
+ {OPT_TX_CSUM, required_argument,
+ NULL, OPT_TX_CSUM_NUM},
+ {OPT_TSO, required_argument,
+ NULL, OPT_TSO_NUM},
+ {OPT_CLIENT, no_argument,
+ NULL, OPT_CLIENT_NUM},
+ {OPT_BUILTIN_NET_DRIVER, no_argument,
+ NULL, OPT_BUILTIN_NET_DRIVER_NUM},
+ {OPT_DMA_TYPE, required_argument,
+ NULL, OPT_DMA_TYPE_NUM},
+ {OPT_DMAS, required_argument,
+ NULL, OPT_DMAS_NUM},
{NULL, 0, 0, 0},
};
vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
ETH_VMDQ_ACCEPT_BROADCAST |
ETH_VMDQ_ACCEPT_MULTICAST;
-
break;
- case 0:
- /* Enable/disable vm2vm comms. */
- if (!strncmp(long_option[option_index].name, "vm2vm",
- MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "Invalid argument for "
- "vm2vm [0|1|2]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- vm2vm_mode = (vm2vm_type)ret;
- }
+ case OPT_VM2VM_NUM:
+ ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Invalid argument for "
+ "vm2vm [0|1|2]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ vm2vm_mode = (vm2vm_type)ret;
+ break;
- /* Enable/disable retries on RX. */
- if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, 1);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- enable_retry = ret;
- }
+ case OPT_RX_RETRY_NUM:
+ ret = parse_num_opt(optarg, 1);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ enable_retry = ret;
+ break;
- /* Enable/disable TX checksum offload. */
- if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, 1);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
- us_vhost_usage(prgname);
- return -1;
- } else
- enable_tx_csum = ret;
+ case OPT_TX_CSUM_NUM:
+ ret = parse_num_opt(optarg, 1);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ enable_tx_csum = ret;
+ break;
- /* Enable/disable TSO offload. */
- if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, 1);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
- us_vhost_usage(prgname);
- return -1;
- } else
- enable_tso = ret;
+ case OPT_TSO_NUM:
+ ret = parse_num_opt(optarg, 1);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ enable_tso = ret;
+ break;
- /* Specify the retries delay time (in useconds) on RX. */
- if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, INT32_MAX);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- burst_rx_delay_time = ret;
- }
+ case OPT_RX_RETRY_DELAY_NUM:
+ ret = parse_num_opt(optarg, INT32_MAX);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ burst_rx_delay_time = ret;
+ break;
- /* Specify the retries number on RX. */
- if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, INT32_MAX);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- burst_rx_retry_num = ret;
- }
+ case OPT_RX_RETRY_NUMB_NUM:
+ ret = parse_num_opt(optarg, INT32_MAX);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ burst_rx_retry_num = ret;
+ break;
- /* Enable/disable RX mergeable buffers. */
- if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, 1);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- mergeable = !!ret;
- if (ret) {
- vmdq_conf_default.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- vmdq_conf_default.rxmode.max_rx_pkt_len
- = JUMBO_FRAME_MAX_SIZE;
- }
- }
+ case OPT_MERGEABLE_NUM:
+ ret = parse_num_opt(optarg, 1);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
-
- /* Enable/disable stats. */
- if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, INT32_MAX);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "Invalid argument for stats [0..N]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- enable_stats = ret;
- }
+ mergeable = !!ret;
+ if (ret) {
+ vmdq_conf_default.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ vmdq_conf_default.rxmode.max_rx_pkt_len
+ = JUMBO_FRAME_MAX_SIZE;
}
+ break;
- /* Set socket file path. */
- if (!strncmp(long_option[option_index].name,
- "socket-file", MAX_LONG_OPT_SZ)) {
- if (us_vhost_parse_socket_path(optarg) == -1) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "Invalid argument for socket name (Max %d characters)\n",
- PATH_MAX);
- us_vhost_usage(prgname);
- return -1;
- }
+ case OPT_STATS_NUM:
+ ret = parse_num_opt(optarg, INT32_MAX);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Invalid argument for stats [0..N]\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ enable_stats = ret;
+ break;
- if (!strncmp(long_option[option_index].name,
- "dma-type", MAX_LONG_OPT_SZ)) {
- if (strlen(optarg) >= MAX_LONG_OPT_SZ) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "Wrong DMA type\n");
- us_vhost_usage(prgname);
- return -1;
- }
- strcpy(dma_type, optarg);
+ /* Set socket file path. */
+ case OPT_SOCKET_FILE_NUM:
+ if (us_vhost_parse_socket_path(optarg) == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Invalid argument for socket name (Max %d characters)\n",
+ PATH_MAX);
+ us_vhost_usage(prgname);
+ return -1;
}
+ break;
- if (!strncmp(long_option[option_index].name,
- "dmas", MAX_LONG_OPT_SZ)) {
- if (open_dma(optarg) == -1) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "Wrong DMA args\n");
- us_vhost_usage(prgname);
- return -1;
- }
- async_vhost_driver = 1;
+ case OPT_DMA_TYPE_NUM:
+ if (strlen(optarg) >= MAX_LONG_OPT_SZ) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Wrong DMA type\n");
+ us_vhost_usage(prgname);
+ return -1;
+ }
+ strcpy(dma_type, optarg);
+ break;
+
+ case OPT_DMAS_NUM:
+ if (open_dma(optarg) == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Wrong DMA args\n");
+ us_vhost_usage(prgname);
+ return -1;
}
+ async_vhost_driver = 1;
+ break;
+
+ case OPT_CLIENT_NUM:
+ client_mode = 1;
+ break;
+ case OPT_BUILTIN_NET_DRIVER_NUM:
+ builtin_net_driver = 1;
break;
- /* Invalid option - print options. */
+ /* Invalid option - print options. */
default:
us_vhost_usage(prgname);
return -1;
}
}
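+
+/* Bulk-free an array of mbufs. */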
+static inline void
+free_pkts(struct rte_mbuf **pkts, uint16_t n)
+{
+ while (n--)
+ rte_pktmbuf_free(pkts[n]);
+}
+
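+/*
+ * Poll for async enqueue copies that have completed on the RX queue
+ * and free the mbufs they used.
+ */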
static __rte_always_inline void
-virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
+complete_async_pkts(struct vhost_dev *vdev)
+{
+ struct rte_mbuf *p_cpl[MAX_PKT_BURST];
+ uint16_t complete_count;
+
+ complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
+ VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
+ if (complete_count)
+ free_pkts(p_cpl, complete_count);
+}
+
+static __rte_always_inline void
+sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
struct rte_mbuf *m)
{
uint16_t ret;
- struct rte_mbuf *m_cpl[1];
if (builtin_net_driver) {
ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
- } else if (async_vhost_driver) {
- ret = rte_vhost_submit_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ,
- &m, 1);
-
- if (likely(ret))
- dst_vdev->nr_async_pkts++;
-
- while (likely(dst_vdev->nr_async_pkts)) {
- if (rte_vhost_poll_enqueue_completed(dst_vdev->vid,
- VIRTIO_RXQ, m_cpl, 1))
- dst_vdev->nr_async_pkts--;
- }
} else {
ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
}
if (enable_stats) {
- rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
- rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
+ __atomic_add_fetch(&dst_vdev->stats.rx_total_atomic, 1,
+ __ATOMIC_SEQ_CST);
+ __atomic_add_fetch(&dst_vdev->stats.rx_atomic, ret,
+ __ATOMIC_SEQ_CST);
src_vdev->stats.tx_total++;
src_vdev->stats.tx += ret;
}
}
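+
+/* Flush the packets buffered for one vhost device on this lcore. */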
+static __rte_always_inline void
+drain_vhost(struct vhost_dev *vdev)
+{
+ uint16_t ret;
+ uint32_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid;
+ uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
+ struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
+
+ if (builtin_net_driver) {
+ ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
+ } else if (async_vhost_driver) {
+ uint32_t cpu_cpl_nr = 0;
+ uint16_t enqueue_fail = 0;
+ struct rte_mbuf *m_cpu_cpl[nr_xmit];
+
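+ /* Reap already-completed copies before submitting more. */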
+ complete_async_pkts(vdev);
+ ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+ m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
+
+ if (cpu_cpl_nr)
+ free_pkts(m_cpu_cpl, cpu_cpl_nr);
+
+ enqueue_fail = nr_xmit - ret;
+ if (enqueue_fail)
+ free_pkts(&m[ret], enqueue_fail);
+ } else {
+ ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+ m, nr_xmit);
+ }
+
+ if (enable_stats) {
+ __atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
+ __ATOMIC_SEQ_CST);
+ __atomic_add_fetch(&vdev->stats.rx_atomic, ret,
+ __ATOMIC_SEQ_CST);
+ }
+
+ if (!async_vhost_driver)
+ free_pkts(m, nr_xmit);
+}
+
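+/*
+ * Flush any TX buffer that has not been drained within
+ * MBUF_TABLE_DRAIN_TSC cycles, so buffered packets are not held
+ * back indefinitely under light traffic.
+ */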
+static __rte_always_inline void
+drain_vhost_table(void)
+{
+ uint16_t lcore_id = rte_lcore_id();
+ struct vhost_bufftable *vhost_txq;
+ struct vhost_dev *vdev;
+ uint64_t cur_tsc;
+
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ vhost_txq = vhost_txbuff[lcore_id * MAX_VHOST_DEVICE
+ + vdev->vid];
+
+ cur_tsc = rte_rdtsc();
+ if (unlikely(cur_tsc - vhost_txq->pre_tsc
+ > MBUF_TABLE_DRAIN_TSC)) {
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
+ "Vhost TX queue drained after timeout with burst size %u\n",
+ vhost_txq->len);
+ drain_vhost(vdev);
+ vhost_txq->len = 0;
+ vhost_txq->pre_tsc = cur_tsc;
+ }
+ }
+}
+
/*
- * Check if the packet destination MAC address is for a local device. If so then put
- * the packet on that devices RX queue. If not then return.
+ * Check if the packet destination MAC address is for a local device. If so,
+ * buffer the packet in that device's vhost TX buffer for a later batched
+ * enqueue. If not then return.
{
struct rte_ether_hdr *pkt_hdr;
struct vhost_dev *dst_vdev;
+ struct vhost_bufftable *vhost_txq;
+ uint16_t lcore_id = rte_lcore_id();

pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
return 0;
}
- virtio_xmit(dst_vdev, vdev, m);
+ vhost_txq = vhost_txbuff[lcore_id * MAX_VHOST_DEVICE + dst_vdev->vid];
+ vhost_txq->m_table[vhost_txq->len++] = m;
+
+ if (enable_stats) {
+ vdev->stats.tx_total++;
+ vdev->stats.tx++;
+ }
+
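+ /* Flush as soon as a full burst has accumulated. */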
+ if (unlikely(vhost_txq->len == MAX_PKT_BURST)) {
+ drain_vhost(dst_vdev);
+ vhost_txq->len = 0;
+ vhost_txq->pre_tsc = rte_rdtsc();
+ }
return 0;
}
tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}
-static inline void
-free_pkts(struct rte_mbuf **pkts, uint16_t n)
-{
- while (n--)
- rte_pktmbuf_free(pkts[n]);
-}
-
static __rte_always_inline void
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
if (vdev2 != vdev)
- virtio_xmit(vdev2, vdev, m);
+ sync_virtio_xmit(vdev2, vdev, m);
}
goto queue2nic;
}
/*check if destination is local VM*/
- if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
- rte_pktmbuf_free(m);
+ if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0))
return;
- }
if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
if (unlikely(find_local_dest(vdev, m, &offset,
}
}
-static __rte_always_inline void
-complete_async_pkts(struct vhost_dev *vdev, uint16_t qid)
-{
- struct rte_mbuf *p_cpl[MAX_PKT_BURST];
- uint16_t complete_count;
-
- complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
- qid, p_cpl, MAX_PKT_BURST);
- vdev->nr_async_pkts -= complete_count;
- if (complete_count)
- free_pkts(p_cpl, complete_count);
-}
-
static __rte_always_inline void
drain_eth_rx(struct vhost_dev *vdev)
{
rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
pkts, MAX_PKT_BURST);
- while (likely(vdev->nr_async_pkts))
- complete_async_pkts(vdev, VIRTIO_RXQ);
-
if (!rx_count)
return;
enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
pkts, rx_count);
} else if (async_vhost_driver) {
+ uint32_t cpu_cpl_nr = 0;
+ uint16_t enqueue_fail = 0;
+ struct rte_mbuf *m_cpu_cpl[MAX_PKT_BURST];
+
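+ /* Reap completed async copies before submitting this burst. */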
+ complete_async_pkts(vdev);
enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
- VIRTIO_RXQ, pkts, rx_count);
- vdev->nr_async_pkts += enqueue_count;
+ VIRTIO_RXQ, pkts, rx_count,
+ m_cpu_cpl, &cpu_cpl_nr);
+ if (cpu_cpl_nr)
+ free_pkts(m_cpu_cpl, cpu_cpl_nr);
+
+ enqueue_fail = rx_count - enqueue_count;
+ if (enqueue_fail)
+ free_pkts(&pkts[enqueue_count], enqueue_fail);
+
} else {
enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
pkts, rx_count);
}
if (enable_stats) {
- rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
- rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
+ __atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
+ __ATOMIC_SEQ_CST);
+ __atomic_add_fetch(&vdev->stats.rx_atomic, enqueue_count,
+ __ATOMIC_SEQ_CST);
}
if (!async_vhost_driver)
while(1) {
drain_mbuf_table(tx_q);
-
+ drain_vhost_table();
/*
* Inform the configuration core that we have exited the
* linked list and that no devices are in use if requested.
{
struct vhost_dev *vdev = NULL;
int lcore;
+ uint16_t i;
TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
if (vdev->vid == vid)
rte_pause();
}
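+
+ /* Free this device's per-lcore TX buffers. */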
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ rte_free(vhost_txbuff[i * MAX_VHOST_DEVICE + vid]);
+
if (builtin_net_driver)
vs_vhost_net_remove(vdev);
new_device(int vid)
{
int lcore, core_add = 0;
+ uint16_t i;
uint32_t device_num_min = num_devices;
struct vhost_dev *vdev;
-
- struct rte_vhost_async_channel_ops channel_ops = {
- .transfer_data = ioat_transfer_data_cb,
- .check_completed_copies = ioat_check_completed_copies_cb
- };
- struct rte_vhost_async_features f;
-
vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
if (vdev == NULL) {
RTE_LOG(INFO, VHOST_DATA,
}
vdev->vid = vid;
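+
+ /* Allocate a TX buffer on every lcore for this device. */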
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ vhost_txbuff[i * MAX_VHOST_DEVICE + vid]
+ = rte_zmalloc("vhost bufftable",
+ sizeof(struct vhost_bufftable),
+ RTE_CACHE_LINE_SIZE);
+
+ if (vhost_txbuff[i * MAX_VHOST_DEVICE + vid] == NULL) {
+ RTE_LOG(INFO, VHOST_DATA,
+ "(%d) couldn't allocate memory for vhost TX\n", vid);
+ /* Unwind buffers already allocated, and the device itself. */
+ while (i--)
+ rte_free(vhost_txbuff[i * MAX_VHOST_DEVICE + vid]);
+ rte_free(vdev);
+ return -1;
+ }
+ }
+
if (builtin_net_driver)
vs_vhost_net_setup(vdev);
vid, vdev->coreid);
if (async_vhost_driver) {
- f.async_inorder = 1;
- f.async_threshold = 256;
- return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
- f.intval, &channel_ops);
+ struct rte_vhost_async_features f = {0}; /* zero reserved bits */
+ struct rte_vhost_async_channel_ops channel_ops;
+
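+ /* Register the async channel only for the ioat DMA backend. */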
+ if (strncmp(dma_type, "ioat", 4) == 0) {
+ channel_ops.transfer_data = ioat_transfer_data_cb;
+ channel_ops.check_completed_copies =
+ ioat_check_completed_copies_cb;
+
+ f.async_inorder = 1;
+ f.async_threshold = 256;
+
+ return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
+ f.intval, &channel_ops);
+ }
}
return 0;
tx = vdev->stats.tx;
tx_dropped = tx_total - tx;
- rx_total = rte_atomic64_read(&vdev->stats.rx_total_atomic);
- rx = rte_atomic64_read(&vdev->stats.rx_atomic);
+ rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
+ __ATOMIC_SEQ_CST);
+ rx = __atomic_load_n(&vdev->stats.rx_atomic,
+ __ATOMIC_SEQ_CST);
rx_dropped = rx_total - rx;
printf("Statistics for device %d\n"
/* Register vhost user driver to handle vhost messages. */
for (i = 0; i < nb_sockets; i++) {
char *file = socket_files + i * PATH_MAX;
+
if (async_vhost_driver)
flags = flags | RTE_VHOST_USER_ASYNC_COPY;