1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 IGEL Co., Ltd.
3 * Copyright(c) 2016-2018 Intel Corporation
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_memcpy.h>
14 #include <rte_bus_vdev.h>
15 #include <rte_kvargs.h>
16 #include <rte_vhost.h>
17 #include <rte_spinlock.h>
19 #include "rte_eth_vhost.h"
21 static int vhost_logtype;
23 #define VHOST_LOG(level, ...) \
24 rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)
26 enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
28 #define ETH_VHOST_IFACE_ARG "iface"
29 #define ETH_VHOST_QUEUES_ARG "queues"
30 #define ETH_VHOST_CLIENT_ARG "client"
31 #define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
32 #define ETH_VHOST_IOMMU_SUPPORT "iommu-support"
33 #define ETH_VHOST_POSTCOPY_SUPPORT "postcopy-support"
34 #define ETH_VHOST_VIRTIO_NET_F_HOST_TSO "tso"
35 #define VHOST_MAX_PKT_BURST 32
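/*
 * Illustrative devargs example (socket path and values are hypothetical):
 * the arguments declared above are passed on the EAL command line, e.g.
 *   --vdev 'net_vhost0,iface=/tmp/sock0,queues=1,client=1'
 */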
37 static const char *valid_arguments[] = {
41 ETH_VHOST_DEQUEUE_ZERO_COPY,
42 ETH_VHOST_IOMMU_SUPPORT,
43 ETH_VHOST_POSTCOPY_SUPPORT,
44 ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
48 static struct rte_ether_addr base_eth_addr = {
59 enum vhost_xstats_pkts {
60 VHOST_UNDERSIZE_PKT = 0,
65 VHOST_512_TO_1023_PKT,
66 VHOST_1024_TO_1522_PKT,
67 VHOST_1523_TO_MAX_PKT,
72 VHOST_ERRORS_FRAGMENTED,
74 VHOST_UNKNOWN_PROTOCOL,
82 uint64_t xstats[VHOST_XSTATS_MAX];
87 rte_atomic32_t allow_queuing;
88 rte_atomic32_t while_queuing;
89 struct pmd_internal *internal;
90 struct rte_mempool *mb_pool;
92 uint16_t virtqueue_id;
93 struct vhost_stats stats;
97 rte_atomic32_t dev_attached;
101 uint64_t disable_flags;
104 rte_atomic32_t started;
108 struct internal_list {
109 TAILQ_ENTRY(internal_list) next;
110 struct rte_eth_dev *eth_dev;
113 TAILQ_HEAD(internal_list_head, internal_list);
114 static struct internal_list_head internal_list =
115 TAILQ_HEAD_INITIALIZER(internal_list);
117 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
119 static struct rte_eth_link pmd_link = {
121 .link_duplex = ETH_LINK_FULL_DUPLEX,
122 .link_status = ETH_LINK_DOWN
125 struct rte_vhost_vring_state {
128 bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
129 bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
131 unsigned int max_vring;
134 static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
136 #define VHOST_XSTATS_NAME_SIZE 64
138 struct vhost_xstats_name_off {
139 char name[VHOST_XSTATS_NAME_SIZE];
143 /* [rx]_ is prepended to the name string here */
144 static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
146 offsetof(struct vhost_queue, stats.pkts)},
148 offsetof(struct vhost_queue, stats.bytes)},
150 offsetof(struct vhost_queue, stats.missed_pkts)},
151 {"broadcast_packets",
152 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
153 {"multicast_packets",
154 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
156 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
157 {"undersize_packets",
158 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
160 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
161 {"size_65_to_127_packets",
162 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
163 {"size_128_to_255_packets",
164 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
165 {"size_256_to_511_packets",
166 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
167 {"size_512_to_1023_packets",
168 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
169 {"size_1024_to_1522_packets",
170 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
171 {"size_1523_to_max_packets",
172 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
173 {"errors_with_bad_CRC",
174 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
175 {"fragmented_errors",
176 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
178 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
179 {"unknown_protos_packets",
180 offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
183 /* [tx]_ is prepended to the name string here */
184 static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
186 offsetof(struct vhost_queue, stats.pkts)},
188 offsetof(struct vhost_queue, stats.bytes)},
190 offsetof(struct vhost_queue, stats.missed_pkts)},
191 {"broadcast_packets",
192 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
193 {"multicast_packets",
194 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
196 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
197 {"undersize_packets",
198 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
200 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
201 {"size_65_to_127_packets",
202 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
203 {"size_128_to_255_packets",
204 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
205 {"size_256_to_511_packets",
206 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
207 {"size_512_to_1023_packets",
208 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
209 {"size_1024_to_1522_packets",
210 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
211 {"size_1523_to_max_packets",
212 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
213 {"errors_with_bad_CRC",
214 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
217 #define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
218 sizeof(vhost_rxport_stat_strings[0]))
220 #define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
221 sizeof(vhost_txport_stat_strings[0]))
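/*
 * The two tables above are consumed by vhost_dev_xstats_get() below: each
 * entry's offset is applied to a struct vhost_queue pointer, e.g.
 *   *(uint64_t *)((char *)vq + vhost_rxport_stat_strings[t].offset)
 * and the per-queue values are summed into one xstat per port.
 */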
224 vhost_dev_xstats_reset(struct rte_eth_dev *dev)
226 struct vhost_queue *vq = NULL;
229 for (i = 0; i < dev->data->nb_rx_queues; i++) {
230 vq = dev->data->rx_queues[i];
233 memset(&vq->stats, 0, sizeof(vq->stats));
235 for (i = 0; i < dev->data->nb_tx_queues; i++) {
236 vq = dev->data->tx_queues[i];
239 memset(&vq->stats, 0, sizeof(vq->stats));
246 vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
247 struct rte_eth_xstat_name *xstats_names,
248 unsigned int limit __rte_unused)
252 int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
256 for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
257 snprintf(xstats_names[count].name,
258 sizeof(xstats_names[count].name),
259 "rx_%s", vhost_rxport_stat_strings[t].name);
262 for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
263 snprintf(xstats_names[count].name,
264 sizeof(xstats_names[count].name),
265 "tx_%s", vhost_txport_stat_strings[t].name);
272 vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
277 unsigned int count = 0;
278 struct vhost_queue *vq = NULL;
279 unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
284 for (i = 0; i < dev->data->nb_rx_queues; i++) {
285 vq = dev->data->rx_queues[i];
288 vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
289 - (vq->stats.xstats[VHOST_BROADCAST_PKT]
290 + vq->stats.xstats[VHOST_MULTICAST_PKT]);
292 for (i = 0; i < dev->data->nb_tx_queues; i++) {
293 vq = dev->data->tx_queues[i];
296 vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
297 + vq->stats.missed_pkts
298 - (vq->stats.xstats[VHOST_BROADCAST_PKT]
299 + vq->stats.xstats[VHOST_MULTICAST_PKT]);
301 for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
302 xstats[count].value = 0;
303 for (i = 0; i < dev->data->nb_rx_queues; i++) {
304 vq = dev->data->rx_queues[i];
307 xstats[count].value +=
308 *(uint64_t *)(((char *)vq)
309 + vhost_rxport_stat_strings[t].offset);
311 xstats[count].id = count;
314 for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
315 xstats[count].value = 0;
316 for (i = 0; i < dev->data->nb_tx_queues; i++) {
317 vq = dev->data->tx_queues[i];
320 xstats[count].value +=
321 *(uint64_t *)(((char *)vq)
322 + vhost_txport_stat_strings[t].offset);
324 xstats[count].id = count;
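/*
 * Application-side usage sketch (illustrative; the port id and array sizes
 * are hypothetical and error handling is omitted):
 *
 *   struct rte_eth_xstat_name names[64];
 *   struct rte_eth_xstat values[64];
 *   int n = rte_eth_xstats_get_names(port_id, names, 64);
 *   rte_eth_xstats_get(port_id, values, n);
 *
 * Both ethdev calls end up in the xstats callbacks implemented above.
 */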
331 vhost_count_multicast_broadcast(struct vhost_queue *vq,
332 struct rte_mbuf *mbuf)
334 struct rte_ether_addr *ea = NULL;
335 struct vhost_stats *pstats = &vq->stats;
337 ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
338 if (rte_is_multicast_ether_addr(ea)) {
339 if (rte_is_broadcast_ether_addr(ea))
340 pstats->xstats[VHOST_BROADCAST_PKT]++;
342 pstats->xstats[VHOST_MULTICAST_PKT]++;
347 vhost_update_packet_xstats(struct vhost_queue *vq,
348 struct rte_mbuf **bufs,
351 uint32_t pkt_len = 0;
354 struct vhost_stats *pstats = &vq->stats;
356 for (i = 0; i < count ; i++) {
357 pkt_len = bufs[i]->pkt_len;
359 pstats->xstats[VHOST_64_PKT]++;
360 } else if (pkt_len > 64 && pkt_len < 1024) {
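/* Derive the size bucket from the position of the most
 * significant set bit; e.g. for pkt_len = 128..255 the MSB is
 * bit 7, so (32 - __builtin_clz(pkt_len)) - 5 = 8 - 5 = 3,
 * i.e. VHOST_128_TO_255_PKT, given the consecutive enum values.
 */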
361 index = (sizeof(pkt_len) * 8)
362 - __builtin_clz(pkt_len) - 5;
363 pstats->xstats[index]++;
366 pstats->xstats[VHOST_UNDERSIZE_PKT]++;
367 else if (pkt_len <= 1522)
368 pstats->xstats[VHOST_1024_TO_1522_PKT]++;
369 else if (pkt_len > 1522)
370 pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
372 vhost_count_multicast_broadcast(vq, bufs[i]);
377 eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
379 struct vhost_queue *r = q;
380 uint16_t i, nb_rx = 0;
381 uint16_t nb_receive = nb_bufs;
383 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
386 rte_atomic32_set(&r->while_queuing, 1);
388 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
391 /* Dequeue packets from guest TX queue */
394 uint16_t num = (uint16_t)RTE_MIN(nb_receive,
395 VHOST_MAX_PKT_BURST);
397 nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
398 r->mb_pool, &bufs[nb_rx],
402 nb_receive -= nb_pkts;
407 r->stats.pkts += nb_rx;
409 for (i = 0; likely(i < nb_rx); i++) {
410 bufs[i]->port = r->port;
411 bufs[i]->vlan_tci = 0;
413 if (r->internal->vlan_strip)
414 rte_vlan_strip(bufs[i]);
416 r->stats.bytes += bufs[i]->pkt_len;
419 vhost_update_packet_xstats(r, bufs, nb_rx);
422 rte_atomic32_set(&r->while_queuing, 0);
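/*
 * Note on the allow_queuing/while_queuing pairing used above and in
 * eth_vhost_tx() below: when stopping, update_queuing_status() clears
 * allow_queuing and spins until while_queuing drops back to zero, so the
 * burst functions set while_queuing first and re-check allow_queuing before
 * touching the vhost device, avoiding races with device attach/detach.
 */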
428 eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
430 struct vhost_queue *r = q;
431 uint16_t i, nb_tx = 0;
432 uint16_t nb_send = 0;
434 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
437 rte_atomic32_set(&r->while_queuing, 1);
439 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
442 for (i = 0; i < nb_bufs; i++) {
443 struct rte_mbuf *m = bufs[i];
445 /* Do VLAN tag insertion */
446 if (m->ol_flags & PKT_TX_VLAN_PKT) {
447 int error = rte_vlan_insert(&m);
448 if (unlikely(error)) {
458 /* Enqueue packets to guest RX queue */
461 uint16_t num = (uint16_t)RTE_MIN(nb_send,
462 VHOST_MAX_PKT_BURST);
464 nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
473 r->stats.pkts += nb_tx;
474 r->stats.missed_pkts += nb_bufs - nb_tx;
476 for (i = 0; likely(i < nb_tx); i++)
477 r->stats.bytes += bufs[i]->pkt_len;
479 vhost_update_packet_xstats(r, bufs, nb_tx);
481 /* According to RFC 2863 (ifHCOutMulticastPkts and ifHCOutBroadcastPkts,
482 * page 42), the "multicast" and "broadcast" counters are incremented
483 * even for packets that were not transmitted successfully.
485 for (i = nb_tx; i < nb_bufs; i++)
486 vhost_count_multicast_broadcast(r, bufs[i]);
488 for (i = 0; likely(i < nb_tx); i++)
489 rte_pktmbuf_free(bufs[i]);
491 rte_atomic32_set(&r->while_queuing, 0);
496 static inline struct internal_list *
497 find_internal_resource(char *ifname)
500 struct internal_list *list;
501 struct pmd_internal *internal;
506 pthread_mutex_lock(&internal_list_lock);
508 TAILQ_FOREACH(list, &internal_list, next) {
509 internal = list->eth_dev->data->dev_private;
510 if (!strcmp(internal->iface_name, ifname)) {
516 pthread_mutex_unlock(&internal_list_lock);
525 eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
527 struct rte_vhost_vring vring;
528 struct vhost_queue *vq;
531 vq = dev->data->rx_queues[qid];
533 VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
537 ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
539 VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
542 VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
543 rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
550 eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
552 struct rte_vhost_vring vring;
553 struct vhost_queue *vq;
556 vq = dev->data->rx_queues[qid];
558 VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
562 ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
564 VHOST_LOG(ERR, "Failed to get rxq%d's vring", qid);
567 VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
568 rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
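/*
 * Usage sketch (illustrative): Rx interrupt mode is requested by setting
 * rte_eth_conf.intr_conf.rxq = 1 before rte_eth_dev_configure(), after
 * which rte_eth_dev_rx_intr_enable(port_id, qid) and
 * rte_eth_dev_rx_intr_disable(port_id, qid) land in the two callbacks above.
 */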
575 eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
577 struct rte_intr_handle *intr_handle = dev->intr_handle;
580 if (intr_handle->intr_vec)
581 free(intr_handle->intr_vec);
585 dev->intr_handle = NULL;
589 eth_vhost_install_intr(struct rte_eth_dev *dev)
591 struct rte_vhost_vring vring;
592 struct vhost_queue *vq;
594 int nb_rxq = dev->data->nb_rx_queues;
598 /* uninstall first if we are reconnecting */
599 if (dev->intr_handle)
600 eth_vhost_uninstall_intr(dev);
602 dev->intr_handle = malloc(sizeof(*dev->intr_handle));
603 if (!dev->intr_handle) {
604 VHOST_LOG(ERR, "Failed to allocate intr_handle\n");
607 memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));
609 dev->intr_handle->efd_counter_size = sizeof(uint64_t);
611 dev->intr_handle->intr_vec =
612 malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));
614 if (!dev->intr_handle->intr_vec) {
616 "Failed to allocate memory for interrupt vector\n");
617 free(dev->intr_handle);
621 VHOST_LOG(INFO, "Prepare intr vec\n");
622 for (i = 0; i < nb_rxq; i++) {
623 vq = dev->data->rx_queues[i];
625 VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i);
629 ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
632 "Failed to get rxq-%d's vring, skip!\n", i);
636 if (vring.kickfd < 0) {
638 "rxq-%d's kickfd is invalid, skip!\n", i);
641 dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
642 dev->intr_handle->efds[i] = vring.kickfd;
644 VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
647 dev->intr_handle->nb_efd = count;
648 dev->intr_handle->max_intr = count + 1;
649 dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
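/* Each Rx queue's vring kickfd is registered as its interrupt efd, so an
 * application epoll-waiting on the ethdev interrupt handle wakes up when
 * the guest kicks the corresponding virtqueue.
 */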
655 update_queuing_status(struct rte_eth_dev *dev)
657 struct pmd_internal *internal = dev->data->dev_private;
658 struct vhost_queue *vq;
660 int allow_queuing = 1;
662 if (!dev->data->rx_queues || !dev->data->tx_queues)
665 if (rte_atomic32_read(&internal->started) == 0 ||
666 rte_atomic32_read(&internal->dev_attached) == 0)
669 /* Wait until rx/tx_pkt_burst stops accessing vhost device */
670 for (i = 0; i < dev->data->nb_rx_queues; i++) {
671 vq = dev->data->rx_queues[i];
674 rte_atomic32_set(&vq->allow_queuing, allow_queuing);
675 while (rte_atomic32_read(&vq->while_queuing))
679 for (i = 0; i < dev->data->nb_tx_queues; i++) {
680 vq = dev->data->tx_queues[i];
683 rte_atomic32_set(&vq->allow_queuing, allow_queuing);
684 while (rte_atomic32_read(&vq->while_queuing))
690 queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
692 struct vhost_queue *vq;
695 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
696 vq = eth_dev->data->rx_queues[i];
699 vq->vid = internal->vid;
700 vq->internal = internal;
701 vq->port = eth_dev->data->port_id;
703 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
704 vq = eth_dev->data->tx_queues[i];
707 vq->vid = internal->vid;
708 vq->internal = internal;
709 vq->port = eth_dev->data->port_id;
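/* new_device(): the .new_device callback registered via vhost_ops below,
 * invoked by the vhost library when a vhost-user connection is established
 * on this interface's socket.
 */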
716 struct rte_eth_dev *eth_dev;
717 struct internal_list *list;
718 struct pmd_internal *internal;
719 struct rte_eth_conf *dev_conf;
721 char ifname[PATH_MAX];
722 #ifdef RTE_LIBRTE_VHOST_NUMA
726 rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
727 list = find_internal_resource(ifname);
729 VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
733 eth_dev = list->eth_dev;
734 internal = eth_dev->data->dev_private;
735 dev_conf = &eth_dev->data->dev_conf;
737 #ifdef RTE_LIBRTE_VHOST_NUMA
738 newnode = rte_vhost_get_numa_node(vid);
740 eth_dev->data->numa_node = newnode;
744 if (rte_atomic32_read(&internal->started) == 1) {
745 queue_setup(eth_dev, internal);
747 if (dev_conf->intr_conf.rxq) {
748 if (eth_vhost_install_intr(eth_dev) < 0) {
750 "Failed to install interrupt handler.");
755 VHOST_LOG(INFO, "RX/TX queues do not exist yet\n");
758 for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
759 rte_vhost_enable_guest_notification(vid, i, 0);
761 rte_vhost_get_mtu(vid, &eth_dev->data->mtu);
763 eth_dev->data->dev_link.link_status = ETH_LINK_UP;
765 rte_atomic32_set(&internal->dev_attached, 1);
766 update_queuing_status(eth_dev);
768 VHOST_LOG(INFO, "Vhost device %d created\n", vid);
770 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
776 destroy_device(int vid)
778 struct rte_eth_dev *eth_dev;
779 struct pmd_internal *internal;
780 struct vhost_queue *vq;
781 struct internal_list *list;
782 char ifname[PATH_MAX];
784 struct rte_vhost_vring_state *state;
786 rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
787 list = find_internal_resource(ifname);
789 VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
792 eth_dev = list->eth_dev;
793 internal = eth_dev->data->dev_private;
795 rte_atomic32_set(&internal->dev_attached, 0);
796 update_queuing_status(eth_dev);
798 eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
800 if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
801 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
802 vq = eth_dev->data->rx_queues[i];
807 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
808 vq = eth_dev->data->tx_queues[i];
815 state = vring_states[eth_dev->data->port_id];
816 rte_spinlock_lock(&state->lock);
817 for (i = 0; i <= state->max_vring; i++) {
818 state->cur[i] = false;
819 state->seen[i] = false;
821 state->max_vring = 0;
822 rte_spinlock_unlock(&state->lock);
824 VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
825 eth_vhost_uninstall_intr(eth_dev);
827 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
831 vring_state_changed(int vid, uint16_t vring, int enable)
833 struct rte_vhost_vring_state *state;
834 struct rte_eth_dev *eth_dev;
835 struct internal_list *list;
836 char ifname[PATH_MAX];
838 rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
839 list = find_internal_resource(ifname);
841 VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
845 eth_dev = list->eth_dev;
847 state = vring_states[eth_dev->data->port_id];
848 rte_spinlock_lock(&state->lock);
849 if (state->cur[vring] == enable) {
850 rte_spinlock_unlock(&state->lock);
853 state->cur[vring] = enable;
854 state->max_vring = RTE_MAX(vring, state->max_vring);
855 rte_spinlock_unlock(&state->lock);
857 VHOST_LOG(INFO, "vring%u is %s\n",
858 vring, enable ? "enabled" : "disabled");
860 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
865 static struct vhost_device_ops vhost_ops = {
866 .new_device = new_device,
867 .destroy_device = destroy_device,
868 .vring_state_changed = vring_state_changed,
872 vhost_driver_setup(struct rte_eth_dev *eth_dev)
874 struct pmd_internal *internal = eth_dev->data->dev_private;
875 struct internal_list *list = NULL;
876 struct rte_vhost_vring_state *vring_state = NULL;
877 unsigned int numa_node = eth_dev->device->numa_node;
878 const char *name = eth_dev->device->name;
880 list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
884 vring_state = rte_zmalloc_socket(name, sizeof(*vring_state),
886 if (vring_state == NULL)
889 list->eth_dev = eth_dev;
890 pthread_mutex_lock(&internal_list_lock);
891 TAILQ_INSERT_TAIL(&internal_list, list, next);
892 pthread_mutex_unlock(&internal_list_lock);
894 rte_spinlock_init(&vring_state->lock);
895 vring_states[eth_dev->data->port_id] = vring_state;
897 if (rte_vhost_driver_register(internal->iface_name, internal->flags))
900 if (internal->disable_flags) {
901 if (rte_vhost_driver_disable_features(internal->iface_name,
902 internal->disable_flags))
906 if (rte_vhost_driver_callback_register(internal->iface_name,
908 VHOST_LOG(ERR, "Can't register callbacks\n");
912 if (rte_vhost_driver_start(internal->iface_name) < 0) {
913 VHOST_LOG(ERR, "Failed to start driver for %s\n",
914 internal->iface_name);
921 rte_free(vring_state);
928 rte_eth_vhost_get_queue_event(uint16_t port_id,
929 struct rte_eth_vhost_queue_event *event)
931 struct rte_vhost_vring_state *state;
935 if (port_id >= RTE_MAX_ETHPORTS) {
936 VHOST_LOG(ERR, "Invalid port id\n");
940 state = vring_states[port_id];
942 VHOST_LOG(ERR, "Unused port\n");
946 rte_spinlock_lock(&state->lock);
947 for (i = 0; i <= state->max_vring; i++) {
948 idx = state->index++ % (state->max_vring + 1);
950 if (state->cur[idx] != state->seen[idx]) {
951 state->seen[idx] = state->cur[idx];
952 event->queue_id = idx / 2;
954 event->enable = state->cur[idx];
955 rte_spinlock_unlock(&state->lock);
959 rte_spinlock_unlock(&state->lock);
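/*
 * Usage sketch (illustrative; handle_queue() is a hypothetical application
 * function): queue events are typically drained from an
 * RTE_ETH_EVENT_QUEUE_STATE callback, e.g.
 *
 *   struct rte_eth_vhost_queue_event ev;
 *   while (rte_eth_vhost_get_queue_event(port_id, &ev) == 0)
 *       handle_queue(port_id, ev.queue_id, ev.rx, ev.enable);
 */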
965 rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
967 struct internal_list *list;
968 struct rte_eth_dev *eth_dev;
969 struct vhost_queue *vq;
972 if (!rte_eth_dev_is_valid_port(port_id))
975 pthread_mutex_lock(&internal_list_lock);
977 TAILQ_FOREACH(list, &internal_list, next) {
978 eth_dev = list->eth_dev;
979 if (eth_dev->data->port_id == port_id) {
980 vq = eth_dev->data->rx_queues[0];
988 pthread_mutex_unlock(&internal_list_lock);
994 eth_dev_configure(struct rte_eth_dev *dev)
996 struct pmd_internal *internal = dev->data->dev_private;
997 const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
999 /* NOTE: the same process has to operate a vhost interface
1000 * from beginning to end (from eth_dev configure to eth_dev close).
1001 * It is the user's responsibility at the moment.
1003 if (vhost_driver_setup(dev) < 0)
1006 internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1012 eth_dev_start(struct rte_eth_dev *eth_dev)
1014 struct pmd_internal *internal = eth_dev->data->dev_private;
1015 struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
1017 queue_setup(eth_dev, internal);
1019 if (rte_atomic32_read(&internal->dev_attached) == 1) {
1020 if (dev_conf->intr_conf.rxq) {
1021 if (eth_vhost_install_intr(eth_dev) < 0) {
1023 "Failed to install interrupt handler.");
1029 rte_atomic32_set(&internal->started, 1);
1030 update_queuing_status(eth_dev);
1036 eth_dev_stop(struct rte_eth_dev *dev)
1038 struct pmd_internal *internal = dev->data->dev_private;
1040 rte_atomic32_set(&internal->started, 0);
1041 update_queuing_status(dev);
1045 eth_dev_close(struct rte_eth_dev *dev)
1047 struct pmd_internal *internal;
1048 struct internal_list *list;
1051 internal = dev->data->dev_private;
1057 rte_vhost_driver_unregister(internal->iface_name);
1059 list = find_internal_resource(internal->iface_name);
1063 pthread_mutex_lock(&internal_list_lock);
1064 TAILQ_REMOVE(&internal_list, list, next);
1065 pthread_mutex_unlock(&internal_list_lock);
1068 if (dev->data->rx_queues)
1069 for (i = 0; i < dev->data->nb_rx_queues; i++)
1070 rte_free(dev->data->rx_queues[i]);
1072 if (dev->data->tx_queues)
1073 for (i = 0; i < dev->data->nb_tx_queues; i++)
1074 rte_free(dev->data->tx_queues[i]);
1076 free(internal->dev_name);
1077 rte_free(internal->iface_name);
1080 dev->data->dev_private = NULL;
1082 rte_free(vring_states[dev->data->port_id]);
1083 vring_states[dev->data->port_id] = NULL;
1087 eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1088 uint16_t nb_rx_desc __rte_unused,
1089 unsigned int socket_id,
1090 const struct rte_eth_rxconf *rx_conf __rte_unused,
1091 struct rte_mempool *mb_pool)
1093 struct vhost_queue *vq;
1095 vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
1096 RTE_CACHE_LINE_SIZE, socket_id);
1098 VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
1102 vq->mb_pool = mb_pool;
1103 vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
1104 dev->data->rx_queues[rx_queue_id] = vq;
1110 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1111 uint16_t nb_tx_desc __rte_unused,
1112 unsigned int socket_id,
1113 const struct rte_eth_txconf *tx_conf __rte_unused)
1115 struct vhost_queue *vq;
1117 vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
1118 RTE_CACHE_LINE_SIZE, socket_id);
1120 VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
1124 vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
1125 dev->data->tx_queues[tx_queue_id] = vq;
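/*
 * Queue numbering note: an ethdev Rx queue dequeues from the guest's TX
 * virtqueue (rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ, i.e. the odd ring)
 * and an ethdev Tx queue enqueues to the guest's RX virtqueue (the even
 * ring), which is also why the Rx interrupt helpers above use
 * (qid << 1) + 1.
 */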
1131 eth_dev_info(struct rte_eth_dev *dev,
1132 struct rte_eth_dev_info *dev_info)
1134 struct pmd_internal *internal;
1136 internal = dev->data->dev_private;
1137 if (internal == NULL) {
1138 VHOST_LOG(ERR, "Invalid device specified\n");
1142 dev_info->max_mac_addrs = 1;
1143 dev_info->max_rx_pktlen = (uint32_t)-1;
1144 dev_info->max_rx_queues = internal->max_queues;
1145 dev_info->max_tx_queues = internal->max_queues;
1146 dev_info->min_rx_bufsize = 0;
1148 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
1149 DEV_TX_OFFLOAD_VLAN_INSERT;
1150 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1156 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1159 unsigned long rx_total = 0, tx_total = 0;
1160 unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
1161 struct vhost_queue *vq;
1163 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
1164 i < dev->data->nb_rx_queues; i++) {
1165 if (dev->data->rx_queues[i] == NULL)
1167 vq = dev->data->rx_queues[i];
1168 stats->q_ipackets[i] = vq->stats.pkts;
1169 rx_total += stats->q_ipackets[i];
1171 stats->q_ibytes[i] = vq->stats.bytes;
1172 rx_total_bytes += stats->q_ibytes[i];
1175 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
1176 i < dev->data->nb_tx_queues; i++) {
1177 if (dev->data->tx_queues[i] == NULL)
1179 vq = dev->data->tx_queues[i];
1180 stats->q_opackets[i] = vq->stats.pkts;
1181 tx_total += stats->q_opackets[i];
1183 stats->q_obytes[i] = vq->stats.bytes;
1184 tx_total_bytes += stats->q_obytes[i];
1187 stats->ipackets = rx_total;
1188 stats->opackets = tx_total;
1189 stats->ibytes = rx_total_bytes;
1190 stats->obytes = tx_total_bytes;
1196 eth_stats_reset(struct rte_eth_dev *dev)
1198 struct vhost_queue *vq;
1201 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1202 if (dev->data->rx_queues[i] == NULL)
1204 vq = dev->data->rx_queues[i];
1206 vq->stats.bytes = 0;
1208 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1209 if (dev->data->tx_queues[i] == NULL)
1211 vq = dev->data->tx_queues[i];
1213 vq->stats.bytes = 0;
1214 vq->stats.missed_pkts = 0;
1221 eth_queue_release(void *q)
1227 eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
1230 * vhost does not hang onto mbufs: eth_vhost_tx() copies the packet data
1231 * and releases the mbuf, so there is nothing to clean up.
1237 eth_link_update(struct rte_eth_dev *dev __rte_unused,
1238 int wait_to_complete __rte_unused)
1244 eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1246 struct vhost_queue *vq;
1248 vq = dev->data->rx_queues[rx_queue_id];
1252 return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
1255 static const struct eth_dev_ops ops = {
1256 .dev_start = eth_dev_start,
1257 .dev_stop = eth_dev_stop,
1258 .dev_close = eth_dev_close,
1259 .dev_configure = eth_dev_configure,
1260 .dev_infos_get = eth_dev_info,
1261 .rx_queue_setup = eth_rx_queue_setup,
1262 .tx_queue_setup = eth_tx_queue_setup,
1263 .rx_queue_release = eth_queue_release,
1264 .tx_queue_release = eth_queue_release,
1265 .tx_done_cleanup = eth_tx_done_cleanup,
1266 .rx_queue_count = eth_rx_queue_count,
1267 .link_update = eth_link_update,
1268 .stats_get = eth_stats_get,
1269 .stats_reset = eth_stats_reset,
1270 .xstats_reset = vhost_dev_xstats_reset,
1271 .xstats_get = vhost_dev_xstats_get,
1272 .xstats_get_names = vhost_dev_xstats_get_names,
1273 .rx_queue_intr_enable = eth_rxq_intr_enable,
1274 .rx_queue_intr_disable = eth_rxq_intr_disable,
1278 eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
1279 int16_t queues, const unsigned int numa_node, uint64_t flags,
1280 uint64_t disable_flags)
1282 const char *name = rte_vdev_device_name(dev);
1283 struct rte_eth_dev_data *data;
1284 struct pmd_internal *internal = NULL;
1285 struct rte_eth_dev *eth_dev = NULL;
1286 struct rte_ether_addr *eth_addr = NULL;
1288 VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
1291 /* reserve an ethdev entry */
1292 eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
1293 if (eth_dev == NULL)
1295 data = eth_dev->data;
1297 eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
1298 if (eth_addr == NULL)
1300 data->mac_addrs = eth_addr;
1301 *eth_addr = base_eth_addr;
1302 eth_addr->addr_bytes[5] = eth_dev->data->port_id;
1304 /* now put it all together
1305 * - store queue data in internal,
1306 * - point eth_dev_data to internals
1307 * - and point eth_dev structure to new eth_dev_data structure
1309 internal = eth_dev->data->dev_private;
1310 internal->dev_name = strdup(name);
1311 if (internal->dev_name == NULL)
1313 internal->iface_name = rte_malloc_socket(name, strlen(iface_name) + 1,
1315 if (internal->iface_name == NULL)
1317 strcpy(internal->iface_name, iface_name);
1319 data->nb_rx_queues = queues;
1320 data->nb_tx_queues = queues;
1321 internal->max_queues = queues;
1323 internal->flags = flags;
1324 internal->disable_flags = disable_flags;
1325 data->dev_link = pmd_link;
1326 data->dev_flags = RTE_ETH_DEV_INTR_LSC | RTE_ETH_DEV_CLOSE_REMOVE;
1328 eth_dev->dev_ops = &ops;
1330 /* finally assign rx and tx ops */
1331 eth_dev->rx_pkt_burst = eth_vhost_rx;
1332 eth_dev->tx_pkt_burst = eth_vhost_tx;
1334 rte_eth_dev_probing_finish(eth_dev);
1339 rte_free(internal->iface_name);
1340 free(internal->dev_name);
1342 rte_eth_dev_release_port(eth_dev);
1348 open_iface(const char *key __rte_unused, const char *value, void *extra_args)
1350 const char **iface_name = extra_args;
1355 *iface_name = value;
1361 open_int(const char *key __rte_unused, const char *value, void *extra_args)
1363 uint16_t *n = extra_args;
1365 if (value == NULL || extra_args == NULL)
1368 *n = (uint16_t)strtoul(value, NULL, 0);
1369 if (*n == USHRT_MAX && errno == ERANGE)
1376 rte_pmd_vhost_probe(struct rte_vdev_device *dev)
1378 struct rte_kvargs *kvlist = NULL;
1383 uint64_t disable_flags = 0;
1384 int client_mode = 0;
1385 int dequeue_zero_copy = 0;
1386 int iommu_support = 0;
1387 int postcopy_support = 0;
1389 struct rte_eth_dev *eth_dev;
1390 const char *name = rte_vdev_device_name(dev);
1392 VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);
1394 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1395 eth_dev = rte_eth_dev_attach_secondary(name);
1397 VHOST_LOG(ERR, "Failed to probe %s\n", name);
1400 eth_dev->rx_pkt_burst = eth_vhost_rx;
1401 eth_dev->tx_pkt_burst = eth_vhost_tx;
1402 eth_dev->dev_ops = &ops;
1403 if (dev->device.numa_node == SOCKET_ID_ANY)
1404 dev->device.numa_node = rte_socket_id();
1405 eth_dev->device = &dev->device;
1406 rte_eth_dev_probing_finish(eth_dev);
1410 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
1414 if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
1415 ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
1416 &open_iface, &iface_name);
1424 if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
1425 ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
1426 &open_int, &queues);
1427 if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
1433 if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
1434 ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
1435 &open_int, &client_mode);
1440 flags |= RTE_VHOST_USER_CLIENT;
1443 if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
1444 ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
1445 &open_int, &dequeue_zero_copy);
1449 if (dequeue_zero_copy)
1450 flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
1453 if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
1454 ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
1455 &open_int, &iommu_support);
1460 flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
1463 if (rte_kvargs_count(kvlist, ETH_VHOST_POSTCOPY_SUPPORT) == 1) {
1464 ret = rte_kvargs_process(kvlist, ETH_VHOST_POSTCOPY_SUPPORT,
1465 &open_int, &postcopy_support);
1469 if (postcopy_support)
1470 flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
1473 if (rte_kvargs_count(kvlist, ETH_VHOST_VIRTIO_NET_F_HOST_TSO) == 1) {
1474 ret = rte_kvargs_process(kvlist,
1475 ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
1481 disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
1482 disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
1486 if (dev->device.numa_node == SOCKET_ID_ANY)
1487 dev->device.numa_node = rte_socket_id();
1489 ret = eth_dev_vhost_create(dev, iface_name, queues,
1490 dev->device.numa_node, flags, disable_flags);
1492 VHOST_LOG(ERR, "Failed to create %s\n", name);
1495 rte_kvargs_free(kvlist);
1500 rte_pmd_vhost_remove(struct rte_vdev_device *dev)
1503 struct rte_eth_dev *eth_dev = NULL;
1505 name = rte_vdev_device_name(dev);
1506 VHOST_LOG(INFO, "Un-initializing pmd_vhost for %s\n", name);
1508 /* find an ethdev entry */
1509 eth_dev = rte_eth_dev_allocated(name);
1510 if (eth_dev == NULL)
1513 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1514 return rte_eth_dev_release_port(eth_dev);
1516 eth_dev_close(eth_dev);
1518 rte_eth_dev_release_port(eth_dev);
1523 static struct rte_vdev_driver pmd_vhost_drv = {
1524 .probe = rte_pmd_vhost_probe,
1525 .remove = rte_pmd_vhost_remove,
1528 RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
1529 RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
1530 RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
1534 "dequeue-zero-copy=<0|1> "
1535 "iommu-support=<0|1> "
1536 "postcopy-support=<0|1> "
1539 RTE_INIT(vhost_init_log)
1541 vhost_logtype = rte_log_register("pmd.net.vhost");
1542 if (vhost_logtype >= 0)
1543 rte_log_set_level(vhost_logtype, RTE_LOG_NOTICE);