/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 IGEL Co., Ltd.
 * Copyright(c) 2016-2018 Intel Corporation
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <sys/queue.h>
#include <sys/epoll.h>

#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"
RTE_LOG_REGISTER_DEFAULT(vhost_logtype, NOTICE);

#define VHOST_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)
enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
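/*
 * Virtqueue index convention: queue pair N uses virtqueue 2N (VIRTIO_RXQ,
 * the guest's RX ring, which this PMD transmits into) and virtqueue 2N + 1
 * (VIRTIO_TXQ, the guest's TX ring, which this PMD receives from). This is
 * why the interrupt code below derives the vring index of RX queue "qid"
 * as (qid << 1) + 1.
 */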
#define ETH_VHOST_IFACE_ARG		"iface"
#define ETH_VHOST_QUEUES_ARG		"queues"
#define ETH_VHOST_CLIENT_ARG		"client"
#define ETH_VHOST_IOMMU_SUPPORT		"iommu-support"
#define ETH_VHOST_POSTCOPY_SUPPORT	"postcopy-support"
#define ETH_VHOST_VIRTIO_NET_F_HOST_TSO "tso"
#define ETH_VHOST_LINEAR_BUF		"linear-buffer"
#define ETH_VHOST_EXT_BUF		"ext-buffer"
#define VHOST_MAX_PKT_BURST 32
static const char *valid_arguments[] = {
	ETH_VHOST_IFACE_ARG,
	ETH_VHOST_QUEUES_ARG,
	ETH_VHOST_CLIENT_ARG,
	ETH_VHOST_IOMMU_SUPPORT,
	ETH_VHOST_POSTCOPY_SUPPORT,
	ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
	ETH_VHOST_LINEAR_BUF,
	ETH_VHOST_EXT_BUF,
	NULL
};
static struct rte_ether_addr base_eth_addr = {
	.addr_bytes = {
		0x56 /* V */,
		0x48 /* H */,
		0x4F /* O */,
		0x53 /* S */,
		0x54 /* T */,
		0x00
	}
};
enum vhost_xstats_pkts {
	VHOST_UNDERSIZE_PKT = 0,
	VHOST_64_PKT,
	VHOST_65_TO_127_PKT,
	VHOST_128_TO_255_PKT,
	VHOST_256_TO_511_PKT,
	VHOST_512_TO_1023_PKT,
	VHOST_1024_TO_1522_PKT,
	VHOST_1523_TO_MAX_PKT,
	VHOST_BROADCAST_PKT,
	VHOST_MULTICAST_PKT,
	VHOST_UNICAST_PKT,
	VHOST_PKT,
	VHOST_BYTE,
	VHOST_MISSED_PKT,
	VHOST_ERRORS_PKT,
	VHOST_ERRORS_FRAGMENTED,
	VHOST_ERRORS_JABBER,
	VHOST_UNKNOWN_PROTOCOL,
	VHOST_XSTATS_MAX,
};
struct vhost_stats {
	uint64_t pkts;
	uint64_t bytes;
	uint64_t missed_pkts;
	uint64_t xstats[VHOST_XSTATS_MAX];
};
struct vhost_queue {
	int vid;
	rte_atomic32_t allow_queuing;
	rte_atomic32_t while_queuing;
	struct pmd_internal *internal;
	struct rte_mempool *mb_pool;
	uint16_t port;
	uint16_t virtqueue_id;
	struct vhost_stats stats;
	int intr_enable;
	rte_spinlock_t intr_lock;
};
struct pmd_internal {
	rte_atomic32_t dev_attached;
	char *iface_name;
	uint64_t flags;
	uint64_t disable_flags;
	uint16_t max_queues;
	int vid;
	rte_atomic32_t started;
	bool vlan_strip;
};
struct internal_list {
	TAILQ_ENTRY(internal_list) next;
	struct rte_eth_dev *eth_dev;
};
TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
	TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct rte_eth_link pmd_link = {
	.link_speed = 10000,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN
};
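/*
 * Per-port vring state tracking: cur[] holds the latest enable/disable
 * state reported by the vhost library, seen[] the last state consumed
 * through rte_eth_vhost_get_queue_event(); a mismatch between the two
 * marks a pending event.
 */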
struct rte_vhost_vring_state {
	rte_spinlock_t lock;

	bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
	bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
	unsigned int index;
	unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
	char name[VHOST_XSTATS_NAME_SIZE];
	uint64_t offset;
};
/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
	{"good_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
	{"total_bytes",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
	{"missed_pkts",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
	{"broadcast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
	{"multicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
	{"unicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
	{"undersize_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
	{"size_64_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
	{"size_65_to_127_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
	{"size_128_to_255_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
	{"size_256_to_511_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
	{"size_512_to_1023_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
	{"size_1024_to_1522_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
	{"size_1523_to_max_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
	{"errors_with_bad_CRC",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
	{"fragmented_errors",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
	{"jabber_errors",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
	{"unknown_protos_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};
/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
	{"good_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
	{"total_bytes",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
	{"missed_pkts",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
	{"broadcast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
	{"multicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
	{"unicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
	{"undersize_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
	{"size_64_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
	{"size_65_to_127_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
	{"size_128_to_255_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
	{"size_256_to_511_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
	{"size_512_to_1023_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
	{"size_1024_to_1522_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
	{"size_1523_to_max_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
	{"errors_with_bad_CRC",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};
#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
				sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
				sizeof(vhost_txport_stat_strings[0]))
static int
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct vhost_queue *vq = NULL;
	unsigned int i = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (!vq)
			continue;
		memset(&vq->stats, 0, sizeof(vq->stats));
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (!vq)
			continue;
		memset(&vq->stats, 0, sizeof(vq->stats));
	}

	return 0;
}
static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int limit __rte_unused)
{
	int count = 0;
	int t = 0;
	int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

	if (!xstats_names)
		return nstats;
	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "rx_%s", vhost_rxport_stat_strings[t].name);
		count++;
	}
	for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "tx_%s", vhost_txport_stat_strings[t].name);
		count++;
	}
	return count;
}
static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	unsigned int i;
	unsigned int t;
	unsigned int count = 0;
	struct vhost_queue *vq = NULL;
	unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

	if (n < nxstats)
		return nxstats;

	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
		xstats[count].value = 0;
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			vq = dev->data->rx_queues[i];
			if (!vq)
				continue;
			xstats[count].value +=
				*(uint64_t *)(((char *)vq)
				+ vhost_rxport_stat_strings[t].offset);
		}
		xstats[count].id = count;
		count++;
	}
	for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
		xstats[count].value = 0;
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			vq = dev->data->tx_queues[i];
			if (!vq)
				continue;
			xstats[count].value +=
				*(uint64_t *)(((char *)vq)
				+ vhost_txport_stat_strings[t].offset);
		}
		xstats[count].id = count;
		count++;
	}
	return count;
}
static inline void
vhost_count_xcast_packets(struct vhost_queue *vq,
			  struct rte_mbuf *mbuf)
{
	struct rte_ether_addr *ea = NULL;
	struct vhost_stats *pstats = &vq->stats;

	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
	if (rte_is_multicast_ether_addr(ea)) {
		if (rte_is_broadcast_ether_addr(ea))
			pstats->xstats[VHOST_BROADCAST_PKT]++;
		else
			pstats->xstats[VHOST_MULTICAST_PKT]++;
	} else {
		pstats->xstats[VHOST_UNICAST_PKT]++;
	}
}
static __rte_always_inline void
vhost_update_single_packet_xstats(struct vhost_queue *vq, struct rte_mbuf *buf)
{
	uint32_t pkt_len = 0;
	uint64_t index;
	struct vhost_stats *pstats = &vq->stats;

	pstats->xstats[VHOST_PKT]++;
	pkt_len = buf->pkt_len;
	if (pkt_len == 64) {
		pstats->xstats[VHOST_64_PKT]++;
	} else if (pkt_len > 64 && pkt_len < 1024) {
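		/*
		 * Bucket by power of two: (sizeof(pkt_len) * 8) -
		 * __builtin_clz(pkt_len) is floor(log2(pkt_len)) + 1, so a
		 * 65-127 byte packet (log2 = 6) yields index 2, 128-255
		 * yields 3, and so on up to 512-1023 -> 5. This relies on
		 * VHOST_64_PKT..VHOST_512_TO_1023_PKT being the consecutive
		 * enum values 1..5.
		 */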
		index = (sizeof(pkt_len) * 8)
			- __builtin_clz(pkt_len) - 5;
		pstats->xstats[index]++;
	} else {
		if (pkt_len < 64)
			pstats->xstats[VHOST_UNDERSIZE_PKT]++;
		else if (pkt_len <= 1522)
			pstats->xstats[VHOST_1024_TO_1522_PKT]++;
		else if (pkt_len > 1522)
			pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
	}
	vhost_count_xcast_packets(vq, buf);
}
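/*
 * eth_vhost_rx()/eth_vhost_tx() synchronize with control-path teardown
 * through two per-queue flags rather than a lock: update_queuing_status()
 * clears allow_queuing and then spins until while_queuing drops to zero,
 * so a burst function that has already set while_queuing re-checks
 * allow_queuing before touching the vhost device and backs out if it was
 * cleared in between.
 */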
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct vhost_queue *r = q;
	uint16_t i, nb_rx = 0;
	uint16_t nb_receive = nb_bufs;

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		return 0;

	rte_atomic32_set(&r->while_queuing, 1);

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		goto out;

	/* Dequeue packets from guest TX queue */
	while (nb_receive) {
		uint16_t nb_pkts;
		uint16_t num = (uint16_t)RTE_MIN(nb_receive,
						 VHOST_MAX_PKT_BURST);

		nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
						  r->mb_pool, &bufs[nb_rx],
						  num);

		nb_rx += nb_pkts;
		nb_receive -= nb_pkts;
		if (nb_pkts < num)
			break;
	}

	r->stats.pkts += nb_rx;

	for (i = 0; likely(i < nb_rx); i++) {
		bufs[i]->port = r->port;
		bufs[i]->vlan_tci = 0;

		if (r->internal->vlan_strip)
			rte_vlan_strip(bufs[i]);

		r->stats.bytes += bufs[i]->pkt_len;
		r->stats.xstats[VHOST_BYTE] += bufs[i]->pkt_len;

		vhost_update_single_packet_xstats(r, bufs[i]);
	}

out:
	rte_atomic32_set(&r->while_queuing, 0);

	return nb_rx;
}
static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct vhost_queue *r = q;
	uint16_t i, nb_tx = 0;
	uint16_t nb_send = 0;
	uint64_t nb_bytes = 0;
	uint64_t nb_missed = 0;

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		return 0;

	rte_atomic32_set(&r->while_queuing, 1);

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		goto out;

	for (i = 0; i < nb_bufs; i++) {
		struct rte_mbuf *m = bufs[i];

		/* Do VLAN tag insertion */
		if (m->ol_flags & PKT_TX_VLAN_PKT) {
			int error = rte_vlan_insert(&m);
			if (unlikely(error)) {
				rte_pktmbuf_free(m);
				continue;
			}
		}

		bufs[nb_send] = m;
		++nb_send;
	}

	/* Enqueue packets to guest RX queue */
	while (nb_send) {
		uint16_t nb_pkts;
		uint16_t num = (uint16_t)RTE_MIN(nb_send,
						 VHOST_MAX_PKT_BURST);

		nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
						  &bufs[nb_tx], num);

		nb_tx += nb_pkts;
		nb_send -= nb_pkts;
		if (nb_pkts < num)
			break;
	}

	for (i = 0; likely(i < nb_tx); i++) {
		nb_bytes += bufs[i]->pkt_len;
		vhost_update_single_packet_xstats(r, bufs[i]);
	}

	nb_missed = nb_bufs - nb_tx;

	r->stats.pkts += nb_tx;
	r->stats.bytes += nb_bytes;
	r->stats.missed_pkts += nb_missed;

	r->stats.xstats[VHOST_BYTE] += nb_bytes;
	r->stats.xstats[VHOST_MISSED_PKT] += nb_missed;
	r->stats.xstats[VHOST_UNICAST_PKT] += nb_missed;

	/* According to RFC2863, ifHCOutUcastPkts, ifHCOutMulticastPkts and
	 * ifHCOutBroadcastPkts counters are increased when packets are not
	 * transmitted successfully.
	 */
	for (i = nb_tx; i < nb_bufs; i++)
		vhost_count_xcast_packets(r, bufs[i]);
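	/* Only successfully enqueued mbufs are freed below; per the usual
	 * tx_burst contract, ownership of the nb_missed trailing mbufs
	 * stays with the caller.
	 */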
	for (i = 0; likely(i < nb_tx); i++)
		rte_pktmbuf_free(bufs[i]);

out:
	rte_atomic32_set(&r->while_queuing, 0);

	return nb_tx;
}
static inline struct internal_list *
find_internal_resource(char *ifname)
{
	int found = 0;
	struct internal_list *list;
	struct pmd_internal *internal;

	if (ifname == NULL)
		return NULL;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		internal = list->eth_dev->data->dev_private;
		if (!strcmp(internal->iface_name, ifname)) {
			found = 1;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	if (!found)
		return NULL;

	return list;
}
static int
eth_vhost_update_intr(struct rte_eth_dev *eth_dev, uint16_t rxq_idx)
{
	struct rte_intr_handle *handle = eth_dev->intr_handle;
	struct rte_epoll_event rev;
	int epfd, ret;

	if (handle == NULL)
		return 0;

	if (handle->efds[rxq_idx] == handle->elist[rxq_idx].fd)
		return 0;

	VHOST_LOG(INFO, "kickfd for rxq-%d was changed, updating handler.\n",
			rxq_idx);

	if (handle->elist[rxq_idx].fd != -1)
		VHOST_LOG(ERR, "Unexpected previous kickfd value (Got %d, expected -1).\n",
				handle->elist[rxq_idx].fd);

	/*
	 * First remove invalid epoll event, and then install
	 * the new one. May be solved with a proper API in the
	 * future.
	 */
	epfd = handle->elist[rxq_idx].epfd;
	rev = handle->elist[rxq_idx];
	ret = rte_epoll_ctl(epfd, EPOLL_CTL_DEL, rev.fd,
			    &handle->elist[rxq_idx]);
	if (ret) {
		VHOST_LOG(ERR, "Delete epoll event failed.\n");
		return ret;
	}

	rev.fd = handle->efds[rxq_idx];
	handle->elist[rxq_idx] = rev;
	ret = rte_epoll_ctl(epfd, EPOLL_CTL_ADD, rev.fd,
			    &handle->elist[rxq_idx]);
	if (ret) {
		VHOST_LOG(ERR, "Add epoll event failed.\n");
		return ret;
	}

	return 0;
}
static int
eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
{
	struct rte_vhost_vring vring;
	struct vhost_queue *vq;
	int old_intr_enable, ret = 0;

	vq = dev->data->rx_queues[qid];
	if (!vq) {
		VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
		return -1;
	}

	rte_spinlock_lock(&vq->intr_lock);
	old_intr_enable = vq->intr_enable;
	vq->intr_enable = 1;
	ret = eth_vhost_update_intr(dev, qid);
	rte_spinlock_unlock(&vq->intr_lock);

	if (ret < 0) {
		VHOST_LOG(ERR, "Failed to update rxq%d's intr\n", qid);
		vq->intr_enable = old_intr_enable;
		return ret;
	}

	ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
	if (ret < 0) {
		VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
		return ret;
	}
	VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
	rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
	rte_wmb();

	return ret;
}
static int
eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
{
	struct rte_vhost_vring vring;
	struct vhost_queue *vq;
	int ret = 0;

	vq = dev->data->rx_queues[qid];
	if (!vq) {
		VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
		return -1;
	}

	ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
	if (ret < 0) {
		VHOST_LOG(ERR, "Failed to get rxq%d's vring", qid);
		return ret;
	}
	VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
	rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
	rte_wmb();

	vq->intr_enable = 0;

	return 0;
}
static void
eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
{
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	if (intr_handle) {
		if (intr_handle->intr_vec)
			free(intr_handle->intr_vec);
		free(intr_handle);
	}

	dev->intr_handle = NULL;
}
static int
eth_vhost_install_intr(struct rte_eth_dev *dev)
{
	struct rte_vhost_vring vring;
	struct vhost_queue *vq;
	int nb_rxq = dev->data->nb_rx_queues;
	int i;
	int ret;

	/* uninstall firstly if we are reconnecting */
	if (dev->intr_handle)
		eth_vhost_uninstall_intr(dev);

	dev->intr_handle = malloc(sizeof(*dev->intr_handle));
	if (!dev->intr_handle) {
		VHOST_LOG(ERR, "Failed to allocate intr_handle\n");
		return -ENOMEM;
	}
	memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));

	dev->intr_handle->efd_counter_size = sizeof(uint64_t);

	dev->intr_handle->intr_vec =
		malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));

	if (!dev->intr_handle->intr_vec) {
		VHOST_LOG(ERR,
			"Failed to allocate memory for interrupt vector\n");
		free(dev->intr_handle);
		return -ENOMEM;
	}

	VHOST_LOG(INFO, "Prepare intr vec\n");
	for (i = 0; i < nb_rxq; i++) {
		dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
		dev->intr_handle->efds[i] = -1;
		vq = dev->data->rx_queues[i];
		if (!vq) {
			VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i);
			continue;
		}

		ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
		if (ret < 0) {
			VHOST_LOG(INFO,
				"Failed to get rxq-%d's vring, skip!\n", i);
			continue;
		}

		if (vring.kickfd < 0) {
			VHOST_LOG(INFO,
				"rxq-%d's kickfd is invalid, skip!\n", i);
			continue;
		}
		dev->intr_handle->efds[i] = vring.kickfd;
		VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
	}

	dev->intr_handle->nb_efd = nb_rxq;
	dev->intr_handle->max_intr = nb_rxq + 1;
	dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;

	return 0;
}
static void
update_queuing_status(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;
	struct vhost_queue *vq;
	unsigned int i;
	int allow_queuing = 1;

	if (!dev->data->rx_queues || !dev->data->tx_queues)
		return;

	if (rte_atomic32_read(&internal->started) == 0 ||
	    rte_atomic32_read(&internal->dev_attached) == 0)
		allow_queuing = 0;

	/* Wait until rx/tx_pkt_burst stops accessing vhost device */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, allow_queuing);
		while (rte_atomic32_read(&vq->while_queuing))
			rte_pause();
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, allow_queuing);
		while (rte_atomic32_read(&vq->while_queuing))
			rte_pause();
	}
}
static void
queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
{
	struct vhost_queue *vq;
	int i;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		vq = eth_dev->data->rx_queues[i];
		if (!vq)
			continue;
		vq->vid = internal->vid;
		vq->internal = internal;
		vq->port = eth_dev->data->port_id;
	}
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		vq = eth_dev->data->tx_queues[i];
		if (!vq)
			continue;
		vq->vid = internal->vid;
		vq->internal = internal;
		vq->port = eth_dev->data->port_id;
	}
}
static int
new_device(int vid)
{
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	struct pmd_internal *internal;
	struct rte_eth_conf *dev_conf;
	unsigned i;
	char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
	int newnode;
#endif

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
		return -1;
	}

	eth_dev = list->eth_dev;
	internal = eth_dev->data->dev_private;
	dev_conf = &eth_dev->data->dev_conf;

#ifdef RTE_LIBRTE_VHOST_NUMA
	newnode = rte_vhost_get_numa_node(vid);
	if (newnode >= 0)
		eth_dev->data->numa_node = newnode;
#endif

	internal->vid = vid;
	if (rte_atomic32_read(&internal->started) == 1) {
		queue_setup(eth_dev, internal);

		if (dev_conf->intr_conf.rxq) {
			if (eth_vhost_install_intr(eth_dev) < 0) {
				VHOST_LOG(INFO,
					"Failed to install interrupt handler.");
				return -1;
			}
		}
	} else {
		VHOST_LOG(INFO, "RX/TX queues not exist yet\n");
	}

	for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
		rte_vhost_enable_guest_notification(vid, i, 0);

	rte_vhost_get_mtu(vid, &eth_dev->data->mtu);

	eth_dev->data->dev_link.link_status = ETH_LINK_UP;

	rte_atomic32_set(&internal->dev_attached, 1);
	update_queuing_status(eth_dev);

	VHOST_LOG(INFO, "Vhost device %d created\n", vid);

	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

	return 0;
}
static void
destroy_device(int vid)
{
	struct rte_eth_dev *eth_dev;
	struct pmd_internal *internal;
	struct vhost_queue *vq;
	struct internal_list *list;
	char ifname[PATH_MAX];
	unsigned i;
	struct rte_vhost_vring_state *state;

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
		return;
	}
	eth_dev = list->eth_dev;
	internal = eth_dev->data->dev_private;

	rte_atomic32_set(&internal->dev_attached, 0);
	update_queuing_status(eth_dev);

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
			vq = eth_dev->data->rx_queues[i];
			if (!vq)
				continue;
			vq->vid = -1;
		}
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
			vq = eth_dev->data->tx_queues[i];
			if (!vq)
				continue;
			vq->vid = -1;
		}
	}

	state = vring_states[eth_dev->data->port_id];
	rte_spinlock_lock(&state->lock);
	for (i = 0; i <= state->max_vring; i++) {
		state->cur[i] = false;
		state->seen[i] = false;
	}
	state->max_vring = 0;
	rte_spinlock_unlock(&state->lock);

	VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
	eth_vhost_uninstall_intr(eth_dev);

	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int
vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id)
{
	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
	struct pmd_internal *internal = eth_dev->data->dev_private;
	struct vhost_queue *vq;
	struct rte_vhost_vring vring;
	int rx_idx = vring_id % 2 ? (vring_id - 1) >> 1 : -1;
	int ret = 0;
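	/*
	 * Odd vring ids are the guest's TX rings, which map to this PMD's
	 * RX queues (see the virtqueue numbering note near the top of the
	 * file); even ids carry no RX interrupt state, hence rx_idx = -1.
	 */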
	/*
	 * The vring kickfd may be changed after the new device notification.
	 * Update it when the vring state is updated.
	 */
	if (rx_idx >= 0 && rx_idx < eth_dev->data->nb_rx_queues &&
	    rte_atomic32_read(&internal->dev_attached) &&
	    rte_atomic32_read(&internal->started) &&
	    dev_conf->intr_conf.rxq) {
		ret = rte_vhost_get_vhost_vring(vid, vring_id, &vring);
		if (ret) {
			VHOST_LOG(ERR, "Failed to get vring %d information.\n",
					vring_id);
			return ret;
		}
		eth_dev->intr_handle->efds[rx_idx] = vring.kickfd;

		vq = eth_dev->data->rx_queues[rx_idx];
		if (!vq) {
			VHOST_LOG(ERR, "rxq%d is not setup yet\n", rx_idx);
			return -1;
		}

		rte_spinlock_lock(&vq->intr_lock);
		if (vq->intr_enable)
			ret = eth_vhost_update_intr(eth_dev, rx_idx);
		rte_spinlock_unlock(&vq->intr_lock);
	}

	return ret;
}
static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
	struct rte_vhost_vring_state *state;
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	char ifname[PATH_MAX];

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
		return -1;
	}

	eth_dev = list->eth_dev;
	state = vring_states[eth_dev->data->port_id];

	if (enable && vring_conf_update(vid, eth_dev, vring))
		VHOST_LOG(INFO, "Failed to update vring-%d configuration.\n",
			  (int)vring);

	rte_spinlock_lock(&state->lock);
	if (state->cur[vring] == enable) {
		rte_spinlock_unlock(&state->lock);
		return 0;
	}
	state->cur[vring] = enable;
	state->max_vring = RTE_MAX(vring, state->max_vring);
	rte_spinlock_unlock(&state->lock);

	VHOST_LOG(INFO, "vring%u is %s\n",
			vring, enable ? "enabled" : "disabled");

	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

	return 0;
}
static struct vhost_device_ops vhost_ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,
	.vring_state_changed = vring_state_changed,
};
static int
vhost_driver_setup(struct rte_eth_dev *eth_dev)
{
	struct pmd_internal *internal = eth_dev->data->dev_private;
	struct internal_list *list = NULL;
	struct rte_vhost_vring_state *vring_state = NULL;
	unsigned int numa_node = eth_dev->device->numa_node;
	const char *name = eth_dev->device->name;

	/* Don't try to setup again if it has already been done. */
	list = find_internal_resource(internal->iface_name);
	if (list)
		return 0;

	list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
	if (list == NULL)
		return -1;

	vring_state = rte_zmalloc_socket(name, sizeof(*vring_state),
					 0, numa_node);
	if (vring_state == NULL)
		goto free_list;

	list->eth_dev = eth_dev;
	pthread_mutex_lock(&internal_list_lock);
	TAILQ_INSERT_TAIL(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_spinlock_init(&vring_state->lock);
	vring_states[eth_dev->data->port_id] = vring_state;

	if (rte_vhost_driver_register(internal->iface_name, internal->flags))
		goto list_remove;

	if (internal->disable_flags) {
		if (rte_vhost_driver_disable_features(internal->iface_name,
						      internal->disable_flags))
			goto drv_unreg;
	}

	if (rte_vhost_driver_callback_register(internal->iface_name,
					       &vhost_ops) < 0) {
		VHOST_LOG(ERR, "Can't register callbacks\n");
		goto drv_unreg;
	}

	if (rte_vhost_driver_start(internal->iface_name) < 0) {
		VHOST_LOG(ERR, "Failed to start driver for %s\n",
			  internal->iface_name);
		goto drv_unreg;
	}

	return 0;

drv_unreg:
	rte_vhost_driver_unregister(internal->iface_name);
list_remove:
	vring_states[eth_dev->data->port_id] = NULL;
	pthread_mutex_lock(&internal_list_lock);
	TAILQ_REMOVE(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);
	rte_free(vring_state);
free_list:
	rte_free(list);

	return -1;
}
int
rte_eth_vhost_get_queue_event(uint16_t port_id,
		struct rte_eth_vhost_queue_event *event)
{
	struct rte_vhost_vring_state *state;
	unsigned int i;
	int idx;

	if (port_id >= RTE_MAX_ETHPORTS) {
		VHOST_LOG(ERR, "Invalid port id\n");
		return -1;
	}

	state = vring_states[port_id];
	if (!state) {
		VHOST_LOG(ERR, "Unused port\n");
		return -1;
	}

	rte_spinlock_lock(&state->lock);
	for (i = 0; i <= state->max_vring; i++) {
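		/*
		 * state->index persists across calls, so the scan resumes
		 * round-robin after the vring reported last time instead of
		 * always favouring the lowest-numbered vring.
		 */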
		idx = state->index++ % (state->max_vring + 1);

		if (state->cur[idx] != state->seen[idx]) {
			state->seen[idx] = state->cur[idx];
			event->queue_id = idx / 2;
			event->rx = idx & 1;
			event->enable = state->cur[idx];
			rte_spinlock_unlock(&state->lock);
			return 0;
		}
	}
	rte_spinlock_unlock(&state->lock);

	return -1;
}
int
rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
	struct internal_list *list;
	struct rte_eth_dev *eth_dev;
	struct vhost_queue *vq;
	int vid = -1;

	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		eth_dev = list->eth_dev;
		if (eth_dev->data->port_id == port_id) {
			vq = eth_dev->data->rx_queues[0];
			if (vq)
				vid = vq->vid;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	return vid;
}
static int
eth_dev_configure(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;
	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	/* NOTE: the same process has to operate a vhost interface
	 * from beginning to end (from eth_dev configure to eth_dev close).
	 * It is the user's responsibility at the moment.
	 */
	if (vhost_driver_setup(dev) < 0)
		return -1;

	internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);

	return 0;
}
static int
eth_dev_start(struct rte_eth_dev *eth_dev)
{
	struct pmd_internal *internal = eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;

	queue_setup(eth_dev, internal);

	if (rte_atomic32_read(&internal->dev_attached) == 1) {
		if (dev_conf->intr_conf.rxq) {
			if (eth_vhost_install_intr(eth_dev) < 0) {
				VHOST_LOG(INFO,
					"Failed to install interrupt handler.");
				return -1;
			}
		}
	}

	rte_atomic32_set(&internal->started, 1);
	update_queuing_status(eth_dev);

	return 0;
}
static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;

	dev->data->dev_started = 0;
	rte_atomic32_set(&internal->started, 0);
	update_queuing_status(dev);

	return 0;
}
static int
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal;
	struct internal_list *list;
	unsigned int i, ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	internal = dev->data->dev_private;
	if (!internal)
		return 0;

	ret = eth_dev_stop(dev);

	list = find_internal_resource(internal->iface_name);
	if (list) {
		rte_vhost_driver_unregister(internal->iface_name);
		pthread_mutex_lock(&internal_list_lock);
		TAILQ_REMOVE(&internal_list, list, next);
		pthread_mutex_unlock(&internal_list_lock);
		rte_free(list);
	}

	if (dev->data->rx_queues)
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			rte_free(dev->data->rx_queues[i]);

	if (dev->data->tx_queues)
		for (i = 0; i < dev->data->nb_tx_queues; i++)
			rte_free(dev->data->tx_queues[i]);

	rte_free(internal->iface_name);
	rte_free(internal);

	dev->data->dev_private = NULL;

	rte_free(vring_states[dev->data->port_id]);
	vring_states[dev->data->port_id] = NULL;

	return ret;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct vhost_queue *vq;

	vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (vq == NULL) {
		VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
		return -ENOMEM;
	}

	vq->mb_pool = mb_pool;
	vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
	rte_spinlock_init(&vq->intr_lock);
	dev->data->rx_queues[rx_queue_id] = vq;

	return 0;
}
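/*
 * Note the deliberate cross-over in the queue setup functions: the PMD's
 * RX queue above was bound to the guest's TX virtqueue (VIRTIO_TXQ), and
 * the TX queue below binds to the guest's RX virtqueue (VIRTIO_RXQ),
 * since one side's transmit ring is the other side's receive ring.
 */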
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct vhost_queue *vq;

	vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (vq == NULL) {
		VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
		return -ENOMEM;
	}

	vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
	rte_spinlock_init(&vq->intr_lock);
	dev->data->tx_queues[tx_queue_id] = vq;

	return 0;
}
static int
eth_dev_info(struct rte_eth_dev *dev,
	     struct rte_eth_dev_info *dev_info)
{
	struct pmd_internal *internal;

	internal = dev->data->dev_private;
	if (internal == NULL) {
		VHOST_LOG(ERR, "Invalid device specified\n");
		return -ENODEV;
	}

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = internal->max_queues;
	dev_info->max_tx_queues = internal->max_queues;
	dev_info->min_rx_bufsize = 0;

	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
				DEV_TX_OFFLOAD_VLAN_INSERT;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

	return 0;
}
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned i;
	unsigned long rx_total = 0, tx_total = 0;
	unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
	struct vhost_queue *vq;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;
		vq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = vq->stats.pkts;
		rx_total += stats->q_ipackets[i];

		stats->q_ibytes[i] = vq->stats.bytes;
		rx_total_bytes += stats->q_ibytes[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;
		vq = dev->data->tx_queues[i];
		stats->q_opackets[i] = vq->stats.pkts;
		tx_total += stats->q_opackets[i];

		stats->q_obytes[i] = vq->stats.bytes;
		tx_total_bytes += stats->q_obytes[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;
	stats->ibytes = rx_total_bytes;
	stats->obytes = tx_total_bytes;

	return 0;
}
static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	struct vhost_queue *vq;
	unsigned i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;
		vq = dev->data->rx_queues[i];
		vq->stats.pkts = 0;
		vq->stats.bytes = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;
		vq = dev->data->tx_queues[i];
		vq->stats.pkts = 0;
		vq->stats.bytes = 0;
		vq->stats.missed_pkts = 0;
	}

	return 0;
}
static void
eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	rte_free(dev->data->rx_queues[qid]);
}

static void
eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	rte_free(dev->data->tx_queues[qid]);
}
static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
	/*
	 * vHost does not hang onto mbufs: eth_vhost_tx() copies packet data
	 * into the guest's ring and releases the mbuf, so there is nothing
	 * to clean up.
	 */
	return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}
static uint32_t
eth_rx_queue_count(void *rx_queue)
{
	struct vhost_queue *vq;

	vq = rx_queue;
	if (vq == NULL)
		return 0;

	return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_release = eth_tx_queue_release,
	.tx_done_cleanup = eth_tx_done_cleanup,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.xstats_reset = vhost_dev_xstats_reset,
	.xstats_get = vhost_dev_xstats_get,
	.xstats_get_names = vhost_dev_xstats_get_names,
	.rx_queue_intr_enable = eth_rxq_intr_enable,
	.rx_queue_intr_disable = eth_rxq_intr_disable,
};
static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
	int16_t queues, const unsigned int numa_node, uint64_t flags,
	uint64_t disable_flags)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev_data *data;
	struct pmd_internal *internal = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct rte_ether_addr *eth_addr = NULL;

	VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
		numa_node);

	/* reserve an ethdev entry */
	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
	if (eth_dev == NULL)
		goto error;
	data = eth_dev->data;

	eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
	if (eth_addr == NULL)
		goto error;
	data->mac_addrs = eth_addr;
	*eth_addr = base_eth_addr;
	eth_addr->addr_bytes[5] = eth_dev->data->port_id;

	/* now put it all together
	 * - store queue data in internal,
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	internal = eth_dev->data->dev_private;
	internal->iface_name = rte_malloc_socket(name, strlen(iface_name) + 1,
						 0, numa_node);
	if (internal->iface_name == NULL)
		goto error;
	strcpy(internal->iface_name, iface_name);

	data->nb_rx_queues = queues;
	data->nb_tx_queues = queues;
	internal->max_queues = queues;
	internal->vid = -1;
	internal->flags = flags;
	internal->disable_flags = disable_flags;
	data->dev_link = pmd_link;
	data->dev_flags = RTE_ETH_DEV_INTR_LSC |
				RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	data->promiscuous = 1;
	data->all_multicast = 1;

	eth_dev->dev_ops = &ops;
	eth_dev->rx_queue_count = eth_rx_queue_count;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_vhost_rx;
	eth_dev->tx_pkt_burst = eth_vhost_tx;

	rte_eth_dev_probing_finish(eth_dev);
	return 0;

error:
	if (internal)
		rte_free(internal->iface_name);
	rte_eth_dev_release_port(eth_dev);

	return -1;
}
static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
	const char **iface_name = extra_args;

	if (value == NULL)
		return -1;

	*iface_name = value;

	return 0;
}
static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
	uint16_t *n = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	errno = 0;
	*n = (uint16_t)strtoul(value, NULL, 0);
	if (*n == USHRT_MAX && errno == ERANGE)
		return -1;

	return 0;
}
static int
rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	char *iface_name;
	uint16_t queues;
	uint64_t flags = 0;
	uint64_t disable_flags = 0;
	int client_mode = 0;
	int iommu_support = 0;
	int postcopy_support = 0;
	int tso = 0;
	int linear_buf = 0;
	int ext_buf = 0;
	struct rte_eth_dev *eth_dev;
	const char *name = rte_vdev_device_name(dev);

	VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			VHOST_LOG(ERR, "Failed to probe %s\n", name);
			return -1;
		}
		eth_dev->rx_pkt_burst = eth_vhost_rx;
		eth_dev->tx_pkt_burst = eth_vhost_tx;
		eth_dev->dev_ops = &ops;
		if (dev->device.numa_node == SOCKET_ID_ANY)
			dev->device.numa_node = rte_socket_id();
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL)
		return -1;

	if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
					 &open_iface, &iface_name);
		if (ret < 0)
			goto out_free;
	} else {
		ret = -1;
		goto out_free;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
					 &open_int, &queues);
		if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
			goto out_free;
	} else {
		queues = 1;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
					 &open_int, &client_mode);
		if (ret < 0)
			goto out_free;

		if (client_mode)
			flags |= RTE_VHOST_USER_CLIENT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
					 &open_int, &iommu_support);
		if (ret < 0)
			goto out_free;

		if (iommu_support)
			flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_POSTCOPY_SUPPORT) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_POSTCOPY_SUPPORT,
					 &open_int, &postcopy_support);
		if (ret < 0)
			goto out_free;

		if (postcopy_support)
			flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_VIRTIO_NET_F_HOST_TSO) == 1) {
		ret = rte_kvargs_process(kvlist,
				ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
				&open_int, &tso);
		if (ret < 0)
			goto out_free;

		if (tso == 0) {
			disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
			disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
		}
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_LINEAR_BUF) == 1) {
		ret = rte_kvargs_process(kvlist,
				ETH_VHOST_LINEAR_BUF,
				&open_int, &linear_buf);
		if (ret < 0)
			goto out_free;

		if (linear_buf == 1)
			flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_EXT_BUF) == 1) {
		ret = rte_kvargs_process(kvlist,
				ETH_VHOST_EXT_BUF,
				&open_int, &ext_buf);
		if (ret < 0)
			goto out_free;

		if (ext_buf == 1)
			flags |= RTE_VHOST_USER_EXTBUF_SUPPORT;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	ret = eth_dev_vhost_create(dev, iface_name, queues,
				   dev->device.numa_node, flags,
				   disable_flags);
	if (ret == -1)
		VHOST_LOG(ERR, "Failed to create %s\n", name);

out_free:
	rte_kvargs_free(kvlist);
	return ret;
}
static int
rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
	const char *name;
	struct rte_eth_dev *eth_dev = NULL;

	name = rte_vdev_device_name(dev);
	VHOST_LOG(INFO, "Un-initializing pmd_vhost for %s\n", name);

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return 0;

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_vhost_drv = {
	.probe = rte_pmd_vhost_probe,
	.remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
	"iface=<ifc> "
	"queues=<int> "
	"client=<0|1> "
	"iommu-support=<0|1> "
	"postcopy-support=<0|1> "
	"tso=<0|1> "
	"linear-buffer=<0|1> "
	"ext-buffer=<0|1>");