/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 IGEL Co., Ltd.
 * Copyright(c) 2016-2018 Intel Corporation
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>
#include <sys/epoll.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"

RTE_LOG_REGISTER(vhost_logtype, pmd.net.vhost, NOTICE);

#define VHOST_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)

enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

#define ETH_VHOST_IFACE_ARG		"iface"
#define ETH_VHOST_QUEUES_ARG		"queues"
#define ETH_VHOST_CLIENT_ARG		"client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY	"dequeue-zero-copy"
#define ETH_VHOST_IOMMU_SUPPORT		"iommu-support"
#define ETH_VHOST_POSTCOPY_SUPPORT	"postcopy-support"
#define ETH_VHOST_VIRTIO_NET_F_HOST_TSO "tso"
#define ETH_VHOST_LINEAR_BUF		"linear-buffer"
#define ETH_VHOST_EXT_BUF		"ext-buffer"
#define VHOST_MAX_PKT_BURST 32

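/*
 * Bursts larger than VHOST_MAX_PKT_BURST are not rejected: eth_vhost_rx()
 * and eth_vhost_tx() below split them into chunks of at most 32 packets
 * per rte_vhost_dequeue_burst()/rte_vhost_enqueue_burst() call.
 */
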
static const char *valid_arguments[] = {
	ETH_VHOST_IFACE_ARG,
	ETH_VHOST_QUEUES_ARG,
	ETH_VHOST_CLIENT_ARG,
	ETH_VHOST_DEQUEUE_ZERO_COPY,
	ETH_VHOST_IOMMU_SUPPORT,
	ETH_VHOST_POSTCOPY_SUPPORT,
	ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
	ETH_VHOST_LINEAR_BUF,
	ETH_VHOST_EXT_BUF,
	NULL
};

static struct rte_ether_addr base_eth_addr = {
	.addr_bytes = {
		0x56 /* V */,
		0x48 /* H */,
		0x4F /* O */,
		0x53 /* S */,
		0x54 /* T */,
		0x00
	}
};

enum vhost_xstats_pkts {
	VHOST_UNDERSIZE_PKT = 0,
	VHOST_64_PKT,
	VHOST_65_TO_127_PKT,
	VHOST_128_TO_255_PKT,
	VHOST_256_TO_511_PKT,
	VHOST_512_TO_1023_PKT,
	VHOST_1024_TO_1522_PKT,
	VHOST_1523_TO_MAX_PKT,
	VHOST_BROADCAST_PKT,
	VHOST_MULTICAST_PKT,
	VHOST_UNICAST_PKT,
	VHOST_ERRORS_PKT,
	VHOST_ERRORS_FRAGMENTED,
	VHOST_ERRORS_JABBER,
	VHOST_UNKNOWN_PROTOCOL,
	VHOST_XSTATS_MAX,
};

struct vhost_stats {
	uint64_t pkts;
	uint64_t bytes;
	uint64_t missed_pkts;
	uint64_t xstats[VHOST_XSTATS_MAX];
};

struct vhost_queue {
	int vid;
	rte_atomic32_t allow_queuing;
	rte_atomic32_t while_queuing;
	struct pmd_internal *internal;
	struct rte_mempool *mb_pool;
	uint16_t port;
	uint16_t virtqueue_id;
	struct vhost_stats stats;
	int intr_enable;
	rte_spinlock_t intr_lock;
};

struct pmd_internal {
	rte_atomic32_t dev_attached;
	char *iface_name;
	uint64_t flags;
	uint64_t disable_flags;
	uint16_t max_queues;
	int vid;
	rte_atomic32_t started;
	uint8_t vlan_strip;
};

struct internal_list {
	TAILQ_ENTRY(internal_list) next;
	struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
	TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct rte_eth_link pmd_link = {
		.link_speed = 10000,
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_status = ETH_LINK_DOWN
};

struct rte_vhost_vring_state {
	rte_spinlock_t lock;

	bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
	bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
	unsigned int index;
	unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
	char name[VHOST_XSTATS_NAME_SIZE];
	uint64_t offset;
};

/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
	{"good_packets",
	 offsetof(struct vhost_queue, stats.pkts)},
	{"total_bytes",
	 offsetof(struct vhost_queue, stats.bytes)},
	{"missed_pkts",
	 offsetof(struct vhost_queue, stats.missed_pkts)},
	{"broadcast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
	{"multicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
	{"unicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
	{"undersize_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
	{"size_64_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
	{"size_65_to_127_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
	{"size_128_to_255_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
	{"size_256_to_511_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
	{"size_512_to_1023_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
	{"size_1024_to_1522_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
	{"size_1523_to_max_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
	{"errors_with_bad_CRC",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
	{"fragmented_errors",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
	{"jabber_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
	{"unknown_protos_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};

/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
	{"good_packets",
	 offsetof(struct vhost_queue, stats.pkts)},
	{"total_bytes",
	 offsetof(struct vhost_queue, stats.bytes)},
	{"missed_pkts",
	 offsetof(struct vhost_queue, stats.missed_pkts)},
	{"broadcast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
	{"multicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
	{"unicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
	{"undersize_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
	{"size_64_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
	{"size_65_to_127_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
	{"size_128_to_255_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
	{"size_256_to_511_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
	{"size_512_to_1023_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
	{"size_1024_to_1522_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
	{"size_1523_to_max_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
	{"errors_with_bad_CRC",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};

#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
				sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
				sizeof(vhost_txport_stat_strings[0]))

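/*
 * The tables above drive a generic xstats implementation: each entry pairs
 * a counter name with the counter's byte offset inside struct vhost_queue,
 * so vhost_dev_xstats_get() can sum any counter across all queues with a
 * single pointer-arithmetic read per queue instead of per-counter code.
 */
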
static int
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct vhost_queue *vq = NULL;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (!vq)
			continue;
		memset(&vq->stats, 0, sizeof(vq->stats));
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (!vq)
			continue;
		memset(&vq->stats, 0, sizeof(vq->stats));
	}

	return 0;
}

static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int limit __rte_unused)
{
	unsigned int t = 0;
	int count = 0;
	int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

	if (!xstats_names)
		return nstats;
	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "rx_%s", vhost_rxport_stat_strings[t].name);
		count++;
	}
	for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "tx_%s", vhost_txport_stat_strings[t].name);
		count++;
	}
	return count;
}

static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	unsigned int i;
	unsigned int t;
	unsigned int count = 0;
	struct vhost_queue *vq = NULL;
	unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

	if (n < nxstats)
		return nxstats;

	/* The data path only counts broadcast and multicast packets, so
	 * derive the unicast count from the totals here.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (!vq)
			continue;
		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (!vq)
			continue;
		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
				+ vq->stats.missed_pkts
				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
	}
	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
		xstats[count].value = 0;
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			vq = dev->data->rx_queues[i];
			if (!vq)
				continue;
			xstats[count].value +=
				*(uint64_t *)(((char *)vq)
				+ vhost_rxport_stat_strings[t].offset);
		}
		xstats[count].id = count;
		count++;
	}
	for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
		xstats[count].value = 0;
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			vq = dev->data->tx_queues[i];
			if (!vq)
				continue;
			xstats[count].value +=
				*(uint64_t *)(((char *)vq)
				+ vhost_txport_stat_strings[t].offset);
		}
		xstats[count].id = count;
		count++;
	}
	return count;
}

static void
vhost_count_multicast_broadcast(struct vhost_queue *vq,
				struct rte_mbuf *mbuf)
{
	struct rte_ether_addr *ea = NULL;
	struct vhost_stats *pstats = &vq->stats;

	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
	if (rte_is_multicast_ether_addr(ea)) {
		if (rte_is_broadcast_ether_addr(ea))
			pstats->xstats[VHOST_BROADCAST_PKT]++;
		else
			pstats->xstats[VHOST_MULTICAST_PKT]++;
	}
}

static void
vhost_update_packet_xstats(struct vhost_queue *vq,
			   struct rte_mbuf **bufs,
			   uint16_t count)
{
	uint32_t pkt_len = 0;
	uint64_t i = 0;
	uint64_t index;
	struct vhost_stats *pstats = &vq->stats;

	for (i = 0; i < count; i++) {
		pkt_len = bufs[i]->pkt_len;
		if (pkt_len == 64) {
			pstats->xstats[VHOST_64_PKT]++;
		} else if (pkt_len > 64 && pkt_len < 1024) {
			index = (sizeof(pkt_len) * 8)
				- __builtin_clz(pkt_len) - 5;
			pstats->xstats[index]++;
		} else {
			if (pkt_len < 64)
				pstats->xstats[VHOST_UNDERSIZE_PKT]++;
			else if (pkt_len <= 1522)
				pstats->xstats[VHOST_1024_TO_1522_PKT]++;
			else if (pkt_len > 1522)
				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
		}
		vhost_count_multicast_broadcast(vq, bufs[i]);
	}
}

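/*
 * A worked example of the size-bucket index above (a sketch of the
 * arithmetic, not extra driver logic): for 64 < pkt_len < 1024,
 *
 *	index = 32 - __builtin_clz(pkt_len) - 5
 *
 * i.e. floor(log2(pkt_len)) - 4. With VHOST_UNDERSIZE_PKT = 0 and
 * VHOST_64_PKT = 1:
 *
 *	pkt_len = 100  -> 32 - 25 - 5 = 2 -> VHOST_65_TO_127_PKT
 *	pkt_len = 128  -> 32 - 24 - 5 = 3 -> VHOST_128_TO_255_PKT
 *	pkt_len = 1000 -> 32 - 22 - 5 = 5 -> VHOST_512_TO_1023_PKT
 *
 * This only works because the enum members are declared in exactly this
 * order.
 */
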
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct vhost_queue *r = q;
	uint16_t i, nb_rx = 0;
	uint16_t nb_receive = nb_bufs;

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		return 0;

	rte_atomic32_set(&r->while_queuing, 1);

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		goto out;

	/* Dequeue packets from guest TX queue */
	while (nb_receive) {
		uint16_t nb_pkts;
		uint16_t num = (uint16_t)RTE_MIN(nb_receive,
						 VHOST_MAX_PKT_BURST);

		nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
						  r->mb_pool, &bufs[nb_rx],
						  num);

		nb_rx += nb_pkts;
		nb_receive -= nb_pkts;
		if (nb_pkts < num)
			break;
	}

	r->stats.pkts += nb_rx;

	for (i = 0; likely(i < nb_rx); i++) {
		bufs[i]->port = r->port;
		bufs[i]->vlan_tci = 0;

		if (r->internal->vlan_strip)
			rte_vlan_strip(bufs[i]);

		r->stats.bytes += bufs[i]->pkt_len;
	}

	vhost_update_packet_xstats(r, bufs, nb_rx);

out:
	rte_atomic32_set(&r->while_queuing, 0);

	return nb_rx;
}

static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct vhost_queue *r = q;
	uint16_t i, nb_tx = 0;
	uint16_t nb_send = 0;

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		return 0;

	rte_atomic32_set(&r->while_queuing, 1);

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		goto out;

	for (i = 0; i < nb_bufs; i++) {
		struct rte_mbuf *m = bufs[i];

		/* Do VLAN tag insertion */
		if (m->ol_flags & PKT_TX_VLAN_PKT) {
			int error = rte_vlan_insert(&m);
			if (unlikely(error)) {
				rte_pktmbuf_free(m);
				continue;
			}
		}

		bufs[nb_send] = m;
		++nb_send;
	}

	/* Enqueue packets to guest RX queue */
	while (nb_send) {
		uint16_t nb_pkts;
		uint16_t num = (uint16_t)RTE_MIN(nb_send,
						 VHOST_MAX_PKT_BURST);

		nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
						  &bufs[nb_tx], num);

		nb_tx += nb_pkts;
		nb_send -= nb_pkts;
		if (nb_pkts < num)
			break;
	}

	r->stats.pkts += nb_tx;
	r->stats.missed_pkts += nb_bufs - nb_tx;

	for (i = 0; likely(i < nb_tx); i++)
		r->stats.bytes += bufs[i]->pkt_len;

	vhost_update_packet_xstats(r, bufs, nb_tx);

	/* According to RFC 2863, sections ifHCOutMulticastPkts and
	 * ifHCOutBroadcastPkts, the "multicast" and "broadcast" counters
	 * are increased even when packets are not transmitted successfully.
	 */
	for (i = nb_tx; i < nb_bufs; i++)
		vhost_count_multicast_broadcast(r, bufs[i]);

	for (i = 0; likely(i < nb_tx); i++)
		rte_pktmbuf_free(bufs[i]);

out:
	rte_atomic32_set(&r->while_queuing, 0);

	return nb_tx;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
	int found = 0;
	struct internal_list *list;
	struct pmd_internal *internal;

	if (ifname == NULL)
		return NULL;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		internal = list->eth_dev->data->dev_private;
		if (!strcmp(internal->iface_name, ifname)) {
			found = 1;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	if (!found)
		return NULL;

	return list;
}

static int
eth_vhost_update_intr(struct rte_eth_dev *eth_dev, uint16_t rxq_idx)
{
	struct rte_intr_handle *handle = eth_dev->intr_handle;
	struct rte_epoll_event rev;
	int epfd, ret;

	if (!handle)
		return 0;

	if (handle->efds[rxq_idx] == handle->elist[rxq_idx].fd)
		return 0;

	VHOST_LOG(INFO, "kickfd for rxq-%d was changed, updating handler.\n",
			rxq_idx);

	if (handle->elist[rxq_idx].fd != -1)
		VHOST_LOG(ERR, "Unexpected previous kickfd value (Got %d, expected -1).\n",
			handle->elist[rxq_idx].fd);

	/*
	 * First remove invalid epoll event, and then install
	 * the new one. May be solved with a proper API in the
	 * future.
	 */
	epfd = handle->elist[rxq_idx].epfd;
	rev = handle->elist[rxq_idx];
	ret = rte_epoll_ctl(epfd, EPOLL_CTL_DEL, rev.fd,
			&handle->elist[rxq_idx]);
	if (ret) {
		VHOST_LOG(ERR, "Delete epoll event failed.\n");
		return ret;
	}

	rev.fd = handle->efds[rxq_idx];
	handle->elist[rxq_idx] = rev;
	ret = rte_epoll_ctl(epfd, EPOLL_CTL_ADD, rev.fd,
			&handle->elist[rxq_idx]);
	if (ret) {
		VHOST_LOG(ERR, "Add epoll event failed.\n");
		return ret;
	}

	return 0;
}

static int
eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
{
	struct rte_vhost_vring vring;
	struct vhost_queue *vq;
	int old_intr_enable, ret = 0;

	vq = dev->data->rx_queues[qid];
	if (!vq) {
		VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
		return -1;
	}

	rte_spinlock_lock(&vq->intr_lock);
	old_intr_enable = vq->intr_enable;
	vq->intr_enable = 1;
	ret = eth_vhost_update_intr(dev, qid);
	rte_spinlock_unlock(&vq->intr_lock);

	if (ret < 0) {
		VHOST_LOG(ERR, "Failed to update rxq%d's intr\n", qid);
		vq->intr_enable = old_intr_enable;
		return ret;
	}

	ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
	if (ret < 0) {
		VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
		return ret;
	}
	VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
	rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
	rte_wmb();

	return 0;
}

static int
eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
{
	struct rte_vhost_vring vring;
	struct vhost_queue *vq;
	int ret = 0;

	vq = dev->data->rx_queues[qid];
	if (!vq) {
		VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
		return -1;
	}

	ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
	if (ret < 0) {
		VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
		return ret;
	}
	VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
	rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
	rte_wmb();

	vq->intr_enable = 0;

	return 0;
}

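/*
 * Note on the two interrupt knobs used above:
 * rte_vhost_enable_guest_notification() tells the guest whether it should
 * kick the backend when it adds buffers to the ring, while the epoll
 * registration maintained by eth_vhost_update_intr() decides whether that
 * kickfd actually wakes up the application's rte_epoll_wait() loop.
 */
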
static void
eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
{
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	if (intr_handle) {
		if (intr_handle->intr_vec)
			free(intr_handle->intr_vec);
		free(intr_handle);
	}

	dev->intr_handle = NULL;
}

static int
eth_vhost_install_intr(struct rte_eth_dev *dev)
{
	struct rte_vhost_vring vring;
	struct vhost_queue *vq;
	int nb_rxq = dev->data->nb_rx_queues;
	int i;
	int ret;

	/* uninstall first if we are reconnecting */
	if (dev->intr_handle)
		eth_vhost_uninstall_intr(dev);

	dev->intr_handle = malloc(sizeof(*dev->intr_handle));
	if (!dev->intr_handle) {
		VHOST_LOG(ERR, "Fail to allocate intr_handle\n");
		return -ENOMEM;
	}
	memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));

	dev->intr_handle->efd_counter_size = sizeof(uint64_t);

	dev->intr_handle->intr_vec =
		malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));

	if (!dev->intr_handle->intr_vec) {
		VHOST_LOG(ERR,
			"Failed to allocate memory for interrupt vector\n");
		free(dev->intr_handle);
		return -ENOMEM;
	}

	VHOST_LOG(INFO, "Prepare intr vec\n");
	for (i = 0; i < nb_rxq; i++) {
		dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
		dev->intr_handle->efds[i] = -1;
		vq = dev->data->rx_queues[i];
		if (!vq) {
			VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i);
			continue;
		}

		ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
		if (ret < 0) {
			VHOST_LOG(INFO,
				"Failed to get rxq-%d's vring, skip!\n", i);
			continue;
		}

		if (vring.kickfd < 0) {
			VHOST_LOG(INFO,
				"rxq-%d's kickfd is invalid, skip!\n", i);
			continue;
		}
		dev->intr_handle->efds[i] = vring.kickfd;
		VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
	}

	dev->intr_handle->nb_efd = nb_rxq;
	dev->intr_handle->max_intr = nb_rxq + 1;
	dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;

	return 0;
}

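/*
 * The handle built above is of type RTE_INTR_HANDLE_VDEV with one efd per
 * rx queue: each efd is the vring's kickfd, so a guest kick on vring
 * (2 * i + 1) is what unblocks Rx interrupt waiters for rx queue i.
 */
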
static void
update_queuing_status(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;
	struct vhost_queue *vq;
	unsigned int i;
	int allow_queuing = 1;

	if (!dev->data->rx_queues || !dev->data->tx_queues)
		return;

	if (rte_atomic32_read(&internal->started) == 0 ||
	    rte_atomic32_read(&internal->dev_attached) == 0)
		allow_queuing = 0;

	/* Wait until rx/tx_pkt_burst stops accessing vhost device */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, allow_queuing);
		while (rte_atomic32_read(&vq->while_queuing))
			rte_pause();
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, allow_queuing);
		while (rte_atomic32_read(&vq->while_queuing))
			rte_pause();
	}
}

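/*
 * The store/spin pair above is the writer side of a simple handshake with
 * the data path: eth_vhost_rx()/eth_vhost_tx() set while_queuing, re-check
 * allow_queuing, and only then touch the vhost device. Waiting for
 * while_queuing to drop to zero therefore guarantees that no burst
 * function is still inside the device before it is stopped or detached.
 */
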
static void
queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
{
	struct vhost_queue *vq;
	int i;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		vq = eth_dev->data->rx_queues[i];
		if (!vq)
			continue;
		vq->vid = internal->vid;
		vq->internal = internal;
		vq->port = eth_dev->data->port_id;
	}
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		vq = eth_dev->data->tx_queues[i];
		if (!vq)
			continue;
		vq->vid = internal->vid;
		vq->internal = internal;
		vq->port = eth_dev->data->port_id;
	}
}

static int
new_device(int vid)
{
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	struct pmd_internal *internal;
	struct rte_eth_conf *dev_conf;
	unsigned i;
	char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
	int newnode;
#endif

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
		return -1;
	}

	eth_dev = list->eth_dev;
	internal = eth_dev->data->dev_private;
	dev_conf = &eth_dev->data->dev_conf;

#ifdef RTE_LIBRTE_VHOST_NUMA
	newnode = rte_vhost_get_numa_node(vid);
	if (newnode >= 0)
		eth_dev->data->numa_node = newnode;
#endif

	internal->vid = vid;
	if (rte_atomic32_read(&internal->started) == 1) {
		queue_setup(eth_dev, internal);

		if (dev_conf->intr_conf.rxq) {
			if (eth_vhost_install_intr(eth_dev) < 0) {
				VHOST_LOG(INFO,
					"Failed to install interrupt handler.");
				return -1;
			}
		}
	} else {
		VHOST_LOG(INFO, "RX/TX queues do not exist yet\n");
	}

	for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
		rte_vhost_enable_guest_notification(vid, i, 0);

	rte_vhost_get_mtu(vid, &eth_dev->data->mtu);

	eth_dev->data->dev_link.link_status = ETH_LINK_UP;

	rte_atomic32_set(&internal->dev_attached, 1);
	update_queuing_status(eth_dev);

	VHOST_LOG(INFO, "Vhost device %d created\n", vid);

	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

	return 0;
}

static void
destroy_device(int vid)
{
	struct rte_eth_dev *eth_dev;
	struct pmd_internal *internal;
	struct vhost_queue *vq;
	struct internal_list *list;
	char ifname[PATH_MAX];
	unsigned i;
	struct rte_vhost_vring_state *state;

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
		return;
	}
	eth_dev = list->eth_dev;
	internal = eth_dev->data->dev_private;

	rte_atomic32_set(&internal->dev_attached, 0);
	update_queuing_status(eth_dev);

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
			vq = eth_dev->data->rx_queues[i];
			if (!vq)
				continue;
			vq->vid = -1;
		}
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
			vq = eth_dev->data->tx_queues[i];
			if (!vq)
				continue;
			vq->vid = -1;
		}
	}

	state = vring_states[eth_dev->data->port_id];
	rte_spinlock_lock(&state->lock);
	for (i = 0; i <= state->max_vring; i++) {
		state->cur[i] = false;
		state->seen[i] = false;
	}
	state->max_vring = 0;
	rte_spinlock_unlock(&state->lock);

	VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
	eth_vhost_uninstall_intr(eth_dev);

	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static int
vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id)
{
	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
	struct pmd_internal *internal = eth_dev->data->dev_private;
	struct vhost_queue *vq;
	struct rte_vhost_vring vring;
	/* Odd vring ids are guest TX rings, which this PMD reads from as
	 * rx queues; even ids have no rx queue behind them.
	 */
	int rx_idx = vring_id % 2 ? (vring_id - 1) >> 1 : -1;
	int ret = 0;

	/*
	 * The vring kickfd may be changed after the new device notification.
	 * Update it when the vring state is updated.
	 */
	if (rx_idx >= 0 && rx_idx < eth_dev->data->nb_rx_queues &&
	    rte_atomic32_read(&internal->dev_attached) &&
	    rte_atomic32_read(&internal->started) &&
	    dev_conf->intr_conf.rxq) {
		ret = rte_vhost_get_vhost_vring(vid, vring_id, &vring);
		if (ret) {
			VHOST_LOG(ERR, "Failed to get vring %d information.\n",
					vring_id);
			return ret;
		}
		eth_dev->intr_handle->efds[rx_idx] = vring.kickfd;

		vq = eth_dev->data->rx_queues[rx_idx];
		if (!vq) {
			VHOST_LOG(ERR, "rxq%d is not setup yet\n", rx_idx);
			return -1;
		}

		rte_spinlock_lock(&vq->intr_lock);
		if (vq->intr_enable)
			ret = eth_vhost_update_intr(eth_dev, rx_idx);
		rte_spinlock_unlock(&vq->intr_lock);
	}

	return ret;
}

static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
	struct rte_vhost_vring_state *state;
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	char ifname[PATH_MAX];

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
		return -1;
	}

	eth_dev = list->eth_dev;
	state = vring_states[eth_dev->data->port_id];

	if (enable && vring_conf_update(vid, eth_dev, vring))
		VHOST_LOG(INFO, "Failed to update vring-%d configuration.\n",
			  (int)vring);

	rte_spinlock_lock(&state->lock);
	if (state->cur[vring] == enable) {
		rte_spinlock_unlock(&state->lock);
		return 0;
	}
	state->cur[vring] = enable;
	state->max_vring = RTE_MAX(vring, state->max_vring);
	rte_spinlock_unlock(&state->lock);

	VHOST_LOG(INFO, "vring%u is %s\n",
			vring, enable ? "enabled" : "disabled");

	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

	return 0;
}

static struct vhost_device_ops vhost_ops = {
	.new_device          = new_device,
	.destroy_device      = destroy_device,
	.vring_state_changed = vring_state_changed,
};

static int
vhost_driver_setup(struct rte_eth_dev *eth_dev)
{
	struct pmd_internal *internal = eth_dev->data->dev_private;
	struct internal_list *list = NULL;
	struct rte_vhost_vring_state *vring_state = NULL;
	unsigned int numa_node = eth_dev->device->numa_node;
	const char *name = eth_dev->device->name;

	/* Don't try to setup again if it has already been done. */
	list = find_internal_resource(internal->iface_name);
	if (list)
		return 0;

	list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
	if (list == NULL)
		return -1;

	vring_state = rte_zmalloc_socket(name, sizeof(*vring_state),
					 0, numa_node);
	if (vring_state == NULL)
		goto free_list;

	list->eth_dev = eth_dev;
	pthread_mutex_lock(&internal_list_lock);
	TAILQ_INSERT_TAIL(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_spinlock_init(&vring_state->lock);
	vring_states[eth_dev->data->port_id] = vring_state;

	if (rte_vhost_driver_register(internal->iface_name, internal->flags))
		goto list_remove;

	if (internal->disable_flags) {
		if (rte_vhost_driver_disable_features(internal->iface_name,
						      internal->disable_flags))
			goto drv_unreg;
	}

	if (rte_vhost_driver_callback_register(internal->iface_name,
					       &vhost_ops) < 0) {
		VHOST_LOG(ERR, "Can't register callbacks\n");
		goto drv_unreg;
	}

	if (rte_vhost_driver_start(internal->iface_name) < 0) {
		VHOST_LOG(ERR, "Failed to start driver for %s\n",
			  internal->iface_name);
		goto drv_unreg;
	}

	return 0;

drv_unreg:
	rte_vhost_driver_unregister(internal->iface_name);
list_remove:
	vring_states[eth_dev->data->port_id] = NULL;
	pthread_mutex_lock(&internal_list_lock);
	TAILQ_REMOVE(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);
	rte_free(vring_state);
free_list:
	rte_free(list);

	return -1;
}

int
rte_eth_vhost_get_queue_event(uint16_t port_id,
		struct rte_eth_vhost_queue_event *event)
{
	struct rte_vhost_vring_state *state;
	unsigned int i;
	int idx;

	if (port_id >= RTE_MAX_ETHPORTS) {
		VHOST_LOG(ERR, "Invalid port id\n");
		return -1;
	}

	state = vring_states[port_id];
	if (!state) {
		VHOST_LOG(ERR, "Unused port\n");
		return -1;
	}

	rte_spinlock_lock(&state->lock);
	for (i = 0; i <= state->max_vring; i++) {
		idx = state->index++ % (state->max_vring + 1);

		if (state->cur[idx] != state->seen[idx]) {
			state->seen[idx] = state->cur[idx];
			event->queue_id = idx / 2;
			event->rx = idx & 1;
			event->enable = state->cur[idx];
			rte_spinlock_unlock(&state->lock);
			return 0;
		}
	}
	rte_spinlock_unlock(&state->lock);

	return -1;
}

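/*
 * Typical use of the event API above (an illustrative sketch, not code
 * from this driver): drain pending events after receiving the
 * QUEUE_STATE callback, e.g.
 *
 *	struct rte_eth_vhost_queue_event ev;
 *
 *	while (rte_eth_vhost_get_queue_event(port_id, &ev) == 0)
 *		printf("queue %u (%s) %s\n", ev.queue_id,
 *		       ev.rx ? "rx" : "tx",
 *		       ev.enable ? "enabled" : "disabled");
 */
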
int
rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
	struct internal_list *list;
	struct rte_eth_dev *eth_dev;
	struct vhost_queue *vq;
	int vid = -1;

	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		eth_dev = list->eth_dev;
		if (eth_dev->data->port_id == port_id) {
			vq = eth_dev->data->rx_queues[0];
			if (vq)
				vid = vq->vid;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	return vid;
}

static int
eth_dev_configure(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;
	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	/* NOTE: the same process has to operate a vhost interface
	 * from beginning to end (from eth_dev configure to eth_dev close).
	 * It is the user's responsibility at the moment.
	 */
	if (vhost_driver_setup(dev) < 0)
		return -1;

	internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);

	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *eth_dev)
{
	struct pmd_internal *internal = eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;

	queue_setup(eth_dev, internal);

	if (rte_atomic32_read(&internal->dev_attached) == 1) {
		if (dev_conf->intr_conf.rxq) {
			if (eth_vhost_install_intr(eth_dev) < 0) {
				VHOST_LOG(INFO,
					"Failed to install interrupt handler.");
				return -1;
			}
		}
	}

	rte_atomic32_set(&internal->started, 1);
	update_queuing_status(eth_dev);

	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;

	rte_atomic32_set(&internal->started, 0);
	update_queuing_status(dev);
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal;
	struct internal_list *list;
	unsigned int i;

	internal = dev->data->dev_private;
	if (!internal)
		return;

	eth_dev_stop(dev);

	list = find_internal_resource(internal->iface_name);
	if (list) {
		rte_vhost_driver_unregister(internal->iface_name);
		pthread_mutex_lock(&internal_list_lock);
		TAILQ_REMOVE(&internal_list, list, next);
		pthread_mutex_unlock(&internal_list_lock);
		rte_free(list);
	}

	if (dev->data->rx_queues)
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			rte_free(dev->data->rx_queues[i]);

	if (dev->data->tx_queues)
		for (i = 0; i < dev->data->nb_tx_queues; i++)
			rte_free(dev->data->tx_queues[i]);

	rte_free(internal->iface_name);
	rte_free(internal);

	dev->data->dev_private = NULL;

	rte_free(vring_states[dev->data->port_id]);
	vring_states[dev->data->port_id] = NULL;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct vhost_queue *vq;

	vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (vq == NULL) {
		VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
		return -ENOMEM;
	}

	vq->mb_pool = mb_pool;
	vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
	rte_spinlock_init(&vq->intr_lock);
	dev->data->rx_queues[rx_queue_id] = vq;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct vhost_queue *vq;

	vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (vq == NULL) {
		VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
		return -ENOMEM;
	}

	vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
	rte_spinlock_init(&vq->intr_lock);
	dev->data->tx_queues[tx_queue_id] = vq;

	return 0;
}

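/*
 * Queue-id to virtqueue-id mapping used by the two setup functions above:
 * the host's rx queue i reads the guest's TX virtqueue (2 * i + 1, via
 * VIRTIO_TXQ) and the host's tx queue i writes the guest's RX virtqueue
 * (2 * i, via VIRTIO_RXQ). For example, rx queue 0 maps to virtqueue 1
 * and tx queue 0 maps to virtqueue 0.
 */
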
static int
eth_dev_info(struct rte_eth_dev *dev,
	     struct rte_eth_dev_info *dev_info)
{
	struct pmd_internal *internal;

	internal = dev->data->dev_private;
	if (internal == NULL) {
		VHOST_LOG(ERR, "Invalid device specified\n");
		return -ENODEV;
	}

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = internal->max_queues;
	dev_info->max_tx_queues = internal->max_queues;
	dev_info->min_rx_bufsize = 0;

	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
				DEV_TX_OFFLOAD_VLAN_INSERT;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

	return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned i;
	unsigned long rx_total = 0, tx_total = 0;
	unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
	struct vhost_queue *vq;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;
		vq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = vq->stats.pkts;
		rx_total += stats->q_ipackets[i];

		stats->q_ibytes[i] = vq->stats.bytes;
		rx_total_bytes += stats->q_ibytes[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;
		vq = dev->data->tx_queues[i];
		stats->q_opackets[i] = vq->stats.pkts;
		tx_total += stats->q_opackets[i];

		stats->q_obytes[i] = vq->stats.bytes;
		tx_total_bytes += stats->q_obytes[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;
	stats->ibytes = rx_total_bytes;
	stats->obytes = tx_total_bytes;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	struct vhost_queue *vq;
	unsigned i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;
		vq = dev->data->rx_queues[i];
		vq->stats.pkts = 0;
		vq->stats.bytes = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;
		vq = dev->data->tx_queues[i];
		vq->stats.pkts = 0;
		vq->stats.bytes = 0;
		vq->stats.missed_pkts = 0;
	}

	return 0;
}

static void
eth_queue_release(void *q)
{
	rte_free(q);
}

static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
	/*
	 * vHost does not hang onto mbufs: eth_vhost_tx() copies packet data
	 * into the guest's buffers and frees the mbuf, so there is nothing
	 * to clean up.
	 */
	return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

static uint32_t
eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct vhost_queue *vq;

	vq = dev->data->rx_queues[rx_queue_id];
	if (vq == NULL)
		return 0;

	return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.tx_done_cleanup = eth_tx_done_cleanup,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.xstats_reset = vhost_dev_xstats_reset,
	.xstats_get = vhost_dev_xstats_get,
	.xstats_get_names = vhost_dev_xstats_get_names,
	.rx_queue_intr_enable = eth_rxq_intr_enable,
	.rx_queue_intr_disable = eth_rxq_intr_disable,
};

static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
	int16_t queues, const unsigned int numa_node, uint64_t flags,
	uint64_t disable_flags)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev_data *data;
	struct pmd_internal *internal = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct rte_ether_addr *eth_addr = NULL;

	VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
		numa_node);

	/* reserve an ethdev entry */
	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
	if (eth_dev == NULL)
		goto error;
	data = eth_dev->data;

	eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
	if (eth_addr == NULL)
		goto error;
	data->mac_addrs = eth_addr;
	*eth_addr = base_eth_addr;
	/* give each port a unique MAC by writing its port id into byte 5 */
	eth_addr->addr_bytes[5] = eth_dev->data->port_id;

	/* now put it all together
	 * - store queue data in internal,
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	internal = eth_dev->data->dev_private;
	internal->iface_name = rte_malloc_socket(name, strlen(iface_name) + 1,
						 0, numa_node);
	if (internal->iface_name == NULL)
		goto error;
	strcpy(internal->iface_name, iface_name);

	data->nb_rx_queues = queues;
	data->nb_tx_queues = queues;
	internal->max_queues = queues;
	internal->vid = -1;
	internal->flags = flags;
	internal->disable_flags = disable_flags;
	data->dev_link = pmd_link;
	data->dev_flags = RTE_ETH_DEV_INTR_LSC | RTE_ETH_DEV_CLOSE_REMOVE;
	data->promiscuous = 1;
	data->all_multicast = 1;

	eth_dev->dev_ops = &ops;
	eth_dev->rx_queue_count = eth_rx_queue_count;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_vhost_rx;
	eth_dev->tx_pkt_burst = eth_vhost_tx;

	rte_eth_dev_probing_finish(eth_dev);
	return 0;

error:
	if (internal)
		rte_free(internal->iface_name);
	rte_eth_dev_release_port(eth_dev);

	return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
	const char **iface_name = extra_args;

	if (value == NULL)
		return -1;

	*iface_name = value;

	return 0;
}

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
	uint16_t *n = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	*n = (uint16_t)strtoul(value, NULL, 0);
	if (*n == USHRT_MAX && errno == ERANGE)
		return -1;

	return 0;
}

static int
rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	char *iface_name;
	uint16_t queues;
	uint64_t flags = 0;
	uint64_t disable_flags = 0;
	int client_mode = 0;
	int dequeue_zero_copy = 0;
	int iommu_support = 0;
	int postcopy_support = 0;
	int tso = 0;
	int linear_buf = 0;
	int ext_buf = 0;
	struct rte_eth_dev *eth_dev;
	const char *name = rte_vdev_device_name(dev);

	VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			VHOST_LOG(ERR, "Failed to probe %s\n", name);
			return -1;
		}
		eth_dev->rx_pkt_burst = eth_vhost_rx;
		eth_dev->tx_pkt_burst = eth_vhost_tx;
		eth_dev->dev_ops = &ops;
		if (dev->device.numa_node == SOCKET_ID_ANY)
			dev->device.numa_node = rte_socket_id();
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL)
		return -1;

	if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
					 &open_iface, &iface_name);
		if (ret < 0)
			goto out_free;
	} else {
		ret = -1;
		goto out_free;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
					 &open_int, &queues);
		if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
			goto out_free;
	} else {
		queues = 1;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
					 &open_int, &client_mode);
		if (ret < 0)
			goto out_free;

		if (client_mode)
			flags |= RTE_VHOST_USER_CLIENT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
					 &open_int, &dequeue_zero_copy);
		if (ret < 0)
			goto out_free;

		if (dequeue_zero_copy)
			flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
					 &open_int, &iommu_support);
		if (ret < 0)
			goto out_free;

		if (iommu_support)
			flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_POSTCOPY_SUPPORT) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_POSTCOPY_SUPPORT,
					 &open_int, &postcopy_support);
		if (ret < 0)
			goto out_free;

		if (postcopy_support)
			flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_VIRTIO_NET_F_HOST_TSO) == 1) {
		ret = rte_kvargs_process(kvlist,
				ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
				&open_int, &tso);
		if (ret < 0)
			goto out_free;

		if (tso == 0) {
			disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
			disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
		}
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_LINEAR_BUF) == 1) {
		ret = rte_kvargs_process(kvlist,
				ETH_VHOST_LINEAR_BUF,
				&open_int, &linear_buf);
		if (ret < 0)
			goto out_free;

		if (linear_buf == 1)
			flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_EXT_BUF) == 1) {
		ret = rte_kvargs_process(kvlist,
				ETH_VHOST_EXT_BUF,
				&open_int, &ext_buf);
		if (ret < 0)
			goto out_free;

		if (ext_buf == 1)
			flags |= RTE_VHOST_USER_EXTBUF_SUPPORT;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	ret = eth_dev_vhost_create(dev, iface_name, queues,
				   dev->device.numa_node, flags,
				   disable_flags);
	if (ret == -1)
		VHOST_LOG(ERR, "Failed to create %s\n", name);

out_free:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
	const char *name;
	struct rte_eth_dev *eth_dev = NULL;

	name = rte_vdev_device_name(dev);
	VHOST_LOG(INFO, "Un-Initializing pmd_vhost for %s\n", name);

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	eth_dev_close(eth_dev);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_vhost_drv = {
	.probe = rte_pmd_vhost_probe,
	.remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
	"iface=<ifc> "
	"queues=<int> "
	"client=<0|1> "
	"dequeue-zero-copy=<0|1> "
	"iommu-support=<0|1> "
	"postcopy-support=<0|1> "
	"tso=<0|1> "
	"linear-buffer=<0|1> "
	"ext-buffer=<0|1>");
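
/*
 * Usage example (illustrative; the socket path and application are
 * arbitrary): create a vhost-user port backed by /tmp/sock0 with one
 * queue pair, in client mode:
 *
 *	dpdk-testpmd --vdev 'net_vhost0,iface=/tmp/sock0,queues=1,client=1' \
 *		-- -i
 */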