/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 IGEL Co., Ltd.
 * Copyright(c) 2016-2018 Intel Corporation
 */

#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/queue.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_errno.h>
#include <rte_ether.h>
#include <rte_mbuf.h>
#include "rte_eth_vhost.h"
RTE_LOG_REGISTER_DEFAULT(vhost_logtype, NOTICE);
#define VHOST_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)
enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
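/* Per the virtio-net layout, each queue pair is two virtqueues: the even
 * index is the guest's RX ring and the odd index its TX ring. This PMD
 * dequeues from the guest TX ring and enqueues into the guest RX ring.
 */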
#define ETH_VHOST_IFACE_ARG		"iface"
#define ETH_VHOST_QUEUES_ARG		"queues"
#define ETH_VHOST_CLIENT_ARG		"client"
#define ETH_VHOST_IOMMU_SUPPORT		"iommu-support"
#define ETH_VHOST_POSTCOPY_SUPPORT	"postcopy-support"
#define ETH_VHOST_VIRTIO_NET_F_HOST_TSO "tso"
#define ETH_VHOST_LINEAR_BUF		"linear-buffer"
#define ETH_VHOST_EXT_BUF		"ext-buffer"
#define VHOST_MAX_PKT_BURST 32
static const char *valid_arguments[] = {
	ETH_VHOST_IFACE_ARG,
	ETH_VHOST_QUEUES_ARG,
	ETH_VHOST_CLIENT_ARG,
	ETH_VHOST_IOMMU_SUPPORT,
	ETH_VHOST_POSTCOPY_SUPPORT,
	ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
	ETH_VHOST_LINEAR_BUF,
	ETH_VHOST_EXT_BUF,
	NULL
};
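/* Base MAC address: the ASCII bytes "VHOST" plus a trailing zero byte that
 * eth_dev_vhost_create() overwrites with the port id.
 */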
static struct rte_ether_addr base_eth_addr = {
	.addr_bytes = {
		0x56 /* V */,
		0x48 /* H */,
		0x4F /* O */,
		0x53 /* S */,
		0x54 /* T */,
		0x00
	}
};
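/* The size-bucket entries from VHOST_UNDERSIZE_PKT through
 * VHOST_512_TO_1023_PKT must stay consecutive and in this order:
 * vhost_update_single_packet_xstats() derives the bucket index directly
 * from the packet length.
 */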
enum vhost_xstats_pkts {
	VHOST_UNDERSIZE_PKT = 0,
	VHOST_64_PKT,
	VHOST_65_TO_127_PKT,
	VHOST_128_TO_255_PKT,
	VHOST_256_TO_511_PKT,
	VHOST_512_TO_1023_PKT,
	VHOST_1024_TO_1522_PKT,
	VHOST_1523_TO_MAX_PKT,
	VHOST_BROADCAST_PKT,
	VHOST_MULTICAST_PKT,
	VHOST_UNICAST_PKT,
	VHOST_PKT,
	VHOST_BYTE,
	VHOST_MISSED_PKT,
	VHOST_ERRORS_PKT,
	VHOST_ERRORS_FRAGMENTED,
	VHOST_ERRORS_JABBER,
	VHOST_UNKNOWN_PROTOCOL,
	VHOST_XSTATS_MAX,
};
struct vhost_stats {
	uint64_t pkts;
	uint64_t bytes;
	uint64_t missed_pkts;
	uint64_t xstats[VHOST_XSTATS_MAX];
};
struct vhost_queue {
	int vid;
	rte_atomic32_t allow_queuing;
	rte_atomic32_t while_queuing;
	struct pmd_internal *internal;
	struct rte_mempool *mb_pool;
	uint16_t port;
	uint16_t virtqueue_id;
	struct vhost_stats stats;
	int intr_enable;
	rte_spinlock_t intr_lock;
};
struct pmd_internal {
	rte_atomic32_t dev_attached;
	char *iface_name;
	uint64_t flags;
	uint64_t disable_flags;
	uint16_t max_queues;
	int vid;
	rte_atomic32_t started;
	bool vlan_strip;
};
struct internal_list {
	TAILQ_ENTRY(internal_list) next;
	struct rte_eth_dev *eth_dev;
};
TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
	TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct rte_eth_link pmd_link = {
	.link_speed = 10000,
	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
	.link_status = RTE_ETH_LINK_DOWN
};
struct rte_vhost_vring_state {
	rte_spinlock_t lock;

	bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
	bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
	unsigned int index;
	unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
	char name[VHOST_XSTATS_NAME_SIZE];
	uint64_t offset;
};
/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
	{"good_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
	{"total_bytes",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
	{"missed_pkts",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
	{"broadcast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
	{"multicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
	{"unicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
	{"undersize_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
	{"size_64_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
	{"size_65_to_127_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
	{"size_128_to_255_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
	{"size_256_to_511_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
	{"size_512_to_1023_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
	{"size_1024_to_1522_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
	{"size_1523_to_max_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
	{"errors_with_bad_CRC",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
	{"fragmented_errors",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
	{"jabber_errors",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
	{"unknown_protos_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};
/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
	{"good_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
	{"total_bytes",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
	{"missed_pkts",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
	{"broadcast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
	{"multicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
	{"unicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
	{"undersize_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
	{"size_64_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
	{"size_65_to_127_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
	{"size_128_to_255_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
	{"size_256_to_511_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
	{"size_512_to_1023_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
	{"size_1024_to_1522_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
	{"size_1523_to_max_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
	{"errors_with_bad_CRC",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};
#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
				sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
				sizeof(vhost_txport_stat_strings[0]))
static int
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct vhost_queue *vq = NULL;
	unsigned int i = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (!vq)
			continue;
		memset(&vq->stats, 0, sizeof(vq->stats));
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (!vq)
			continue;
		memset(&vq->stats, 0, sizeof(vq->stats));
	}

	return 0;
}
static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int limit __rte_unused)
{
	unsigned int t = 0;
	int count = 0;
	int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

	if (!xstats_names)
		return nstats;
	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "rx_%s", vhost_rxport_stat_strings[t].name);
		count++;
	}
	for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "tx_%s", vhost_txport_stat_strings[t].name);
		count++;
	}
	return count;
}
static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	unsigned int i;
	unsigned int t;
	unsigned int count = 0;
	struct vhost_queue *vq = NULL;
	unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

	if (n < nxstats)
		return nxstats;

	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
		xstats[count].value = 0;
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			vq = dev->data->rx_queues[i];
			if (!vq)
				continue;
			xstats[count].value +=
				*(uint64_t *)(((char *)vq)
				+ vhost_rxport_stat_strings[t].offset);
		}
		xstats[count].id = count;
		count++;
	}
	for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
		xstats[count].value = 0;
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			vq = dev->data->tx_queues[i];
			if (!vq)
				continue;
			xstats[count].value +=
				*(uint64_t *)(((char *)vq)
				+ vhost_txport_stat_strings[t].offset);
		}
		xstats[count].id = count;
		count++;
	}
	return count;
}
static inline void
vhost_count_xcast_packets(struct vhost_queue *vq,
				struct rte_mbuf *mbuf)
{
	struct rte_ether_addr *ea = NULL;
	struct vhost_stats *pstats = &vq->stats;

	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
	if (rte_is_multicast_ether_addr(ea)) {
		if (rte_is_broadcast_ether_addr(ea))
			pstats->xstats[VHOST_BROADCAST_PKT]++;
		else
			pstats->xstats[VHOST_MULTICAST_PKT]++;
	} else {
		pstats->xstats[VHOST_UNICAST_PKT]++;
	}
}
static __rte_always_inline void
vhost_update_single_packet_xstats(struct vhost_queue *vq, struct rte_mbuf *buf)
{
	uint32_t pkt_len = 0;
	uint64_t index;
	struct vhost_stats *pstats = &vq->stats;

	pstats->xstats[VHOST_PKT]++;
	pkt_len = buf->pkt_len;
	if (pkt_len == 64) {
		pstats->xstats[VHOST_64_PKT]++;
	} else if (pkt_len > 64 && pkt_len < 1024) {
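		/* The buckets VHOST_64_PKT..VHOST_512_TO_1023_PKT are
		 * consecutive enum values, so the index can be derived from
		 * the bit length of pkt_len: e.g. 65..127 takes 7 bits,
		 * giving 32 - 25 - 5 = 2 == VHOST_65_TO_127_PKT.
		 */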
		index = (sizeof(pkt_len) * 8)
			- __builtin_clz(pkt_len) - 5;
		pstats->xstats[index]++;
	} else {
		if (pkt_len < 64)
			pstats->xstats[VHOST_UNDERSIZE_PKT]++;
		else if (pkt_len <= 1522)
			pstats->xstats[VHOST_1024_TO_1522_PKT]++;
		else if (pkt_len > 1522)
			pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
	}

	vhost_count_xcast_packets(vq, buf);
}
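/* RX burst: dequeues packets from the guest's TX virtqueue into the mbuf
 * array. allow_queuing/while_queuing form a lock-free handshake with
 * update_queuing_status() so the vhost device is never accessed while it
 * is being attached or detached.
 */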
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct vhost_queue *r = q;
	uint16_t i, nb_rx = 0;
	uint16_t nb_receive = nb_bufs;

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		return 0;

	rte_atomic32_set(&r->while_queuing, 1);

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		goto out;

	/* Dequeue packets from guest TX queue */
	while (nb_receive) {
		uint16_t nb_pkts;
		uint16_t num = (uint16_t)RTE_MIN(nb_receive,
						 VHOST_MAX_PKT_BURST);

		nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
						  r->mb_pool, &bufs[nb_rx],
						  num);

		nb_rx += nb_pkts;
		nb_receive -= nb_pkts;
		if (nb_pkts < num)
			break;
	}

	r->stats.pkts += nb_rx;

	for (i = 0; likely(i < nb_rx); i++) {
		bufs[i]->port = r->port;
		bufs[i]->vlan_tci = 0;

		if (r->internal->vlan_strip)
			rte_vlan_strip(bufs[i]);

		r->stats.bytes += bufs[i]->pkt_len;
		r->stats.xstats[VHOST_BYTE] += bufs[i]->pkt_len;

		vhost_update_single_packet_xstats(r, bufs[i]);
	}

out:
	rte_atomic32_set(&r->while_queuing, 0);

	return nb_rx;
}
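/* TX burst: copies mbufs into the guest's RX virtqueue and frees them.
 * Packets the guest cannot accept are counted as missed.
 */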
static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct vhost_queue *r = q;
	uint16_t i, nb_tx = 0;
	uint16_t nb_send = 0;
	uint64_t nb_bytes = 0;
	uint64_t nb_missed = 0;

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		return 0;

	rte_atomic32_set(&r->while_queuing, 1);

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		goto out;

	for (i = 0; i < nb_bufs; i++) {
		struct rte_mbuf *m = bufs[i];

		/* Do VLAN tag insertion */
		if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
			int error = rte_vlan_insert(&m);
			if (unlikely(error)) {
				rte_pktmbuf_free(m);
				continue;
			}
		}

		bufs[nb_send] = m;
		++nb_send;
	}

	/* Enqueue packets to guest RX queue */
	while (nb_send) {
		uint16_t nb_pkts;
		uint16_t num = (uint16_t)RTE_MIN(nb_send,
						 VHOST_MAX_PKT_BURST);

		nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
						  &bufs[nb_tx], num);

		nb_tx += nb_pkts;
		nb_send -= nb_pkts;
		if (nb_pkts < num)
			break;
	}

	for (i = 0; likely(i < nb_tx); i++) {
		nb_bytes += bufs[i]->pkt_len;
		vhost_update_single_packet_xstats(r, bufs[i]);
	}

	nb_missed = nb_bufs - nb_tx;

	r->stats.pkts += nb_tx;
	r->stats.bytes += nb_bytes;
	r->stats.missed_pkts += nb_missed;

	r->stats.xstats[VHOST_BYTE] += nb_bytes;
	r->stats.xstats[VHOST_MISSED_PKT] += nb_missed;
	r->stats.xstats[VHOST_UNICAST_PKT] += nb_missed;

	/* According to RFC2863, ifHCOutUcastPkts, ifHCOutMulticastPkts and
	 * ifHCOutBroadcastPkts counters are increased when packets are not
	 * transmitted successfully.
	 */
	for (i = nb_tx; i < nb_bufs; i++)
		vhost_count_xcast_packets(r, bufs[i]);

	for (i = 0; likely(i < nb_tx); i++)
		rte_pktmbuf_free(bufs[i]);
out:
	rte_atomic32_set(&r->while_queuing, 0);

	return nb_tx;
}
static inline struct internal_list *
find_internal_resource(char *ifname)
{
	int found = 0;
	struct internal_list *list;
	struct pmd_internal *internal;

	if (ifname == NULL)
		return NULL;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		internal = list->eth_dev->data->dev_private;
		if (!strcmp(internal->iface_name, ifname)) {
			found = 1;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	if (!found)
		return NULL;

	return list;
}
static int
eth_vhost_update_intr(struct rte_eth_dev *eth_dev, uint16_t rxq_idx)
{
	struct rte_intr_handle *handle = eth_dev->intr_handle;
	struct rte_epoll_event rev, *elist;
	int epfd, ret;

	if (handle == NULL)
		return 0;

	elist = rte_intr_elist_index_get(handle, rxq_idx);
	if (rte_intr_efds_index_get(handle, rxq_idx) == elist->fd)
		return 0;

	VHOST_LOG(INFO, "kickfd for rxq-%d was changed, updating handler.\n",
			rxq_idx);

	if (elist->fd != -1)
		VHOST_LOG(ERR, "Unexpected previous kickfd value (Got %d, expected -1).\n",
				elist->fd);

	/*
	 * First remove invalid epoll event, and then install
	 * the new one. May be solved with a proper API in the
	 * future.
	 */
	epfd = elist->epfd;
	rev = *elist;
	ret = rte_epoll_ctl(epfd, EPOLL_CTL_DEL, rev.fd,
			elist);
	if (ret) {
		VHOST_LOG(ERR, "Delete epoll event failed.\n");
		return ret;
	}

	rev.fd = rte_intr_efds_index_get(handle, rxq_idx);
	if (rte_intr_elist_index_set(handle, rxq_idx, rev))
		return -rte_errno;

	elist = rte_intr_elist_index_get(handle, rxq_idx);
	ret = rte_epoll_ctl(epfd, EPOLL_CTL_ADD, rev.fd, elist);
	if (ret) {
		VHOST_LOG(ERR, "Add epoll event failed.\n");
		return ret;
	}

	return 0;
}
static int
eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
{
	struct rte_vhost_vring vring;
	struct vhost_queue *vq;
	int old_intr_enable, ret = 0;

	vq = dev->data->rx_queues[qid];
	if (!vq) {
		VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
		return -1;
	}

	rte_spinlock_lock(&vq->intr_lock);
	old_intr_enable = vq->intr_enable;
	vq->intr_enable = 1;
	ret = eth_vhost_update_intr(dev, qid);
	rte_spinlock_unlock(&vq->intr_lock);

	if (ret < 0) {
		VHOST_LOG(ERR, "Failed to update rxq%d's intr\n", qid);
		vq->intr_enable = old_intr_enable;
		return ret;
	}

	ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
	if (ret < 0) {
		VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
		return ret;
	}
	VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
	rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
	rte_wmb();

	return ret;
}
static int
eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
{
	struct rte_vhost_vring vring;
	struct vhost_queue *vq;
	int ret = 0;

	vq = dev->data->rx_queues[qid];
	if (!vq) {
		VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
		return -1;
	}

	ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
	if (ret < 0) {
		VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
		return ret;
	}
	VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
	rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
	rte_wmb();

	vq->intr_enable = 0;

	return 0;
}
static void
eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
{
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	if (intr_handle != NULL) {
		rte_intr_vec_list_free(intr_handle);
		rte_intr_instance_free(intr_handle);
	}
	dev->intr_handle = NULL;
}
static int
eth_vhost_install_intr(struct rte_eth_dev *dev)
{
	struct rte_vhost_vring vring;
	struct vhost_queue *vq;
	int nb_rxq = dev->data->nb_rx_queues;
	int i;
	int ret;

	/* uninstall firstly if we are reconnecting */
	if (dev->intr_handle != NULL)
		eth_vhost_uninstall_intr(dev);

	dev->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
	if (dev->intr_handle == NULL) {
		VHOST_LOG(ERR, "Fail to allocate intr_handle\n");
		return -ENOMEM;
	}
	if (rte_intr_efd_counter_size_set(dev->intr_handle, sizeof(uint64_t)))
		return -rte_errno;

	if (rte_intr_vec_list_alloc(dev->intr_handle, NULL, nb_rxq)) {
		VHOST_LOG(ERR,
			"Failed to allocate memory for interrupt vector\n");
		rte_intr_instance_free(dev->intr_handle);
		return -ENOMEM;
	}

	VHOST_LOG(INFO, "Prepare intr vec\n");
	for (i = 0; i < nb_rxq; i++) {
		if (rte_intr_vec_list_index_set(dev->intr_handle, i, RTE_INTR_VEC_RXTX_OFFSET + i))
			return -rte_errno;
		if (rte_intr_efds_index_set(dev->intr_handle, i, -1))
			return -rte_errno;
		vq = dev->data->rx_queues[i];
		if (!vq) {
			VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i);
			continue;
		}

		ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
		if (ret < 0) {
			VHOST_LOG(INFO,
				"Failed to get rxq-%d's vring, skip!\n", i);
			continue;
		}

		if (vring.kickfd < 0) {
			VHOST_LOG(INFO,
				"rxq-%d's kickfd is invalid, skip!\n", i);
			continue;
		}

		if (rte_intr_efds_index_set(dev->intr_handle, i, vring.kickfd))
			continue;
		VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
	}

	if (rte_intr_nb_efd_set(dev->intr_handle, nb_rxq))
		return -rte_errno;

	if (rte_intr_max_intr_set(dev->intr_handle, nb_rxq + 1))
		return -rte_errno;

	if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_VDEV))
		return -rte_errno;

	return 0;
}
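/* Synchronize the control path with the rx/tx burst functions: set or clear
 * allow_queuing, then wait for any in-flight burst (while_queuing) to drain
 * before device state changes proceed.
 */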
static void
update_queuing_status(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;
	struct vhost_queue *vq;
	unsigned int i;
	int allow_queuing = 1;

	if (!dev->data->rx_queues || !dev->data->tx_queues)
		return;

	if (rte_atomic32_read(&internal->started) == 0 ||
	    rte_atomic32_read(&internal->dev_attached) == 0)
		allow_queuing = 0;

	/* Wait until rx/tx_pkt_burst stops accessing vhost device */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, allow_queuing);
		while (rte_atomic32_read(&vq->while_queuing))
			rte_pause();
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, allow_queuing);
		while (rte_atomic32_read(&vq->while_queuing))
			rte_pause();
	}
}
static void
queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
{
	struct vhost_queue *vq;
	int i;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		vq = eth_dev->data->rx_queues[i];
		if (!vq)
			continue;
		vq->vid = internal->vid;
		vq->internal = internal;
		vq->port = eth_dev->data->port_id;
	}
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		vq = eth_dev->data->tx_queues[i];
		if (!vq)
			continue;
		vq->vid = internal->vid;
		vq->internal = internal;
		vq->port = eth_dev->data->port_id;
	}
}
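/* vhost library callback: a guest has connected to the socket. Bind the new
 * vid to the queues, bring the link up, and unblock the datapath.
 */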
static int
new_device(int vid)
{
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	struct pmd_internal *internal;
	struct rte_eth_conf *dev_conf;
	unsigned i;
	char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
	int newnode;
#endif

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
		return -1;
	}

	eth_dev = list->eth_dev;
	internal = eth_dev->data->dev_private;
	dev_conf = &eth_dev->data->dev_conf;

#ifdef RTE_LIBRTE_VHOST_NUMA
	newnode = rte_vhost_get_numa_node(vid);
	if (newnode >= 0)
		eth_dev->data->numa_node = newnode;
#endif

	internal->vid = vid;
	if (rte_atomic32_read(&internal->started) == 1) {
		queue_setup(eth_dev, internal);

		if (dev_conf->intr_conf.rxq) {
			if (eth_vhost_install_intr(eth_dev) < 0) {
				VHOST_LOG(ERR,
					"Failed to install interrupt handler.\n");
				return -1;
			}
		}
	} else {
		VHOST_LOG(INFO, "RX/TX queues not exist yet\n");
	}

	for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
		rte_vhost_enable_guest_notification(vid, i, 0);

	rte_vhost_get_mtu(vid, &eth_dev->data->mtu);

	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;

	rte_atomic32_set(&internal->dev_attached, 1);
	update_queuing_status(eth_dev);

	VHOST_LOG(INFO, "Vhost device %d created\n", vid);

	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

	return 0;
}
static void
destroy_device(int vid)
{
	struct rte_eth_dev *eth_dev;
	struct pmd_internal *internal;
	struct vhost_queue *vq;
	struct internal_list *list;
	char ifname[PATH_MAX];
	unsigned i;
	struct rte_vhost_vring_state *state;

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
		return;
	}
	eth_dev = list->eth_dev;
	internal = eth_dev->data->dev_private;

	rte_atomic32_set(&internal->dev_attached, 0);
	update_queuing_status(eth_dev);

	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
			vq = eth_dev->data->rx_queues[i];
			if (!vq)
				continue;
			vq->vid = -1;
		}
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
			vq = eth_dev->data->tx_queues[i];
			if (!vq)
				continue;
			vq->vid = -1;
		}
	}

	state = vring_states[eth_dev->data->port_id];
	rte_spinlock_lock(&state->lock);
	for (i = 0; i <= state->max_vring; i++) {
		state->cur[i] = false;
		state->seen[i] = false;
	}
	state->max_vring = 0;
	rte_spinlock_unlock(&state->lock);

	VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
	eth_vhost_uninstall_intr(eth_dev);

	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int
vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id)
{
	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
	struct pmd_internal *internal = eth_dev->data->dev_private;
	struct vhost_queue *vq;
	struct rte_vhost_vring vring;
	int rx_idx = vring_id % 2 ? (vring_id - 1) >> 1 : -1;
	int ret = 0;

	/*
	 * The vring kickfd may be changed after the new device notification.
	 * Update it when the vring state is updated.
	 */
	if (rx_idx >= 0 && rx_idx < eth_dev->data->nb_rx_queues &&
	    rte_atomic32_read(&internal->dev_attached) &&
	    rte_atomic32_read(&internal->started) &&
	    dev_conf->intr_conf.rxq) {
		ret = rte_vhost_get_vhost_vring(vid, vring_id, &vring);
		if (ret) {
			VHOST_LOG(ERR, "Failed to get vring %d information.\n",
					vring_id);
			return ret;
		}

		if (rte_intr_efds_index_set(eth_dev->intr_handle, rx_idx,
					    vring.kickfd))
			return -rte_errno;

		vq = eth_dev->data->rx_queues[rx_idx];
		if (!vq) {
			VHOST_LOG(ERR, "rxq%d is not setup yet\n", rx_idx);
			return -1;
		}

		rte_spinlock_lock(&vq->intr_lock);
		if (vq->intr_enable)
			ret = eth_vhost_update_intr(eth_dev, rx_idx);
		rte_spinlock_unlock(&vq->intr_lock);
	}

	return ret;
}
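/* vhost library callback: a vring was enabled or disabled by the guest.
 * Record the new state and emit a QUEUE_STATE event; applications can then
 * retrieve the change with rte_eth_vhost_get_queue_event().
 */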
static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
	struct rte_vhost_vring_state *state;
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	char ifname[PATH_MAX];

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
		return -1;
	}

	eth_dev = list->eth_dev;
	/* won't be NULL */
	state = vring_states[eth_dev->data->port_id];

	if (enable && vring_conf_update(vid, eth_dev, vring))
		VHOST_LOG(INFO, "Failed to update vring-%d configuration.\n",
			  (int)vring);

	rte_spinlock_lock(&state->lock);
	if (state->cur[vring] == enable) {
		rte_spinlock_unlock(&state->lock);
		return 0;
	}
	state->cur[vring] = enable;
	state->max_vring = RTE_MAX(vring, state->max_vring);
	rte_spinlock_unlock(&state->lock);

	VHOST_LOG(INFO, "vring%u is %s\n",
			vring, enable ? "enabled" : "disabled");

	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

	return 0;
}
static struct vhost_device_ops vhost_ops = {
	.new_device          = new_device,
	.destroy_device      = destroy_device,
	.vring_state_changed = vring_state_changed,
};
static int
vhost_driver_setup(struct rte_eth_dev *eth_dev)
{
	struct pmd_internal *internal = eth_dev->data->dev_private;
	struct internal_list *list = NULL;
	struct rte_vhost_vring_state *vring_state = NULL;
	unsigned int numa_node = eth_dev->device->numa_node;
	const char *name = eth_dev->device->name;

	/* Don't try to setup again if it has already been done. */
	list = find_internal_resource(internal->iface_name);
	if (list)
		return 0;

	list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
	if (list == NULL)
		return -1;

	vring_state = rte_zmalloc_socket(name, sizeof(*vring_state),
					 0, numa_node);
	if (vring_state == NULL)
		goto free_list;

	list->eth_dev = eth_dev;
	pthread_mutex_lock(&internal_list_lock);
	TAILQ_INSERT_TAIL(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_spinlock_init(&vring_state->lock);
	vring_states[eth_dev->data->port_id] = vring_state;

	if (rte_vhost_driver_register(internal->iface_name, internal->flags))
		goto list_remove;

	if (internal->disable_flags) {
		if (rte_vhost_driver_disable_features(internal->iface_name,
						      internal->disable_flags))
			goto drv_unreg;
	}

	if (rte_vhost_driver_callback_register(internal->iface_name,
					       &vhost_ops) < 0) {
		VHOST_LOG(ERR, "Can't register callbacks\n");
		goto drv_unreg;
	}

	if (rte_vhost_driver_start(internal->iface_name) < 0) {
		VHOST_LOG(ERR, "Failed to start driver for %s\n",
			  internal->iface_name);
		goto drv_unreg;
	}

	return 0;

drv_unreg:
	rte_vhost_driver_unregister(internal->iface_name);
list_remove:
	vring_states[eth_dev->data->port_id] = NULL;
	pthread_mutex_lock(&internal_list_lock);
	TAILQ_REMOVE(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);
	rte_free(vring_state);
free_list:
	rte_free(list);

	return -1;
}
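/* Public API (declared in rte_eth_vhost.h): poll for the next queue-state
 * change recorded by vring_state_changed(); returns 0 and fills *event when
 * a change is pending, -1 otherwise.
 */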
int
rte_eth_vhost_get_queue_event(uint16_t port_id,
		struct rte_eth_vhost_queue_event *event)
{
	struct rte_vhost_vring_state *state;
	unsigned int i;
	int idx;

	if (port_id >= RTE_MAX_ETHPORTS) {
		VHOST_LOG(ERR, "Invalid port id\n");
		return -1;
	}

	state = vring_states[port_id];
	if (!state) {
		VHOST_LOG(ERR, "Unused port\n");
		return -1;
	}

	rte_spinlock_lock(&state->lock);
	for (i = 0; i <= state->max_vring; i++) {
		idx = state->index++ % (state->max_vring + 1);

		if (state->cur[idx] != state->seen[idx]) {
			state->seen[idx] = state->cur[idx];
			event->queue_id = idx / 2;
			event->rx = idx & 1;
			event->enable = state->cur[idx];
			rte_spinlock_unlock(&state->lock);
			return 0;
		}
	}
	rte_spinlock_unlock(&state->lock);

	return -1;
}
int
rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
	struct internal_list *list;
	struct rte_eth_dev *eth_dev;
	struct vhost_queue *vq;
	int vid = -1;

	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		eth_dev = list->eth_dev;
		if (eth_dev->data->port_id == port_id) {
			vq = eth_dev->data->rx_queues[0];
			if (vq) {
				vid = vq->vid;
			}
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	return vid;
}
static int
eth_dev_configure(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;
	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	/* NOTE: the same process has to operate a vhost interface
	 * from beginning to end (from eth_dev configure to eth_dev close).
	 * It is the user's responsibility at the moment.
	 */
	if (vhost_driver_setup(dev) < 0)
		return -1;

	internal->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);

	return 0;
}
static int
eth_dev_start(struct rte_eth_dev *eth_dev)
{
	struct pmd_internal *internal = eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;

	queue_setup(eth_dev, internal);

	if (rte_atomic32_read(&internal->dev_attached) == 1) {
		if (dev_conf->intr_conf.rxq) {
			if (eth_vhost_install_intr(eth_dev) < 0) {
				VHOST_LOG(ERR,
					"Failed to install interrupt handler.\n");
				return -1;
			}
		}
	}

	rte_atomic32_set(&internal->started, 1);
	update_queuing_status(eth_dev);

	return 0;
}
static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;

	dev->data->dev_started = 0;
	rte_atomic32_set(&internal->started, 0);
	update_queuing_status(dev);

	return 0;
}
static int
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal;
	struct internal_list *list;
	unsigned int i, ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	internal = dev->data->dev_private;
	if (!internal)
		return 0;

	ret = eth_dev_stop(dev);

	list = find_internal_resource(internal->iface_name);
	if (list) {
		rte_vhost_driver_unregister(internal->iface_name);
		pthread_mutex_lock(&internal_list_lock);
		TAILQ_REMOVE(&internal_list, list, next);
		pthread_mutex_unlock(&internal_list_lock);
		rte_free(list);
	}

	if (dev->data->rx_queues)
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			rte_free(dev->data->rx_queues[i]);

	if (dev->data->tx_queues)
		for (i = 0; i < dev->data->nb_tx_queues; i++)
			rte_free(dev->data->tx_queues[i]);

	rte_free(internal->iface_name);
	rte_free(internal);

	dev->data->dev_private = NULL;

	rte_free(vring_states[dev->data->port_id]);
	vring_states[dev->data->port_id] = NULL;

	return ret;
}
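/* Queue setup note: an ethdev RX queue reads from the guest's TX virtqueue
 * (odd index, rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ), while an ethdev TX
 * queue writes to the guest's RX virtqueue (even index).
 */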
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct vhost_queue *vq;

	vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (vq == NULL) {
		VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
		return -ENOMEM;
	}

	vq->mb_pool = mb_pool;
	vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
	rte_spinlock_init(&vq->intr_lock);
	dev->data->rx_queues[rx_queue_id] = vq;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct vhost_queue *vq;

	vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (vq == NULL) {
		VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
		return -ENOMEM;
	}

	vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
	rte_spinlock_init(&vq->intr_lock);
	dev->data->tx_queues[tx_queue_id] = vq;

	return 0;
}
static int
eth_dev_info(struct rte_eth_dev *dev,
	     struct rte_eth_dev_info *dev_info)
{
	struct pmd_internal *internal;

	internal = dev->data->dev_private;
	if (internal == NULL) {
		VHOST_LOG(ERR, "Invalid device specified\n");
		return -ENODEV;
	}

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = internal->max_queues;
	dev_info->max_tx_queues = internal->max_queues;
	dev_info->min_rx_bufsize = 0;

	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
				RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;

	return 0;
}
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned i;
	unsigned long rx_total = 0, tx_total = 0;
	unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
	struct vhost_queue *vq;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;
		vq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = vq->stats.pkts;
		rx_total += stats->q_ipackets[i];

		stats->q_ibytes[i] = vq->stats.bytes;
		rx_total_bytes += stats->q_ibytes[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;
		vq = dev->data->tx_queues[i];
		stats->q_opackets[i] = vq->stats.pkts;
		tx_total += stats->q_opackets[i];

		stats->q_obytes[i] = vq->stats.bytes;
		tx_total_bytes += stats->q_obytes[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;
	stats->ibytes = rx_total_bytes;
	stats->obytes = tx_total_bytes;

	return 0;
}
static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	struct vhost_queue *vq;
	unsigned i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;
		vq = dev->data->rx_queues[i];
		vq->stats.pkts = 0;
		vq->stats.bytes = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;
		vq = dev->data->tx_queues[i];
		vq->stats.pkts = 0;
		vq->stats.bytes = 0;
		vq->stats.missed_pkts = 0;
	}

	return 0;
}
static void
eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	rte_free(dev->data->rx_queues[qid]);
}

static void
eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	rte_free(dev->data->tx_queues[qid]);
}
static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
	/*
	 * vhost does not hang onto mbufs: eth_vhost_tx() copies packet data
	 * into the guest's buffers and frees the mbufs, so there is nothing
	 * to clean up.
	 */
	return 0;
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}
static uint32_t
eth_rx_queue_count(void *rx_queue)
{
	struct vhost_queue *vq;

	vq = rx_queue;
	if (vq == NULL)
		return 0;

	return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_release = eth_tx_queue_release,
	.tx_done_cleanup = eth_tx_done_cleanup,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.xstats_reset = vhost_dev_xstats_reset,
	.xstats_get = vhost_dev_xstats_get,
	.xstats_get_names = vhost_dev_xstats_get_names,
	.rx_queue_intr_enable = eth_rxq_intr_enable,
	.rx_queue_intr_disable = eth_rxq_intr_disable,
};
static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
	int16_t queues, const unsigned int numa_node, uint64_t flags,
	uint64_t disable_flags)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev_data *data;
	struct pmd_internal *internal = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct rte_ether_addr *eth_addr = NULL;

	VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
		numa_node);

	/* reserve an ethdev entry */
	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
	if (eth_dev == NULL)
		goto error;
	data = eth_dev->data;

	eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
	if (eth_addr == NULL)
		goto error;
	data->mac_addrs = eth_addr;
	*eth_addr = base_eth_addr;
	eth_addr->addr_bytes[5] = eth_dev->data->port_id;

	/* now put it all together
	 * - store queue data in internal,
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	internal = eth_dev->data->dev_private;
	internal->iface_name = rte_malloc_socket(name, strlen(iface_name) + 1,
						 0, numa_node);
	if (internal->iface_name == NULL)
		goto error;
	strcpy(internal->iface_name, iface_name);

	data->nb_rx_queues = queues;
	data->nb_tx_queues = queues;
	internal->max_queues = queues;
	internal->vid = -1;
	internal->flags = flags;
	internal->disable_flags = disable_flags;
	data->dev_link = pmd_link;
	data->dev_flags = RTE_ETH_DEV_INTR_LSC |
				RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	data->promiscuous = 1;
	data->all_multicast = 1;

	eth_dev->dev_ops = &ops;
	eth_dev->rx_queue_count = eth_rx_queue_count;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_vhost_rx;
	eth_dev->tx_pkt_burst = eth_vhost_tx;

	rte_eth_dev_probing_finish(eth_dev);
	return 0;

error:
	if (internal)
		rte_free(internal->iface_name);
	rte_eth_dev_release_port(eth_dev);

	return -1;
}
static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
	const char **iface_name = extra_args;

	if (value == NULL)
		return -1;

	*iface_name = value;

	return 0;
}
static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
	uint16_t *n = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	*n = (uint16_t)strtoul(value, NULL, 0);
	if (*n == USHRT_MAX && errno == ERANGE)
		return -ERANGE;

	return 0;
}
static int
rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	char *iface_name;
	uint16_t queues;
	uint64_t flags = 0;
	uint64_t disable_flags = 0;
	int client_mode = 0;
	int iommu_support = 0;
	int postcopy_support = 0;
	int tso = 0;
	int linear_buf = 0;
	int ext_buf = 0;
	struct rte_eth_dev *eth_dev;
	const char *name = rte_vdev_device_name(dev);

	VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			VHOST_LOG(ERR, "Failed to probe %s\n", name);
			return -1;
		}
		eth_dev->rx_pkt_burst = eth_vhost_rx;
		eth_dev->tx_pkt_burst = eth_vhost_tx;
		eth_dev->dev_ops = &ops;
		if (dev->device.numa_node == SOCKET_ID_ANY)
			dev->device.numa_node = rte_socket_id();
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL)
		return -1;

	if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
					 &open_iface, &iface_name);
		if (ret < 0)
			goto out_free;
	} else {
		ret = -1;
		goto out_free;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
					 &open_int, &queues);
		if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
			goto out_free;
	} else
		queues = 1;

	if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
					 &open_int, &client_mode);
		if (ret < 0)
			goto out_free;

		if (client_mode)
			flags |= RTE_VHOST_USER_CLIENT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
					 &open_int, &iommu_support);
		if (ret < 0)
			goto out_free;

		if (iommu_support)
			flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_POSTCOPY_SUPPORT) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_POSTCOPY_SUPPORT,
					 &open_int, &postcopy_support);
		if (ret < 0)
			goto out_free;

		if (postcopy_support)
			flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_VIRTIO_NET_F_HOST_TSO) == 1) {
		ret = rte_kvargs_process(kvlist,
				ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
				&open_int, &tso);
		if (ret < 0)
			goto out_free;

		if (tso == 0) {
			disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
			disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
		}
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_LINEAR_BUF) == 1) {
		ret = rte_kvargs_process(kvlist,
				ETH_VHOST_LINEAR_BUF,
				&open_int, &linear_buf);
		if (ret < 0)
			goto out_free;

		if (linear_buf == 1)
			flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_EXT_BUF) == 1) {
		ret = rte_kvargs_process(kvlist,
				ETH_VHOST_EXT_BUF,
				&open_int, &ext_buf);
		if (ret < 0)
			goto out_free;

		if (ext_buf == 1)
			flags |= RTE_VHOST_USER_EXTBUF_SUPPORT;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	ret = eth_dev_vhost_create(dev, iface_name, queues,
				   dev->device.numa_node, flags, disable_flags);
	if (ret == -1)
		VHOST_LOG(ERR, "Failed to create %s\n", name);

out_free:
	rte_kvargs_free(kvlist);
	return ret;
}
static int
rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
	const char *name;
	struct rte_eth_dev *eth_dev = NULL;

	name = rte_vdev_device_name(dev);
	VHOST_LOG(INFO, "Uninitializing pmd_vhost for %s\n", name);

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return 0;

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_vhost_drv = {
	.probe = rte_pmd_vhost_probe,
	.remove = rte_pmd_vhost_remove,
};
RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
	"iface=<ifc> "
	"queues=<int> "
	"client=<0|1> "
	"iommu-support=<0|1> "
	"postcopy-support=<0|1> "
	"tso=<0|1> "
	"linear-buffer=<0|1> "
	"ext-buffer=<0|1>");