/*
 * Copyright (c) 2016 IGEL Co., Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of IGEL Co.,Ltd. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <limits.h>
#include <sys/queue.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_virtio_net.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"
#define ETH_VHOST_IFACE_ARG	"iface"
#define ETH_VHOST_QUEUES_ARG	"queues"
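/*
 * Example (assuming the usual vdev syntax for this driver; the exact
 * device-name prefix comes from the PMD registration at the bottom of
 * this file):
 *   --vdev 'eth_vhost0,iface=/tmp/sock0,queues=1'
 */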
static const char *drivername = "VHOST PMD";
static const char *valid_arguments[] = {
        ETH_VHOST_IFACE_ARG,
        ETH_VHOST_QUEUES_ARG,
        NULL
};

static struct ether_addr base_eth_addr = {
        .addr_bytes = {
                0x56 /* V */,
                0x48 /* H */,
                0x4F /* O */,
                0x53 /* S */,
                0x54 /* T */,
                0x00
        }
};

struct vhost_queue {
        rte_atomic32_t allow_queuing;
        rte_atomic32_t while_queuing;
        struct virtio_net *device;
        struct pmd_internal *internal;
        struct rte_mempool *mb_pool;
        uint16_t port;
        uint16_t virtqueue_id;
        uint64_t rx_pkts;
        uint64_t tx_pkts;
        uint64_t missed_pkts;
        uint64_t rx_bytes;
        uint64_t tx_bytes;
};

struct pmd_internal {
        char *dev_name;
        char *iface_name;
        uint16_t max_queues;

        volatile uint16_t once;
};
struct internal_list {
        TAILQ_ENTRY(internal_list) next;
        struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
        TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static rte_atomic16_t nb_started_ports;
static pthread_t session_th;
static struct rte_eth_link pmd_link = {
        .link_speed = 10000,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN
};

struct rte_vhost_vring_state {
        rte_spinlock_t lock;

        bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
        bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
        unsigned int index;
        unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
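/*
 * Datapath/teardown synchronization: allow_queuing is cleared by
 * destroy_device() to tell the burst functions to stop touching the
 * vhost device, and while_queuing is held at 1 for the duration of a
 * burst so destroy_device() can wait for in-flight bursts to drain.
 */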
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_rx = 0;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Dequeue packets from guest TX queue */
        nb_rx = rte_vhost_dequeue_burst(r->device,
                        r->virtqueue_id, r->mb_pool, bufs, nb_bufs);

        r->rx_pkts += nb_rx;

        for (i = 0; likely(i < nb_rx); i++) {
                bufs[i]->port = r->port;
                r->rx_bytes += bufs[i]->pkt_len;
        }

out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_rx;
}
static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_tx = 0;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Enqueue packets to guest RX queue */
        nb_tx = rte_vhost_enqueue_burst(r->device,
                        r->virtqueue_id, bufs, nb_bufs);

        r->tx_pkts += nb_tx;
        r->missed_pkts += nb_bufs - nb_tx;

        for (i = 0; likely(i < nb_tx); i++)
                r->tx_bytes += bufs[i]->pkt_len;

        /* Only the packets actually placed in the guest ring are freed
         * here; the caller keeps ownership of the rest.
         */
        for (i = 0; likely(i < nb_tx); i++)
                rte_pktmbuf_free(bufs[i]);

out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_tx;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}
static inline struct internal_list *
find_internal_resource(char *ifname)
{
        int found = 0;
        struct internal_list *list;
        struct pmd_internal *internal;

        if (ifname == NULL)
                return NULL;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                internal = list->eth_dev->data->dev_private;
                if (!strcmp(internal->iface_name, ifname)) {
                        found = 1;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        if (!found)
                return NULL;

        return list;
}
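/*
 * vhost session callback: a guest has connected to our vhost-user
 * socket. Attach the new virtio_net device to the matching ethdev's
 * queues, mark the link as up and allow the burst functions to run.
 */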
static int
new_device(struct virtio_net *dev)
{
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        struct pmd_internal *internal;
        struct vhost_queue *vq;
        unsigned i;
        char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
        int newnode;
#endif

        if (dev == NULL) {
                RTE_LOG(INFO, PMD, "Invalid argument\n");
                return -1;
        }

        rte_vhost_get_ifname(dev->vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

#ifdef RTE_LIBRTE_VHOST_NUMA
        newnode = rte_vhost_get_numa_node(dev->vid);
        if (newnode >= 0)
                eth_dev->data->numa_node = newnode;
#endif

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                vq->device = dev;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                vq->device = dev;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }

        for (i = 0; i < rte_vhost_get_queue_num(dev->vid) * VIRTIO_QNUM; i++)
                rte_vhost_enable_guest_notification(dev, i, 0);

        eth_dev->data->dev_link.link_status = ETH_LINK_UP;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, 1);
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, 1);
        }

        RTE_LOG(INFO, PMD, "New connection established\n");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC);

        return 0;
}
static void
destroy_device(volatile struct virtio_net *dev)
{
        struct rte_eth_dev *eth_dev;
        struct vhost_queue *vq;
        struct internal_list *list;
        char ifname[PATH_MAX];
        unsigned i;

        if (dev == NULL) {
                RTE_LOG(INFO, PMD, "Invalid argument\n");
                return;
        }

        rte_vhost_get_ifname(dev->vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
                return;
        }
        eth_dev = list->eth_dev;

        /* Wait until rx/tx_pkt_burst stops accessing vhost device */
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, 0);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, 0);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }

        eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                vq->device = NULL;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                vq->device = NULL;
        }

        RTE_LOG(INFO, PMD, "Connection closed\n");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC);
}
static int
vring_state_changed(struct virtio_net *dev, uint16_t vring, int enable)
{
        struct rte_vhost_vring_state *state;
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        char ifname[PATH_MAX];

        if (dev == NULL) {
                RTE_LOG(ERR, PMD, "Invalid argument\n");
                return -1;
        }

        rte_vhost_get_ifname(dev->vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        /* won't be NULL */
        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        state->cur[vring] = enable;
        state->max_vring = RTE_MAX(vring, state->max_vring);
        rte_spinlock_unlock(&state->lock);

        RTE_LOG(INFO, PMD, "vring%u is %s\n",
                        vring, enable ? "enabled" : "disabled");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE);

        return 0;
}
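/*
 * vring_state_changed() above only records the new state under the
 * lock; applications poll rte_eth_vhost_get_queue_event(), which
 * compares cur[] against seen[] and reports one pending change per
 * call, round-robin across the vrings.
 */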
int
rte_eth_vhost_get_queue_event(uint8_t port_id,
                struct rte_eth_vhost_queue_event *event)
{
        struct rte_vhost_vring_state *state;
        unsigned int i;
        int idx;

        if (port_id >= RTE_MAX_ETHPORTS) {
                RTE_LOG(ERR, PMD, "Invalid port id\n");
                return -1;
        }

        state = vring_states[port_id];
        if (!state) {
                RTE_LOG(ERR, PMD, "Unused port\n");
                return -1;
        }

        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                idx = state->index++ % (state->max_vring + 1);

                if (state->cur[idx] != state->seen[idx]) {
                        state->seen[idx] = state->cur[idx];
                        event->queue_id = idx / 2;
                        /* vring 2N+1 is the guest TX ring, i.e. our RX queue */
                        event->rx = idx & 1;
                        event->enable = state->cur[idx];
                        rte_spinlock_unlock(&state->lock);
                        return 0;
                }
        }
        rte_spinlock_unlock(&state->lock);

        return -1;
}
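/*
 * The vhost-user message loop (rte_vhost_driver_session_start()) does
 * not return, so it runs in a dedicated pthread; the new_device,
 * destroy_device and vring_state_changed callbacks above are invoked
 * from that thread.
 */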
static void *
vhost_driver_session(void *param __rte_unused)
{
        static struct virtio_net_device_ops vhost_ops;

        /* set vhost arguments */
        vhost_ops.new_device = new_device;
        vhost_ops.destroy_device = destroy_device;
        vhost_ops.vring_state_changed = vring_state_changed;
        if (rte_vhost_driver_callback_register(&vhost_ops) < 0)
                RTE_LOG(ERR, PMD, "Can't register callbacks\n");

        /* start event handling */
        rte_vhost_driver_session_start();

        return NULL;
}

static int
vhost_driver_session_start(void)
{
        int ret;

        ret = pthread_create(&session_th,
                        NULL, vhost_driver_session, NULL);
        if (ret)
                RTE_LOG(ERR, PMD, "Can't create a thread\n");

        return ret;
}

static void
vhost_driver_session_stop(void)
{
        int ret;

        ret = pthread_cancel(session_th);
        if (ret)
                RTE_LOG(ERR, PMD, "Can't cancel the thread\n");

        ret = pthread_join(session_th, NULL);
        if (ret)
                RTE_LOG(ERR, PMD, "Can't join the thread\n");
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        int ret = 0;

        if (rte_atomic16_cmpset(&internal->once, 0, 1)) {
                ret = rte_vhost_driver_register(internal->iface_name);
                if (ret)
                        return ret;
        }

        /* We need only one message handling thread */
        if (rte_atomic16_add_return(&nb_started_ports, 1) == 1)
                ret = vhost_driver_session_start();

        return ret;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;

        if (rte_atomic16_cmpset(&internal->once, 1, 0))
                rte_vhost_driver_unregister(internal->iface_name);

        if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)
                vhost_driver_session_stop();
}
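/*
 * Queue to virtqueue mapping: the PMD's RX queue n drains the guest's
 * TX virtqueue (n * VIRTIO_QNUM + VIRTIO_TXQ), and the PMD's TX queue n
 * fills the guest's RX virtqueue (n * VIRTIO_QNUM + VIRTIO_RXQ).
 */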
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n");
                return -ENOMEM;
        }

        vq->mb_pool = mb_pool;
        vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
        dev->data->rx_queues[rx_queue_id] = vq;

        return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n");
                return -ENOMEM;
        }

        vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
        dev->data->tx_queues[tx_queue_id] = vq;

        return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev,
             struct rte_eth_dev_info *dev_info)
{
        struct pmd_internal *internal;

        internal = dev->data->dev_private;
        if (internal == NULL) {
                RTE_LOG(ERR, PMD, "Invalid device specified\n");
                return;
        }

        dev_info->driver_name = drivername;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = internal->max_queues;
        dev_info->max_tx_queues = internal->max_queues;
        dev_info->min_rx_bufsize = 0;
}
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned i;
        unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
        unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
        struct vhost_queue *vq;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                stats->q_ipackets[i] = vq->rx_pkts;
                rx_total += stats->q_ipackets[i];

                stats->q_ibytes[i] = vq->rx_bytes;
                rx_total_bytes += stats->q_ibytes[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                stats->q_opackets[i] = vq->tx_pkts;
                tx_missed_total += vq->missed_pkts;
                tx_total += stats->q_opackets[i];

                stats->q_obytes[i] = vq->tx_bytes;
                tx_total_bytes += stats->q_obytes[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->imissed = tx_missed_total;
        stats->ibytes = rx_total_bytes;
        stats->obytes = tx_total_bytes;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq;
        unsigned i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                vq->rx_pkts = 0;
                vq->rx_bytes = 0;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                vq->tx_pkts = 0;
                vq->tx_bytes = 0;
                vq->missed_pkts = 0;
        }
}
static void
eth_queue_release(void *q)
{
        rte_free(q);
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}
/**
 * Disable features in feature_mask. Returns 0 on success.
 */
int
rte_eth_vhost_feature_disable(uint64_t feature_mask)
{
        return rte_vhost_feature_disable(feature_mask);
}

/**
 * Enable features in feature_mask. Returns 0 on success.
 */
int
rte_eth_vhost_feature_enable(uint64_t feature_mask)
{
        return rte_vhost_feature_enable(feature_mask);
}

/* Returns currently supported vhost features */
uint64_t
rte_eth_vhost_feature_get(void)
{
        return rte_vhost_feature_get();
}
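/*
 * These wrappers let applications tune the negotiated feature set
 * before the port is started, e.g. masking out one feature bit such as
 * (1ULL << VIRTIO_NET_F_MRG_RXBUF) via rte_eth_vhost_feature_disable()
 * (that bit name is only an illustration; any vhost feature bit works).
 */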
static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
};
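/* Ethdev callbacks not listed above are not supported by this PMD. */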
static int
eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
                     const unsigned numa_node)
{
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internal *internal = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct ether_addr *eth_addr = NULL;
        struct rte_vhost_vring_state *vring_state = NULL;
        struct internal_list *list = NULL;

        RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
                numa_node);

        /* now do all data allocation - for eth_dev structure, dummy pci driver
         * and internal (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        internal = rte_zmalloc_socket(name, sizeof(*internal), 0, numa_node);
        if (internal == NULL)
                goto error;

        list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
        if (list == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
        if (eth_dev == NULL)
                goto error;

        eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
        if (eth_addr == NULL)
                goto error;
        *eth_addr = base_eth_addr;
        eth_addr->addr_bytes[5] = eth_dev->data->port_id;

        vring_state = rte_zmalloc_socket(name,
                        sizeof(*vring_state), 0, numa_node);
        if (vring_state == NULL)
                goto error;

        TAILQ_INIT(&eth_dev->link_intr_cbs);

        /* now put it all together
         * - store queue data in internal,
         * - store numa_node info in ethdev data
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        internal->dev_name = strdup(name);
        if (internal->dev_name == NULL)
                goto error;
        internal->iface_name = strdup(iface_name);
        if (internal->iface_name == NULL)
                goto error;

        list->eth_dev = eth_dev;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_INSERT_TAIL(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);

        rte_spinlock_init(&vring_state->lock);
        vring_states[eth_dev->data->port_id] = vring_state;

        data->dev_private = internal;
        data->port_id = eth_dev->data->port_id;
        memmove(data->name, eth_dev->data->name, sizeof(data->name));
        data->nb_rx_queues = queues;
        data->nb_tx_queues = queues;
        internal->max_queues = queues;
        data->dev_link = pmd_link;
        data->mac_addrs = eth_addr;

        /* We'll replace the 'data' originally allocated by eth_dev. So the
         * vhost PMD resources won't be shared between multi processes.
         */
        eth_dev->data = data;
        eth_dev->dev_ops = &ops;
        eth_dev->driver = NULL;
        data->dev_flags =
                RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
        data->kdrv = RTE_KDRV_NONE;
        data->drv_name = internal->dev_name;
        data->numa_node = numa_node;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_vhost_rx;
        eth_dev->tx_pkt_burst = eth_vhost_tx;

        return data->port_id;

error:
        if (internal)
                free(internal->dev_name);
        rte_free(vring_state);
        rte_free(eth_addr);
        if (eth_dev)
                rte_eth_dev_release_port(eth_dev);
        rte_free(internal);
        rte_free(list);
        rte_free(data);

        return -1;
}
static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
        const char **iface_name = extra_args;

        if (value == NULL)
                return -1;

        *iface_name = value;

        return 0;
}
static inline int
open_queues(const char *key __rte_unused, const char *value, void *extra_args)
{
        uint16_t *q = extra_args;

        if (value == NULL || extra_args == NULL)
                return -1;

        *q = (uint16_t)strtoul(value, NULL, 0);
        if (*q == USHRT_MAX && errno == ERANGE)
                return -1;

        if (*q > RTE_MAX_QUEUES_PER_PORT)
                return -1;

        return 0;
}
static int
rte_pmd_vhost_devinit(const char *name, const char *params)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        char *iface_name;
        uint16_t queues;

        RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n", name);

        kvlist = rte_kvargs_parse(params, valid_arguments);
        if (kvlist == NULL)
                return -1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
                                         &open_iface, &iface_name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -1;
                goto out_free;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
                                         &open_queues, &queues);
                if (ret < 0)
                        goto out_free;
        } else
                queues = 1;

        eth_dev_vhost_create(name, iface_name, queues, rte_socket_id());

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}
static int
rte_pmd_vhost_devuninit(const char *name)
{
        struct rte_eth_dev *eth_dev = NULL;
        struct pmd_internal *internal;
        struct internal_list *list;
        unsigned int i;

        RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -ENODEV;

        internal = eth_dev->data->dev_private;
        if (internal == NULL)
                return -ENODEV;

        list = find_internal_resource(internal->iface_name);
        if (list == NULL)
                return -ENODEV;

        pthread_mutex_lock(&internal_list_lock);
        TAILQ_REMOVE(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);
        rte_free(list);

        eth_dev_stop(eth_dev);

        rte_free(vring_states[eth_dev->data->port_id]);
        vring_states[eth_dev->data->port_id] = NULL;

        free(internal->dev_name);
        free(internal->iface_name);

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
                rte_free(eth_dev->data->rx_queues[i]);
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                rte_free(eth_dev->data->tx_queues[i]);

        rte_free(eth_dev->data->mac_addrs);
        rte_free(eth_dev->data);
        rte_free(internal);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}
static struct rte_driver pmd_vhost_drv = {
        .name = "eth_vhost",
        .type = PMD_VDEV,
        .init = rte_pmd_vhost_devinit,
        .uninit = rte_pmd_vhost_devuninit,
};

PMD_REGISTER_DRIVER(pmd_vhost_drv);