net/vhost: insert/strip VLAN header in software
drivers/net/vhost/rte_eth_vhost.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 IGEL Co., Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"

enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

#define ETH_VHOST_IFACE_ARG             "iface"
#define ETH_VHOST_QUEUES_ARG            "queues"
#define ETH_VHOST_CLIENT_ARG            "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY     "dequeue-zero-copy"
#define ETH_VHOST_IOMMU_SUPPORT         "iommu-support"
#define VHOST_MAX_PKT_BURST 32

static const char *valid_arguments[] = {
        ETH_VHOST_IFACE_ARG,
        ETH_VHOST_QUEUES_ARG,
        ETH_VHOST_CLIENT_ARG,
        ETH_VHOST_DEQUEUE_ZERO_COPY,
        ETH_VHOST_IOMMU_SUPPORT,
        NULL
};

static struct ether_addr base_eth_addr = {
        .addr_bytes = {
                0x56 /* V */,
                0x48 /* H */,
                0x4F /* O */,
                0x53 /* S */,
                0x54 /* T */,
                0x00
        }
};

enum vhost_xstats_pkts {
        VHOST_UNDERSIZE_PKT = 0,
        VHOST_64_PKT,
        VHOST_65_TO_127_PKT,
        VHOST_128_TO_255_PKT,
        VHOST_256_TO_511_PKT,
        VHOST_512_TO_1023_PKT,
        VHOST_1024_TO_1522_PKT,
        VHOST_1523_TO_MAX_PKT,
        VHOST_BROADCAST_PKT,
        VHOST_MULTICAST_PKT,
        VHOST_UNICAST_PKT,
        VHOST_ERRORS_PKT,
        VHOST_ERRORS_FRAGMENTED,
        VHOST_ERRORS_JABBER,
        VHOST_UNKNOWN_PROTOCOL,
        VHOST_XSTATS_MAX,
};
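
/*
 * The first eight counters are packet-size buckets in ascending order;
 * vhost_update_packet_xstats() relies on their positions when it derives
 * a bucket index directly from the packet length.
 */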

struct vhost_stats {
        uint64_t pkts;
        uint64_t bytes;
        uint64_t missed_pkts;
        uint64_t xstats[VHOST_XSTATS_MAX];
};

struct vhost_queue {
        int vid;
        rte_atomic32_t allow_queuing;
        rte_atomic32_t while_queuing;
        struct pmd_internal *internal;
        struct rte_mempool *mb_pool;
        uint16_t port;
        uint16_t virtqueue_id;
        struct vhost_stats stats;
};

struct pmd_internal {
        rte_atomic32_t dev_attached;
        char *dev_name;
        char *iface_name;
        uint16_t max_queues;
        int vid;
        rte_atomic32_t started;
        uint8_t vlan_strip;
};

struct internal_list {
        TAILQ_ENTRY(internal_list) next;
        struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
        TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct rte_eth_link pmd_link = {
                .link_speed = 10000,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = ETH_LINK_DOWN
};

struct rte_vhost_vring_state {
        rte_spinlock_t lock;

        bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
        bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
        unsigned int index;
        unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
        char name[VHOST_XSTATS_NAME_SIZE];
        uint64_t offset;
};

/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
        {"fragmented_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
        {"jabber_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
        {"unknown_protos_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};

/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};

#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
                                sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
                                sizeof(vhost_txport_stat_strings[0]))

static void
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq = NULL;
        unsigned int i = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
}

static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
                           struct rte_eth_xstat_name *xstats_names,
                           unsigned int limit __rte_unused)
{
        unsigned int t = 0;
        int count = 0;
        int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (!xstats_names)
                return nstats;
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "rx_%s", vhost_rxport_stat_strings[t].name);
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "tx_%s", vhost_txport_stat_strings[t].name);
                count++;
        }
        return count;
}

static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        unsigned int i;
        unsigned int t;
        unsigned int count = 0;
        struct vhost_queue *vq = NULL;
        unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (n < nxstats)
                return nxstats;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                + vq->stats.missed_pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        vq = dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_rxport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        vq = dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_txport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        return count;
}

static inline void
vhost_count_multicast_broadcast(struct vhost_queue *vq,
                                struct rte_mbuf *mbuf)
{
        struct ether_addr *ea = NULL;
        struct vhost_stats *pstats = &vq->stats;

        ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
        if (is_multicast_ether_addr(ea)) {
                if (is_broadcast_ether_addr(ea))
                        pstats->xstats[VHOST_BROADCAST_PKT]++;
                else
                        pstats->xstats[VHOST_MULTICAST_PKT]++;
        }
}

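/*
 * Classify a burst of packets into the xstats size buckets. For packets of
 * 65..1023 bytes the bucket index is derived from the position of the most
 * significant bit of the length:
 *   index = 32 - __builtin_clz(pkt_len) - 5
 * e.g. pkt_len = 100 -> clz = 25 -> index = 2 (VHOST_65_TO_127_PKT), and
 * pkt_len = 512 -> clz = 22 -> index = 5 (VHOST_512_TO_1023_PKT).
 */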
static void
vhost_update_packet_xstats(struct vhost_queue *vq,
                           struct rte_mbuf **bufs,
                           uint16_t count)
{
        uint32_t pkt_len = 0;
        uint64_t i = 0;
        uint64_t index;
        struct vhost_stats *pstats = &vq->stats;

        for (i = 0; i < count; i++) {
                pkt_len = bufs[i]->pkt_len;
                if (pkt_len == 64) {
                        pstats->xstats[VHOST_64_PKT]++;
                } else if (pkt_len > 64 && pkt_len < 1024) {
                        index = (sizeof(pkt_len) * 8)
                                - __builtin_clz(pkt_len) - 5;
                        pstats->xstats[index]++;
                } else {
                        if (pkt_len < 64)
                                pstats->xstats[VHOST_UNDERSIZE_PKT]++;
                        else if (pkt_len <= 1522)
                                pstats->xstats[VHOST_1024_TO_1522_PKT]++;
                        else if (pkt_len > 1522)
                                pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
                }
                vhost_count_multicast_broadcast(vq, bufs[i]);
        }
}

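/*
 * RX burst: packets travelling guest -> host are dequeued from the guest's
 * TX virtqueue. VLAN stripping cannot be offloaded on a vhost port, so when
 * the application requested it at configure time it is done here in
 * software via rte_vlan_strip().
 */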
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_rx = 0;
        uint16_t nb_receive = nb_bufs;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Dequeue packets from guest TX queue */
        while (nb_receive) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_receive,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
                                                  r->mb_pool, &bufs[nb_rx],
                                                  num);

                nb_rx += nb_pkts;
                nb_receive -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_rx;

        for (i = 0; likely(i < nb_rx); i++) {
                bufs[i]->port = r->port;
                bufs[i]->ol_flags = 0;
                bufs[i]->vlan_tci = 0;

                if (r->internal->vlan_strip)
                        rte_vlan_strip(bufs[i]);

                r->stats.bytes += bufs[i]->pkt_len;
        }

        vhost_update_packet_xstats(r, bufs, nb_rx);

out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_rx;
}

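/*
 * TX burst: packets travelling host -> guest are enqueued on the guest's
 * RX virtqueue. VLAN insertion cannot be offloaded either, so mbufs marked
 * PKT_TX_VLAN_PKT get their tag inserted in software first. Note that
 * rte_vlan_insert() takes the mbuf by reference and may hand back a
 * different mbuf, which is why the array is compacted into
 * bufs[0..nb_send) before enqueuing.
 */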
static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_tx = 0;
        uint16_t nb_send = 0;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        for (i = 0; i < nb_bufs; i++) {
                struct rte_mbuf *m = bufs[i];

                /* Do VLAN tag insertion */
                if (m->ol_flags & PKT_TX_VLAN_PKT) {
                        int error = rte_vlan_insert(&m);

                        if (unlikely(error)) {
                                rte_pktmbuf_free(m);
                                continue;
                        }
                }

                bufs[nb_send] = m;
                ++nb_send;
        }

        /* Enqueue packets to guest RX queue */
        while (nb_send) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_send,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
                                                  &bufs[nb_tx], num);

                nb_tx += nb_pkts;
                nb_send -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_tx;
        r->stats.missed_pkts += nb_bufs - nb_tx;

        for (i = 0; likely(i < nb_tx); i++)
                r->stats.bytes += bufs[i]->pkt_len;

        vhost_update_packet_xstats(r, bufs, nb_tx);

        /* According to RFC 2863 (ifHCOutMulticastPkts and
         * ifHCOutBroadcastPkts), the "multicast" and "broadcast" counters
         * also include packets that were not transmitted successfully.
         */
        for (i = nb_tx; i < nb_bufs; i++)
                vhost_count_multicast_broadcast(r, bufs[i]);

        for (i = 0; likely(i < nb_tx); i++)
                rte_pktmbuf_free(bufs[i]);
out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_tx;
}

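/*
 * There is no hardware to program: just remember whether the application
 * wants VLAN stripping and warn that VLAN filtering is not supported. The
 * strip flag is honored per packet in eth_vhost_rx().
 */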
static int
eth_dev_configure(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

        internal->vlan_strip = rxmode->hw_vlan_strip;

        if (rxmode->hw_vlan_filter)
                RTE_LOG(WARNING, PMD,
                        "vhost(%s): vlan filtering not available\n",
                        internal->dev_name);

        return 0;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
        int found = 0;
        struct internal_list *list;
        struct pmd_internal *internal;

        if (ifname == NULL)
                return NULL;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                internal = list->eth_dev->data->dev_private;
                if (!strcmp(internal->iface_name, ifname)) {
                        found = 1;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        if (!found)
                return NULL;

        return list;
}

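/*
 * Synchronize with the rx/tx burst functions. allow_queuing is the gate;
 * while_queuing is raised by a burst function for the duration of its
 * critical section. Clearing allow_queuing and then spinning until
 * while_queuing drops to zero guarantees that no burst function still
 * touches the vhost device when this returns.
 */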
static void
update_queuing_status(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        struct vhost_queue *vq;
        unsigned int i;
        int allow_queuing = 1;

        if (rte_atomic32_read(&internal->dev_attached) == 0)
                return;

        if (rte_atomic32_read(&internal->started) == 0)
                allow_queuing = 0;

        /* Wait until rx/tx_pkt_burst stops accessing vhost device */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }
}

static void
queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
{
        struct vhost_queue *vq;
        int i;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
}

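/*
 * vhost-user callback: a guest has connected. Bind the new vid to the
 * already-allocated ethdev queues, mark the link up and report a link
 * state change to the application.
 */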
static int
new_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        struct pmd_internal *internal;
        unsigned int i;
        char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
        int newnode;
#endif

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

#ifdef RTE_LIBRTE_VHOST_NUMA
        newnode = rte_vhost_get_numa_node(vid);
        if (newnode >= 0)
                eth_dev->data->numa_node = newnode;
#endif

        internal->vid = vid;
        if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
                queue_setup(eth_dev, internal);
                rte_atomic32_set(&internal->dev_attached, 1);
        } else {
                RTE_LOG(INFO, PMD, "RX/TX queues not set up yet\n");
                rte_atomic32_set(&internal->dev_attached, 0);
        }

        for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
                rte_vhost_enable_guest_notification(vid, i, 0);

        rte_vhost_get_mtu(vid, &eth_dev->data->mtu);

        eth_dev->data->dev_link.link_status = ETH_LINK_UP;

        update_queuing_status(eth_dev);

        RTE_LOG(INFO, PMD, "Vhost device %d created\n", vid);

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

        return 0;
}

static void
destroy_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct pmd_internal *internal;
        struct vhost_queue *vq;
        struct internal_list *list;
        char ifname[PATH_MAX];
        unsigned int i;
        struct rte_vhost_vring_state *state;

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
                return;
        }
        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

        rte_atomic32_set(&internal->started, 0);
        update_queuing_status(eth_dev);
        rte_atomic32_set(&internal->dev_attached, 0);

        eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = -1;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = -1;
        }

        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                state->cur[i] = false;
                state->seen[i] = false;
        }
        state->max_vring = 0;
        rte_spinlock_unlock(&state->lock);

        RTE_LOG(INFO, PMD, "Vhost device %d destroyed\n", vid);

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

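/*
 * vhost-user callback: a single virtqueue was enabled or disabled. Record
 * the new state for rte_eth_vhost_get_queue_event() and raise a
 * QUEUE_STATE event.
 */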
static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
        struct rte_vhost_vring_state *state;
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        char ifname[PATH_MAX];

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        /* won't be NULL */
        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        state->cur[vring] = enable;
        state->max_vring = RTE_MAX(vring, state->max_vring);
        rte_spinlock_unlock(&state->lock);

        RTE_LOG(INFO, PMD, "vring%u is %s\n",
                        vring, enable ? "enabled" : "disabled");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

        return 0;
}

static struct vhost_device_ops vhost_ops = {
        .new_device          = new_device,
        .destroy_device      = destroy_device,
        .vring_state_changed = vring_state_changed,
};

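/*
 * Report one not-yet-seen vring state change per call, scanning round-robin
 * from the last position. Vring 2N + 1 is the guest TX ring (our RX side),
 * hence event->rx = idx & 1, and queue_id = idx / 2 recovers the ethdev
 * queue number.
 */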
int
rte_eth_vhost_get_queue_event(uint16_t port_id,
                struct rte_eth_vhost_queue_event *event)
{
        struct rte_vhost_vring_state *state;
        unsigned int i;
        int idx;

        if (port_id >= RTE_MAX_ETHPORTS) {
                RTE_LOG(ERR, PMD, "Invalid port id\n");
                return -1;
        }

        state = vring_states[port_id];
        if (!state) {
                RTE_LOG(ERR, PMD, "Unused port\n");
                return -1;
        }

        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                idx = state->index++ % (state->max_vring + 1);

                if (state->cur[idx] != state->seen[idx]) {
                        state->seen[idx] = state->cur[idx];
                        event->queue_id = idx / 2;
                        event->rx = idx & 1;
                        event->enable = state->cur[idx];
                        rte_spinlock_unlock(&state->lock);
                        return 0;
                }
        }
        rte_spinlock_unlock(&state->lock);

        return -1;
}

int
rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
        struct internal_list *list;
        struct rte_eth_dev *eth_dev;
        struct vhost_queue *vq;
        int vid = -1;

        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                eth_dev = list->eth_dev;
                if (eth_dev->data->port_id == port_id) {
                        vq = eth_dev->data->rx_queues[0];
                        if (vq)
                                vid = vq->vid;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        return vid;
}

static int
eth_dev_start(struct rte_eth_dev *eth_dev)
{
        struct pmd_internal *internal = eth_dev->data->dev_private;

        if (unlikely(rte_atomic32_read(&internal->dev_attached) == 0)) {
                queue_setup(eth_dev, internal);
                rte_atomic32_set(&internal->dev_attached, 1);
        }

        rte_atomic32_set(&internal->started, 1);
        update_queuing_status(eth_dev);

        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;

        rte_atomic32_set(&internal->started, 0);
        update_queuing_status(dev);
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal;
        struct internal_list *list;
        unsigned int i;

        internal = dev->data->dev_private;
        if (!internal)
                return;

        eth_dev_stop(dev);

        rte_vhost_driver_unregister(internal->iface_name);

        list = find_internal_resource(internal->iface_name);
        if (!list)
                return;

        pthread_mutex_lock(&internal_list_lock);
        TAILQ_REMOVE(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);
        rte_free(list);

        for (i = 0; i < dev->data->nb_rx_queues; i++)
                rte_free(dev->data->rx_queues[i]);
        for (i = 0; i < dev->data->nb_tx_queues; i++)
                rte_free(dev->data->tx_queues[i]);

        rte_free(dev->data->mac_addrs);
        free(internal->dev_name);
        free(internal->iface_name);
        rte_free(internal);

        dev->data->dev_private = NULL;
}

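/*
 * Queue-to-virtqueue mapping: ethdev RX queue N reads from guest TX
 * virtqueue 2N + 1, and ethdev TX queue N writes to guest RX virtqueue 2N
 * (VIRTIO_QNUM virtqueues per queue pair).
 */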
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n");
                return -ENOMEM;
        }

        vq->mb_pool = mb_pool;
        vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
        dev->data->rx_queues[rx_queue_id] = vq;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n");
                return -ENOMEM;
        }

        vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
        dev->data->tx_queues[tx_queue_id] = vq;

        return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
             struct rte_eth_dev_info *dev_info)
{
        struct pmd_internal *internal;

        internal = dev->data->dev_private;
        if (internal == NULL) {
                RTE_LOG(ERR, PMD, "Invalid device specified\n");
                return;
        }

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = internal->max_queues;
        dev_info->max_tx_queues = internal->max_queues;
        dev_info->min_rx_bufsize = 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned int i;
        unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
        unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
        struct vhost_queue *vq;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                stats->q_ipackets[i] = vq->stats.pkts;
                rx_total += stats->q_ipackets[i];

                stats->q_ibytes[i] = vq->stats.bytes;
                rx_total_bytes += stats->q_ibytes[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                stats->q_opackets[i] = vq->stats.pkts;
                tx_missed_total += vq->stats.missed_pkts;
                tx_total += stats->q_opackets[i];

                stats->q_obytes[i] = vq->stats.bytes;
                tx_total_bytes += stats->q_obytes[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->oerrors = tx_missed_total;
        stats->ibytes = rx_total_bytes;
        stats->obytes = tx_total_bytes;

        return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq;
        unsigned int i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
                vq->stats.missed_pkts = 0;
        }
}

static void
eth_queue_release(void *q)
{
        rte_free(q);
}

static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
        /*
         * vHost does not hang onto mbufs: eth_vhost_tx() copies the packet
         * data into the virtqueue and frees the mbuf itself, so there is
         * nothing to clean up here.
         */
        return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

static uint32_t
eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct vhost_queue *vq;

        vq = dev->data->rx_queues[rx_queue_id];
        if (vq == NULL)
                return 0;

        return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_close = eth_dev_close,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .tx_done_cleanup = eth_tx_done_cleanup,
        .rx_queue_count = eth_rx_queue_count,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .xstats_reset = vhost_dev_xstats_reset,
        .xstats_get = vhost_dev_xstats_get,
        .xstats_get_names = vhost_dev_xstats_get_names,
};

static struct rte_vdev_driver pmd_vhost_drv;

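/*
 * Allocate the ethdev, a private copy of its data (so nothing is shared
 * across processes), the MAC address and the vring-state tracker, then
 * register and start the vhost-user driver on iface_name. Returns the new
 * port id, or -1 on failure.
 */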
static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
        int16_t queues, const unsigned int numa_node, uint64_t flags)
{
        const char *name = rte_vdev_device_name(dev);
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internal *internal = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct ether_addr *eth_addr = NULL;
        struct rte_vhost_vring_state *vring_state = NULL;
        struct internal_list *list = NULL;

        RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
                numa_node);

        /* now do all data allocation - for eth_dev structure and internal
         * (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
        if (list == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
        if (eth_dev == NULL)
                goto error;

        eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
        if (eth_addr == NULL)
                goto error;
        *eth_addr = base_eth_addr;
        eth_addr->addr_bytes[5] = eth_dev->data->port_id;

        vring_state = rte_zmalloc_socket(name,
                        sizeof(*vring_state), 0, numa_node);
        if (vring_state == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internal,
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        internal = eth_dev->data->dev_private;
        internal->dev_name = strdup(name);
        if (internal->dev_name == NULL)
                goto error;
        internal->iface_name = strdup(iface_name);
        if (internal->iface_name == NULL)
                goto error;

        list->eth_dev = eth_dev;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_INSERT_TAIL(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);

        rte_spinlock_init(&vring_state->lock);
        vring_states[eth_dev->data->port_id] = vring_state;

        /* We'll replace the 'data' originally allocated by the ethdev, so
         * the vhost PMD's resources are not shared between multiple
         * processes.
         */
        rte_memcpy(data, eth_dev->data, sizeof(*data));
        eth_dev->data = data;

        data->nb_rx_queues = queues;
        data->nb_tx_queues = queues;
        internal->max_queues = queues;
        data->dev_link = pmd_link;
        data->mac_addrs = eth_addr;
        data->dev_flags = RTE_ETH_DEV_INTR_LSC;

        eth_dev->dev_ops = &ops;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_vhost_rx;
        eth_dev->tx_pkt_burst = eth_vhost_tx;

        if (rte_vhost_driver_register(iface_name, flags))
                goto error;

        if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
                RTE_LOG(ERR, PMD, "Can't register callbacks\n");
                goto error;
        }

        if (rte_vhost_driver_start(iface_name) < 0) {
                RTE_LOG(ERR, PMD, "Failed to start driver for %s\n",
                        iface_name);
                goto error;
        }

        return data->port_id;

error:
        if (internal) {
                free(internal->iface_name);
                free(internal->dev_name);
        }
        rte_free(vring_state);
        rte_free(eth_addr);
        if (eth_dev)
                rte_eth_dev_release_port(eth_dev);
        rte_free(internal);
        rte_free(list);
        rte_free(data);

        return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
        const char **iface_name = extra_args;

        if (value == NULL)
                return -1;

        *iface_name = value;

        return 0;
}

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
        uint16_t *n = extra_args;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        *n = (uint16_t)strtoul(value, NULL, 0);
        if (*n == USHRT_MAX && errno == ERANGE)
                return -1;

        return 0;
}

static int
rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        char *iface_name;
        uint16_t queues;
        uint64_t flags = 0;
        int client_mode = 0;
        int dequeue_zero_copy = 0;
        int iommu_support = 0;

        RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n",
                rte_vdev_device_name(dev));

        kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
        if (kvlist == NULL)
                return -1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
                                         &open_iface, &iface_name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -1;
                goto out_free;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
                                         &open_int, &queues);
                if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
                        goto out_free;

        } else
                queues = 1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
                                         &open_int, &client_mode);
                if (ret < 0)
                        goto out_free;

                if (client_mode)
                        flags |= RTE_VHOST_USER_CLIENT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
                                         &open_int, &dequeue_zero_copy);
                if (ret < 0)
                        goto out_free;

                if (dequeue_zero_copy)
                        flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
                                         &open_int, &iommu_support);
                if (ret < 0)
                        goto out_free;

                if (iommu_support)
                        flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
        }

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        /* Propagate a creation failure instead of silently returning the
         * (successful) result of the last kvargs lookup.
         */
        if (eth_dev_vhost_create(dev, iface_name, queues,
                                 dev->device.numa_node, flags) < 0)
                ret = -1;

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
        const char *name;
        struct rte_eth_dev *eth_dev = NULL;

        name = rte_vdev_device_name(dev);
        RTE_LOG(INFO, PMD, "Un-initializing pmd_vhost for %s\n", name);

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_dev_close(eth_dev);

        rte_free(vring_states[eth_dev->data->port_id]);
        vring_states[eth_dev->data->port_id] = NULL;

        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_vhost_drv = {
        .probe = rte_pmd_vhost_probe,
        .remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
        "iface=<ifc> "
        "queues=<int> "
        "client=<0|1> "
        "dequeue-zero-copy=<0|1> "
        "iommu-support=<0|1>");
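
/*
 * Illustrative usage (not part of this file): a vhost-user port can be
 * created from the EAL command line of any DPDK application, e.g.
 *
 *   testpmd -l 0-1 -n 4 --vdev 'net_vhost0,iface=/tmp/sock0,queues=1' -- -i
 *
 * A QEMU guest would then attach its virtio-net device to /tmp/sock0.
 */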