/* drivers/net/vhost/rte_eth_vhost.c */
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 IGEL Co., Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>	/* strtoul() */
#include <string.h>	/* memset(), strcmp(), strdup() */
#include <errno.h>	/* errno checked in open_int() */
#include <limits.h>	/* PATH_MAX, USHRT_MAX */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>
#include "rte_eth_vhost.h"

enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

#define ETH_VHOST_IFACE_ARG             "iface"
#define ETH_VHOST_QUEUES_ARG            "queues"
#define ETH_VHOST_CLIENT_ARG            "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY     "dequeue-zero-copy"
#define ETH_VHOST_IOMMU_SUPPORT         "iommu-support"
#define VHOST_MAX_PKT_BURST 32

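/*
 * These keys are consumed from the vdev argument string. A typical device
 * creation (the socket path is purely illustrative) looks like:
 *
 *   --vdev 'net_vhost0,iface=/tmp/vhost.sock,queues=2,client=1'
 *
 * "iface" is mandatory; "queues" defaults to 1 and the remaining options
 * default to off (see rte_pmd_vhost_probe() below).
 */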
static const char *valid_arguments[] = {
        ETH_VHOST_IFACE_ARG,
        ETH_VHOST_QUEUES_ARG,
        ETH_VHOST_CLIENT_ARG,
        ETH_VHOST_DEQUEUE_ZERO_COPY,
        ETH_VHOST_IOMMU_SUPPORT,
        NULL
};

static struct ether_addr base_eth_addr = {
        .addr_bytes = {
                0x56 /* V */,
                0x48 /* H */,
                0x4F /* O */,
                0x53 /* S */,
                0x54 /* T */,
                0x00
        }
};

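/*
 * Note on ordering: vhost_update_packet_xstats() computes the bucket index
 * for 65..1023 byte packets as floor(log2(pkt_len)) - 4, so the entries from
 * VHOST_65_TO_127_PKT up to VHOST_512_TO_1023_PKT must remain contiguous,
 * in ascending size order, at indices 2 through 5.
 */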
enum vhost_xstats_pkts {
        VHOST_UNDERSIZE_PKT = 0,
        VHOST_64_PKT,
        VHOST_65_TO_127_PKT,
        VHOST_128_TO_255_PKT,
        VHOST_256_TO_511_PKT,
        VHOST_512_TO_1023_PKT,
        VHOST_1024_TO_1522_PKT,
        VHOST_1523_TO_MAX_PKT,
        VHOST_BROADCAST_PKT,
        VHOST_MULTICAST_PKT,
        VHOST_UNICAST_PKT,
        VHOST_ERRORS_PKT,
        VHOST_ERRORS_FRAGMENTED,
        VHOST_ERRORS_JABBER,
        VHOST_UNKNOWN_PROTOCOL,
        VHOST_XSTATS_MAX,
};

struct vhost_stats {
        uint64_t pkts;
        uint64_t bytes;
        uint64_t missed_pkts;
        uint64_t xstats[VHOST_XSTATS_MAX];
};

struct vhost_queue {
        int vid;
        rte_atomic32_t allow_queuing;
        rte_atomic32_t while_queuing;
        struct pmd_internal *internal;
        struct rte_mempool *mb_pool;
        uint16_t port;
        uint16_t virtqueue_id;
        struct vhost_stats stats;
};

struct pmd_internal {
        rte_atomic32_t dev_attached;
        char *dev_name;
        char *iface_name;
        uint16_t max_queues;
        uint16_t vid;
        rte_atomic32_t started;
};

struct internal_list {
        TAILQ_ENTRY(internal_list) next;
        struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
        TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

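/*
 * Default link state reported to applications: a nominal 10G full-duplex
 * link that starts down. new_device()/destroy_device() flip link_status up
 * and down as the vhost-user connection comes and goes.
 */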
static struct rte_eth_link pmd_link = {
        .link_speed = 10000,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN
};

struct rte_vhost_vring_state {
        rte_spinlock_t lock;

        bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
        bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
        unsigned int index;
        unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
        char name[VHOST_XSTATS_NAME_SIZE];
        uint64_t offset;
};

/* "rx_" is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
        {"fragmented_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
        {"jabber_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
        {"unknown_protos_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};

/* "tx_" is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};

#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
                                sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
                                sizeof(vhost_txport_stat_strings[0]))

static void
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq = NULL;
        unsigned int i = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
}

static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
                           struct rte_eth_xstat_name *xstats_names,
                           unsigned int limit __rte_unused)
{
        unsigned int t = 0;
        int count = 0;
        int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (!xstats_names)
                return nstats;
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "rx_%s", vhost_rxport_stat_strings[t].name);
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "tx_%s", vhost_txport_stat_strings[t].name);
                count++;
        }
        return count;
}

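/*
 * Unicast counters are not maintained on the fast path; they are derived
 * here as pkts - (broadcast + multicast). For TX, missed packets are added
 * back in first: vhost_update_packet_xstats() only walked the successfully
 * sent mbufs, while the multicast/broadcast counters also cover the dropped
 * ones (see the RFC2863 note in eth_vhost_tx()).
 */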
static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        unsigned int i;
        unsigned int t;
        unsigned int count = 0;
        struct vhost_queue *vq = NULL;
        unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (n < nxstats)
                return nxstats;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                + vq->stats.missed_pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        vq = dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_rxport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        vq = dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_txport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        return count;
}

static inline void
vhost_count_multicast_broadcast(struct vhost_queue *vq,
                                struct rte_mbuf *mbuf)
{
        struct ether_addr *ea = NULL;
        struct vhost_stats *pstats = &vq->stats;

        ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
        if (is_multicast_ether_addr(ea)) {
                if (is_broadcast_ether_addr(ea))
                        pstats->xstats[VHOST_BROADCAST_PKT]++;
                else
                        pstats->xstats[VHOST_MULTICAST_PKT]++;
        }
}

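/*
 * Sort each packet into a size bucket. For lengths in (64, 1024) the bucket
 * index is floor(log2(pkt_len)) - 4, computed below via __builtin_clz():
 * 65-127 bytes land in VHOST_65_TO_127_PKT (index 2), 128-255 in index 3,
 * and so on up to 512-1023 at index 5. The remaining sizes are classified
 * explicitly.
 */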
static void
vhost_update_packet_xstats(struct vhost_queue *vq,
                           struct rte_mbuf **bufs,
                           uint16_t count)
{
        uint32_t pkt_len = 0;
        uint64_t i = 0;
        uint64_t index;
        struct vhost_stats *pstats = &vq->stats;

        for (i = 0; i < count; i++) {
                pkt_len = bufs[i]->pkt_len;
                if (pkt_len == 64) {
                        pstats->xstats[VHOST_64_PKT]++;
                } else if (pkt_len > 64 && pkt_len < 1024) {
                        index = (sizeof(pkt_len) * 8)
                                - __builtin_clz(pkt_len) - 5;
                        pstats->xstats[index]++;
                } else {
                        if (pkt_len < 64)
                                pstats->xstats[VHOST_UNDERSIZE_PKT]++;
                        else if (pkt_len <= 1522)
                                pstats->xstats[VHOST_1024_TO_1522_PKT]++;
                        else if (pkt_len > 1522)
                                pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
                }
                vhost_count_multicast_broadcast(vq, bufs[i]);
        }
}

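/*
 * Virtqueue numbering follows the guest's point of view: queue pair N is
 * made of virtqueue 2N (guest RX) and 2N+1 (guest TX). The PMD RX path
 * below therefore dequeues from the guest TX ring, and the TX path enqueues
 * to the guest RX ring; see eth_rx_queue_setup()/eth_tx_queue_setup() for
 * the mapping.
 */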
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_rx = 0;
        uint16_t nb_receive = nb_bufs;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Dequeue packets from guest TX queue */
        while (nb_receive) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_receive,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
                                                  r->mb_pool, &bufs[nb_rx],
                                                  num);

                nb_rx += nb_pkts;
                nb_receive -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_rx;

        for (i = 0; likely(i < nb_rx); i++) {
                bufs[i]->port = r->port;
                r->stats.bytes += bufs[i]->pkt_len;
        }

        vhost_update_packet_xstats(r, bufs, nb_rx);

out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_rx;
}

static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_tx = 0;
        uint16_t nb_send = nb_bufs;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Enqueue packets to guest RX queue */
        while (nb_send) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_send,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
                                                  &bufs[nb_tx], num);

                nb_tx += nb_pkts;
                nb_send -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_tx;
        r->stats.missed_pkts += nb_bufs - nb_tx;

        for (i = 0; likely(i < nb_tx); i++)
                r->stats.bytes += bufs[i]->pkt_len;

        vhost_update_packet_xstats(r, bufs, nb_tx);

        /* According to RFC2863 (ifHCOutMulticastPkts and
         * ifHCOutBroadcastPkts), the "multicast" and "broadcast" counters
         * are increased even when packets are not transmitted successfully.
         */
        for (i = nb_tx; i < nb_bufs; i++)
                vhost_count_multicast_broadcast(r, bufs[i]);

        for (i = 0; likely(i < nb_tx); i++)
                rte_pktmbuf_free(bufs[i]);
out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
        int found = 0;
        struct internal_list *list;
        struct pmd_internal *internal;

        if (ifname == NULL)
                return NULL;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                internal = list->eth_dev->data->dev_private;
                if (!strcmp(internal->iface_name, ifname)) {
                        found = 1;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        if (!found)
                return NULL;

        return list;
}

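/*
 * Handshake with the rx/tx burst functions: allow_queuing tells the
 * datapath whether it may touch the vhost device, and while_queuing is set
 * by the burst functions for the duration of each call. After clearing
 * allow_queuing we spin until while_queuing drops to zero, at which point
 * no burst function can still be using the device.
 */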
static void
update_queuing_status(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        struct vhost_queue *vq;
        unsigned int i;
        int allow_queuing = 1;

        if (rte_atomic32_read(&internal->dev_attached) == 0)
                return;

        if (rte_atomic32_read(&internal->started) == 0)
                allow_queuing = 0;

        /* Wait until rx/tx_pkt_burst stops accessing vhost device */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }
}

static void
queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
{
        struct vhost_queue *vq;
        int i;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
}

static int
new_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        struct pmd_internal *internal;
        unsigned int i;
        char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
        int newnode;
#endif

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

#ifdef RTE_LIBRTE_VHOST_NUMA
        newnode = rte_vhost_get_numa_node(vid);
        if (newnode >= 0)
                eth_dev->data->numa_node = newnode;
#endif

        internal->vid = vid;
        if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
                queue_setup(eth_dev, internal);
                rte_atomic32_set(&internal->dev_attached, 1);
        } else {
                RTE_LOG(INFO, PMD, "RX/TX queues not set up yet\n");
                rte_atomic32_set(&internal->dev_attached, 0);
        }

        for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
                rte_vhost_enable_guest_notification(vid, i, 0);

        rte_vhost_get_mtu(vid, &eth_dev->data->mtu);

        eth_dev->data->dev_link.link_status = ETH_LINK_UP;

        update_queuing_status(eth_dev);

        RTE_LOG(INFO, PMD, "Vhost device %d created\n", vid);

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

        return 0;
}

static void
destroy_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct pmd_internal *internal;
        struct vhost_queue *vq;
        struct internal_list *list;
        char ifname[PATH_MAX];
        unsigned int i;
        struct rte_vhost_vring_state *state;

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
                return;
        }
        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

        rte_atomic32_set(&internal->started, 0);
        update_queuing_status(eth_dev);
        rte_atomic32_set(&internal->dev_attached, 0);

        eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = -1;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = -1;
        }

        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                state->cur[i] = false;
                state->seen[i] = false;
        }
        state->max_vring = 0;
        rte_spinlock_unlock(&state->lock);

        RTE_LOG(INFO, PMD, "Vhost device %d destroyed\n", vid);

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
        struct rte_vhost_vring_state *state;
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        char ifname[PATH_MAX];

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        /* won't be NULL */
        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        state->cur[vring] = enable;
        state->max_vring = RTE_MAX(vring, state->max_vring);
        rte_spinlock_unlock(&state->lock);

        RTE_LOG(INFO, PMD, "vring%u is %s\n",
                        vring, enable ? "enabled" : "disabled");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

        return 0;
}

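/*
 * These callbacks are invoked from the vhost-user session threads managed
 * by librte_vhost, not from an EAL lcore, which is why the surrounding code
 * relies on atomic flags and the vring-state spinlock rather than plain
 * variables.
 */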
static struct vhost_device_ops vhost_ops = {
        .new_device          = new_device,
        .destroy_device      = destroy_device,
        .vring_state_changed = vring_state_changed,
};

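/*
 * A minimal usage sketch (illustrative): applications typically drain all
 * pending events in a loop, e.g. from a RTE_ETH_EVENT_QUEUE_STATE handler:
 *
 *   struct rte_eth_vhost_queue_event ev;
 *
 *   while (rte_eth_vhost_get_queue_event(port_id, &ev) == 0)
 *       printf("queue %u (%s) %s\n", ev.queue_id,
 *              ev.rx ? "rx" : "tx", ev.enable ? "enabled" : "disabled");
 */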
int
rte_eth_vhost_get_queue_event(uint16_t port_id,
                struct rte_eth_vhost_queue_event *event)
{
        struct rte_vhost_vring_state *state;
        unsigned int i;
        int idx;

        if (port_id >= RTE_MAX_ETHPORTS) {
                RTE_LOG(ERR, PMD, "Invalid port id\n");
                return -1;
        }

        state = vring_states[port_id];
        if (!state) {
                RTE_LOG(ERR, PMD, "Unused port\n");
                return -1;
        }

        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                idx = state->index++ % (state->max_vring + 1);

                if (state->cur[idx] != state->seen[idx]) {
                        state->seen[idx] = state->cur[idx];
                        event->queue_id = idx / 2;
                        /* odd virtqueue IDs are guest TX, i.e. our RX side */
                        event->rx = idx & 1;
                        event->enable = state->cur[idx];
                        rte_spinlock_unlock(&state->lock);
                        return 0;
                }
        }
        rte_spinlock_unlock(&state->lock);

        return -1;
}

int
rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
        struct internal_list *list;
        struct rte_eth_dev *eth_dev;
        struct vhost_queue *vq;
        int vid = -1;

        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                eth_dev = list->eth_dev;
                if (eth_dev->data->port_id == port_id) {
                        vq = eth_dev->data->rx_queues[0];
                        if (vq)
                                vid = vq->vid;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        return vid;
}

static int
eth_dev_start(struct rte_eth_dev *eth_dev)
{
        struct pmd_internal *internal = eth_dev->data->dev_private;

        if (unlikely(rte_atomic32_read(&internal->dev_attached) == 0)) {
                queue_setup(eth_dev, internal);
                rte_atomic32_set(&internal->dev_attached, 1);
        }

        rte_atomic32_set(&internal->started, 1);
        update_queuing_status(eth_dev);

        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;

        rte_atomic32_set(&internal->started, 0);
        update_queuing_status(dev);
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal;
        struct internal_list *list;
        unsigned int i;

        internal = dev->data->dev_private;
        if (!internal)
                return;

        eth_dev_stop(dev);

        rte_vhost_driver_unregister(internal->iface_name);

        list = find_internal_resource(internal->iface_name);
        if (!list)
                return;

        pthread_mutex_lock(&internal_list_lock);
        TAILQ_REMOVE(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);
        rte_free(list);

        for (i = 0; i < dev->data->nb_rx_queues; i++)
                rte_free(dev->data->rx_queues[i]);
        for (i = 0; i < dev->data->nb_tx_queues; i++)
                rte_free(dev->data->tx_queues[i]);

        rte_free(dev->data->mac_addrs);
        free(internal->dev_name);
        free(internal->iface_name);
        rte_free(internal);

        dev->data->dev_private = NULL;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n");
                return -ENOMEM;
        }

        vq->mb_pool = mb_pool;
        /* the PMD RX queue drains the guest's TX virtqueue (odd ID) */
        vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
        dev->data->rx_queues[rx_queue_id] = vq;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n");
                return -ENOMEM;
        }

        /* the PMD TX queue feeds the guest's RX virtqueue (even ID) */
        vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
        dev->data->tx_queues[tx_queue_id] = vq;

        return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
             struct rte_eth_dev_info *dev_info)
{
        struct pmd_internal *internal;

        internal = dev->data->dev_private;
        if (internal == NULL) {
                RTE_LOG(ERR, PMD, "Invalid device specified\n");
                return;
        }

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = internal->max_queues;
        dev_info->max_tx_queues = internal->max_queues;
        dev_info->min_rx_bufsize = 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned int i;
        unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
        unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
        struct vhost_queue *vq;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                stats->q_ipackets[i] = vq->stats.pkts;
                rx_total += stats->q_ipackets[i];

                stats->q_ibytes[i] = vq->stats.bytes;
                rx_total_bytes += stats->q_ibytes[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                stats->q_opackets[i] = vq->stats.pkts;
                tx_missed_total += vq->stats.missed_pkts;
                tx_total += stats->q_opackets[i];

                stats->q_obytes[i] = vq->stats.bytes;
                tx_total_bytes += stats->q_obytes[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->oerrors = tx_missed_total;
        stats->ibytes = rx_total_bytes;
        stats->obytes = tx_total_bytes;

        return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq;
        unsigned int i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
                vq->stats.missed_pkts = 0;
        }
}

static void
eth_queue_release(void *q)
{
        rte_free(q);
}

static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
        /*
         * vHost does not hang onto mbufs: eth_vhost_tx() copies the packet
         * data into the vring and frees the mbufs itself, so there is
         * nothing to clean up here.
         */
        return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

static uint32_t
eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct vhost_queue *vq;

        vq = dev->data->rx_queues[rx_queue_id];
        if (vq == NULL)
                return 0;

        return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_close = eth_dev_close,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .tx_done_cleanup = eth_tx_done_cleanup,
        .rx_queue_count = eth_rx_queue_count,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .xstats_reset = vhost_dev_xstats_reset,
        .xstats_get = vhost_dev_xstats_get,
        .xstats_get_names = vhost_dev_xstats_get_names,
};

static struct rte_vdev_driver pmd_vhost_drv;

static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
        int16_t queues, const unsigned int numa_node, uint64_t flags)
{
        const char *name = rte_vdev_device_name(dev);
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internal *internal = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct ether_addr *eth_addr = NULL;
        struct rte_vhost_vring_state *vring_state = NULL;
        struct internal_list *list = NULL;

        RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
                numa_node);

        /* now do all data allocation - for eth_dev structure and internal
         * (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
        if (list == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
        if (eth_dev == NULL)
                goto error;

        eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
        if (eth_addr == NULL)
                goto error;
        *eth_addr = base_eth_addr;
        eth_addr->addr_bytes[5] = eth_dev->data->port_id;

        vring_state = rte_zmalloc_socket(name,
                        sizeof(*vring_state), 0, numa_node);
        if (vring_state == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internal,
         * - point eth_dev_data to internal,
         * - and point the eth_dev structure to the new eth_dev_data structure
         */
        internal = eth_dev->data->dev_private;
        internal->dev_name = strdup(name);
        if (internal->dev_name == NULL)
                goto error;
        internal->iface_name = strdup(iface_name);
        if (internal->iface_name == NULL)
                goto error;

        list->eth_dev = eth_dev;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_INSERT_TAIL(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);

        rte_spinlock_init(&vring_state->lock);
        vring_states[eth_dev->data->port_id] = vring_state;

        /* We'll replace the 'data' originally allocated by the ethdev layer,
         * so the vhost PMD resources won't be shared between multiple
         * processes.
         */
        rte_memcpy(data, eth_dev->data, sizeof(*data));
        eth_dev->data = data;

        data->nb_rx_queues = queues;
        data->nb_tx_queues = queues;
        internal->max_queues = queues;
        data->dev_link = pmd_link;
        data->mac_addrs = eth_addr;
        data->dev_flags = RTE_ETH_DEV_INTR_LSC;

        eth_dev->dev_ops = &ops;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_vhost_rx;
        eth_dev->tx_pkt_burst = eth_vhost_tx;

        if (rte_vhost_driver_register(iface_name, flags))
                goto error;

        if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
                RTE_LOG(ERR, PMD, "Can't register callbacks\n");
                goto error;
        }

        if (rte_vhost_driver_start(iface_name) < 0) {
                RTE_LOG(ERR, PMD, "Failed to start driver for %s\n",
                        iface_name);
                goto error;
        }

        return data->port_id;

error:
        if (internal) {
                free(internal->iface_name);
                free(internal->dev_name);
        }
        rte_free(vring_state);
        rte_free(eth_addr);
        if (eth_dev)
                rte_eth_dev_release_port(eth_dev);
        rte_free(internal);
        rte_free(list);
        rte_free(data);

        return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
        const char **iface_name = extra_args;

        if (value == NULL)
                return -1;

        *iface_name = value;

        return 0;
}

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
        uint16_t *n = extra_args;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        errno = 0; /* distinguish a genuine ERANGE from a stale errno */
        *n = (uint16_t)strtoul(value, NULL, 0);
        if (*n == USHRT_MAX && errno == ERANGE)
                return -1;

        return 0;
}

static int
rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        char *iface_name;
        uint16_t queues;
        uint64_t flags = 0;
        int client_mode = 0;
        int dequeue_zero_copy = 0;
        int iommu_support = 0;

        RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n",
                rte_vdev_device_name(dev));

        kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
        if (kvlist == NULL)
                return -1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
                                         &open_iface, &iface_name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -1;
                goto out_free;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
                                         &open_int, &queues);
                if (ret < 0)
                        goto out_free;
                if (queues > RTE_MAX_QUEUES_PER_PORT) {
                        /* fail instead of silently returning success */
                        ret = -1;
                        goto out_free;
                }
        } else
                queues = 1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
                                         &open_int, &client_mode);
                if (ret < 0)
                        goto out_free;

                if (client_mode)
                        flags |= RTE_VHOST_USER_CLIENT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
                                         &open_int, &dequeue_zero_copy);
                if (ret < 0)
                        goto out_free;

                if (dequeue_zero_copy)
                        flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
                                         &open_int, &iommu_support);
                if (ret < 0)
                        goto out_free;

                if (iommu_support)
                        flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
        }

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        /* propagate a creation failure to the caller */
        if (eth_dev_vhost_create(dev, iface_name, queues,
                                 dev->device.numa_node, flags) == -1)
                ret = -1;

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
        const char *name;
        struct rte_eth_dev *eth_dev = NULL;

        name = rte_vdev_device_name(dev);
        RTE_LOG(INFO, PMD, "Un-initializing pmd_vhost for %s\n", name);

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_dev_close(eth_dev);

        rte_free(vring_states[eth_dev->data->port_id]);
        vring_states[eth_dev->data->port_id] = NULL;

        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_vhost_drv = {
        .probe = rte_pmd_vhost_probe,
        .remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
        "iface=<ifc> "
        "queues=<int> "
        "client=<0|1> "
        "dequeue-zero-copy=<0|1> "
        "iommu-support=<0|1>");