net/vhost: check creation failure
drivers/net/vhost/rte_eth_vhost.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 IGEL Co., Ltd.
 * Copyright(c) 2016-2018 Intel Corporation
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"

static int vhost_logtype;

#define VHOST_LOG(level, ...) \
        rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)

enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

#define ETH_VHOST_IFACE_ARG             "iface"
#define ETH_VHOST_QUEUES_ARG            "queues"
#define ETH_VHOST_CLIENT_ARG            "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY     "dequeue-zero-copy"
#define ETH_VHOST_IOMMU_SUPPORT         "iommu-support"
#define ETH_VHOST_POSTCOPY_SUPPORT      "postcopy-support"
#define ETH_VHOST_VIRTIO_NET_F_HOST_TSO "tso"
#define VHOST_MAX_PKT_BURST 32

static const char *valid_arguments[] = {
        ETH_VHOST_IFACE_ARG,
        ETH_VHOST_QUEUES_ARG,
        ETH_VHOST_CLIENT_ARG,
        ETH_VHOST_DEQUEUE_ZERO_COPY,
        ETH_VHOST_IOMMU_SUPPORT,
        ETH_VHOST_POSTCOPY_SUPPORT,
        ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
        NULL
};

static struct rte_ether_addr base_eth_addr = {
        .addr_bytes = {
                0x56 /* V */,
                0x48 /* H */,
                0x4F /* O */,
                0x53 /* S */,
                0x54 /* T */,
                0x00
        }
};

enum vhost_xstats_pkts {
        VHOST_UNDERSIZE_PKT = 0,
        VHOST_64_PKT,
        VHOST_65_TO_127_PKT,
        VHOST_128_TO_255_PKT,
        VHOST_256_TO_511_PKT,
        VHOST_512_TO_1023_PKT,
        VHOST_1024_TO_1522_PKT,
        VHOST_1523_TO_MAX_PKT,
        VHOST_BROADCAST_PKT,
        VHOST_MULTICAST_PKT,
        VHOST_UNICAST_PKT,
        VHOST_ERRORS_PKT,
        VHOST_ERRORS_FRAGMENTED,
        VHOST_ERRORS_JABBER,
        VHOST_UNKNOWN_PROTOCOL,
        VHOST_XSTATS_MAX,
};

struct vhost_stats {
        uint64_t pkts;
        uint64_t bytes;
        uint64_t missed_pkts;
        uint64_t xstats[VHOST_XSTATS_MAX];
};

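/*
 * Per-queue state. allow_queuing/while_queuing implement a simple
 * handshake: the control path clears allow_queuing and spins until
 * while_queuing drops to 0, so the rx/tx bursts never touch a vhost
 * device that is being attached or torn down.
 */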
struct vhost_queue {
        int vid;
        rte_atomic32_t allow_queuing;
        rte_atomic32_t while_queuing;
        struct pmd_internal *internal;
        struct rte_mempool *mb_pool;
        uint16_t port;
        uint16_t virtqueue_id;
        struct vhost_stats stats;
};

struct pmd_internal {
        rte_atomic32_t dev_attached;
        char *dev_name;
        char *iface_name;
        uint16_t max_queues;
        int vid;
        rte_atomic32_t started;
        uint8_t vlan_strip;
};

struct internal_list {
        TAILQ_ENTRY(internal_list) next;
        struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
        TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct rte_eth_link pmd_link = {
                .link_speed = 10000,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = ETH_LINK_DOWN
};

struct rte_vhost_vring_state {
        rte_spinlock_t lock;

        bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
        bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
        unsigned int index;
        unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
        char name[VHOST_XSTATS_NAME_SIZE];
        uint64_t offset;
};

/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
        {"fragmented_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
        {"jabber_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
        {"unknown_protos_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};

/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};

#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
                                sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
                                sizeof(vhost_txport_stat_strings[0]))

static int
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq = NULL;
        unsigned int i = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }

        return 0;
}

static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
                           struct rte_eth_xstat_name *xstats_names,
                           unsigned int limit __rte_unused)
{
        unsigned int t = 0;
        int count = 0;
        int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (!xstats_names)
                return nstats;
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "rx_%s", vhost_rxport_stat_strings[t].name);
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "tx_%s", vhost_txport_stat_strings[t].name);
                count++;
        }
        return count;
}

static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        unsigned int i;
        unsigned int t;
        unsigned int count = 0;
        struct vhost_queue *vq = NULL;
        unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (n < nxstats)
                return nxstats;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                + vq->stats.missed_pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        vq = dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_rxport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        vq = dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_txport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        return count;
}

static inline void
vhost_count_multicast_broadcast(struct vhost_queue *vq,
                                struct rte_mbuf *mbuf)
{
        struct rte_ether_addr *ea = NULL;
        struct vhost_stats *pstats = &vq->stats;

        ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
        if (rte_is_multicast_ether_addr(ea)) {
                if (rte_is_broadcast_ether_addr(ea))
                        pstats->xstats[VHOST_BROADCAST_PKT]++;
                else
                        pstats->xstats[VHOST_MULTICAST_PKT]++;
        }
}

static void
vhost_update_packet_xstats(struct vhost_queue *vq,
                           struct rte_mbuf **bufs,
                           uint16_t count)
{
        uint32_t pkt_len = 0;
        uint64_t i = 0;
        uint64_t index;
        struct vhost_stats *pstats = &vq->stats;

        for (i = 0; i < count; i++) {
                pkt_len = bufs[i]->pkt_len;
                if (pkt_len == 64) {
                        pstats->xstats[VHOST_64_PKT]++;
                } else if (pkt_len > 64 && pkt_len < 1024) {
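                        /*
                         * For 64 < pkt_len < 1024 the bucket index is the
                         * bit length of pkt_len minus 5: e.g. 65..127 has
                         * bit length 7, giving index 2 (VHOST_65_TO_127_PKT).
                         */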
                        index = (sizeof(pkt_len) * 8)
                                - __builtin_clz(pkt_len) - 5;
                        pstats->xstats[index]++;
                } else {
                        if (pkt_len < 64)
                                pstats->xstats[VHOST_UNDERSIZE_PKT]++;
                        else if (pkt_len <= 1522)
                                pstats->xstats[VHOST_1024_TO_1522_PKT]++;
                        else if (pkt_len > 1522)
                                pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
                }
                vhost_count_multicast_broadcast(vq, bufs[i]);
        }
}

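/*
 * RX burst: an ethdev RX queue drains the guest's TX virtqueue. The
 * allow_queuing/while_queuing handshake keeps the burst from racing
 * with device attach/detach on the control path.
 */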
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_rx = 0;
        uint16_t nb_receive = nb_bufs;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Dequeue packets from guest TX queue */
        while (nb_receive) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_receive,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
                                                  r->mb_pool, &bufs[nb_rx],
                                                  num);

                nb_rx += nb_pkts;
                nb_receive -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_rx;

        for (i = 0; likely(i < nb_rx); i++) {
                bufs[i]->port = r->port;
                bufs[i]->vlan_tci = 0;

                if (r->internal->vlan_strip)
                        rte_vlan_strip(bufs[i]);

                r->stats.bytes += bufs[i]->pkt_len;
        }

        vhost_update_packet_xstats(r, bufs, nb_rx);

out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_rx;
}

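/*
 * TX burst: enqueue into the guest's RX virtqueue. Packets the guest
 * cannot accept are dropped and accounted as missed_pkts.
 */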
static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_tx = 0;
        uint16_t nb_send = 0;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        for (i = 0; i < nb_bufs; i++) {
                struct rte_mbuf *m = bufs[i];

                /* Do VLAN tag insertion */
                if (m->ol_flags & PKT_TX_VLAN_PKT) {
                        int error = rte_vlan_insert(&m);
                        if (unlikely(error)) {
                                rte_pktmbuf_free(m);
                                continue;
                        }
                }

                bufs[nb_send] = m;
                ++nb_send;
        }

        /* Enqueue packets to guest RX queue */
        while (nb_send) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_send,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
                                                  &bufs[nb_tx], num);

                nb_tx += nb_pkts;
                nb_send -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_tx;
        r->stats.missed_pkts += nb_bufs - nb_tx;

        for (i = 0; likely(i < nb_tx); i++)
                r->stats.bytes += bufs[i]->pkt_len;

        vhost_update_packet_xstats(r, bufs, nb_tx);

        /* Per RFC 2863 (ifHCOutMulticastPkts, ifHCOutBroadcastPkts), the
         * "multicast" and "broadcast" counters are increased even when
         * packets are not transmitted successfully.
         */
        for (i = nb_tx; i < nb_bufs; i++)
                vhost_count_multicast_broadcast(r, bufs[i]);

        for (i = 0; likely(i < nb_tx); i++)
                rte_pktmbuf_free(bufs[i]);
out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

        internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);

        return 0;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
        int found = 0;
        struct internal_list *list;
        struct pmd_internal *internal;

        if (ifname == NULL)
                return NULL;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                internal = list->eth_dev->data->dev_private;
                if (!strcmp(internal->iface_name, ifname)) {
                        found = 1;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        if (!found)
                return NULL;

        return list;
}

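/*
 * RX queue qid maps to vring (qid << 1) + 1: even vrings carry the
 * guest's RX traffic, odd vrings the guest's TX that we dequeue from.
 */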
static int
eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int ret = 0;

        vq = dev->data->rx_queues[qid];
        if (!vq) {
                VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
                return -1;
        }

        ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
        if (ret < 0) {
                VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
                return ret;
        }
        VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
        rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
        rte_wmb();

        return ret;
}

static int
eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int ret = 0;

        vq = dev->data->rx_queues[qid];
        if (!vq) {
                VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
                return -1;
        }

        ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
        if (ret < 0) {
                VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
                return ret;
        }
        VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
        rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
        rte_wmb();

        return 0;
}

static void
eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
{
        struct rte_intr_handle *intr_handle = dev->intr_handle;

        if (intr_handle) {
                if (intr_handle->intr_vec)
                        free(intr_handle->intr_vec);
                free(intr_handle);
        }

        dev->intr_handle = NULL;
}

static int
eth_vhost_install_intr(struct rte_eth_dev *dev)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int count = 0;
        int nb_rxq = dev->data->nb_rx_queues;
        int i;
        int ret;

        /* uninstall first if we are reconnecting */
        if (dev->intr_handle)
                eth_vhost_uninstall_intr(dev);

        dev->intr_handle = malloc(sizeof(*dev->intr_handle));
        if (!dev->intr_handle) {
                VHOST_LOG(ERR, "Failed to allocate intr_handle\n");
                return -ENOMEM;
        }
        memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));

        dev->intr_handle->efd_counter_size = sizeof(uint64_t);

        dev->intr_handle->intr_vec =
                malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));

        if (!dev->intr_handle->intr_vec) {
                VHOST_LOG(ERR,
                        "Failed to allocate memory for interrupt vector\n");
                free(dev->intr_handle);
                return -ENOMEM;
        }

        VHOST_LOG(INFO, "Prepare intr vec\n");
        for (i = 0; i < nb_rxq; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq) {
                        VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i);
                        continue;
                }

                ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
                if (ret < 0) {
                        VHOST_LOG(INFO,
                                "Failed to get rxq-%d's vring, skip!\n", i);
                        continue;
                }

                if (vring.kickfd < 0) {
                        VHOST_LOG(INFO,
                                "rxq-%d's kickfd is invalid, skip!\n", i);
                        continue;
                }
                dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
                dev->intr_handle->efds[i] = vring.kickfd;
                count++;
                VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
        }

        dev->intr_handle->nb_efd = count;
        dev->intr_handle->max_intr = count + 1;
        dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;

        return 0;
}

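/*
 * Quiesce the datapath: set or clear allow_queuing on every queue and
 * wait for in-flight bursts (while_queuing) to drain, so the vhost
 * device can be attached or torn down safely.
 */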
static void
update_queuing_status(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        struct vhost_queue *vq;
        unsigned int i;
        int allow_queuing = 1;

        if (!dev->data->rx_queues || !dev->data->tx_queues)
                return;

        if (rte_atomic32_read(&internal->started) == 0 ||
            rte_atomic32_read(&internal->dev_attached) == 0)
                allow_queuing = 0;

        /* Wait until rx/tx_pkt_burst stops accessing vhost device */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }
}

static void
queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
{
        struct vhost_queue *vq;
        int i;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
}

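/*
 * vhost-user callback: a frontend connected on the socket. Bind the
 * new vid to this ethdev, bring the link up and unblock the datapath.
 */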
static int
new_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        struct pmd_internal *internal;
        struct rte_eth_conf *dev_conf;
        unsigned i;
        char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
        int newnode;
#endif

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;
        dev_conf = &eth_dev->data->dev_conf;

#ifdef RTE_LIBRTE_VHOST_NUMA
        newnode = rte_vhost_get_numa_node(vid);
        if (newnode >= 0)
                eth_dev->data->numa_node = newnode;
#endif

        internal->vid = vid;
        if (rte_atomic32_read(&internal->started) == 1) {
                queue_setup(eth_dev, internal);

                if (dev_conf->intr_conf.rxq) {
                        if (eth_vhost_install_intr(eth_dev) < 0) {
                                VHOST_LOG(INFO,
                                        "Failed to install interrupt handler.\n");
                                return -1;
                        }
                }
        } else {
                VHOST_LOG(INFO, "RX/TX queues do not exist yet\n");
        }

        for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
                rte_vhost_enable_guest_notification(vid, i, 0);

        rte_vhost_get_mtu(vid, &eth_dev->data->mtu);

        eth_dev->data->dev_link.link_status = ETH_LINK_UP;

        rte_atomic32_set(&internal->dev_attached, 1);
        update_queuing_status(eth_dev);

        VHOST_LOG(INFO, "Vhost device %d created\n", vid);

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

        return 0;
}

static void
destroy_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct pmd_internal *internal;
        struct vhost_queue *vq;
        struct internal_list *list;
        char ifname[PATH_MAX];
        unsigned i;
        struct rte_vhost_vring_state *state;

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
                return;
        }
        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

        rte_atomic32_set(&internal->dev_attached, 0);
        update_queuing_status(eth_dev);

        eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

        if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                        vq = eth_dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        vq->vid = -1;
                }
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        vq = eth_dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        vq->vid = -1;
                }
        }

        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                state->cur[i] = false;
                state->seen[i] = false;
        }
        state->max_vring = 0;
        rte_spinlock_unlock(&state->lock);

        VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
        eth_vhost_uninstall_intr(eth_dev);

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

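/*
 * vhost-user callback: record per-vring enable/disable so that
 * rte_eth_vhost_get_queue_event() can report the change.
 */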
static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
        struct rte_vhost_vring_state *state;
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        char ifname[PATH_MAX];

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        /* won't be NULL */
        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        if (state->cur[vring] == enable) {
                rte_spinlock_unlock(&state->lock);
                return 0;
        }
        state->cur[vring] = enable;
        state->max_vring = RTE_MAX(vring, state->max_vring);
        rte_spinlock_unlock(&state->lock);

        VHOST_LOG(INFO, "vring%u is %s\n",
                        vring, enable ? "enabled" : "disabled");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

        return 0;
}

static struct vhost_device_ops vhost_ops = {
        .new_device          = new_device,
        .destroy_device      = destroy_device,
        .vring_state_changed = vring_state_changed,
};

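/*
 * Round-robin scan (state->index) over the vrings; report the first
 * enable/disable change the caller has not yet seen.
 */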
int
rte_eth_vhost_get_queue_event(uint16_t port_id,
                struct rte_eth_vhost_queue_event *event)
{
        struct rte_vhost_vring_state *state;
        unsigned int i;
        int idx;

        if (port_id >= RTE_MAX_ETHPORTS) {
                VHOST_LOG(ERR, "Invalid port id\n");
                return -1;
        }

        state = vring_states[port_id];
        if (!state) {
                VHOST_LOG(ERR, "Unused port\n");
                return -1;
        }

        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                idx = state->index++ % (state->max_vring + 1);

                if (state->cur[idx] != state->seen[idx]) {
                        state->seen[idx] = state->cur[idx];
                        event->queue_id = idx / 2;
                        event->rx = idx & 1;
                        event->enable = state->cur[idx];
                        rte_spinlock_unlock(&state->lock);
                        return 0;
                }
        }
        rte_spinlock_unlock(&state->lock);

        return -1;
}

int
rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
        struct internal_list *list;
        struct rte_eth_dev *eth_dev;
        struct vhost_queue *vq;
        int vid = -1;

        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                eth_dev = list->eth_dev;
                if (eth_dev->data->port_id == port_id) {
                        vq = eth_dev->data->rx_queues[0];
                        if (vq) {
                                vid = vq->vid;
                        }
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        return vid;
}

static int
eth_dev_start(struct rte_eth_dev *eth_dev)
{
        struct pmd_internal *internal = eth_dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;

        queue_setup(eth_dev, internal);

        if (rte_atomic32_read(&internal->dev_attached) == 1) {
                if (dev_conf->intr_conf.rxq) {
                        if (eth_vhost_install_intr(eth_dev) < 0) {
                                VHOST_LOG(INFO,
                                        "Failed to install interrupt handler.\n");
                                return -1;
                        }
                }
        }

        rte_atomic32_set(&internal->started, 1);
        update_queuing_status(eth_dev);

        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;

        rte_atomic32_set(&internal->started, 0);
        update_queuing_status(dev);
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal;
        struct internal_list *list;
        unsigned int i;

        internal = dev->data->dev_private;
        if (!internal)
                return;

        eth_dev_stop(dev);

        rte_vhost_driver_unregister(internal->iface_name);

        list = find_internal_resource(internal->iface_name);
        if (!list)
                return;

        pthread_mutex_lock(&internal_list_lock);
        TAILQ_REMOVE(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);
        rte_free(list);

        if (dev->data->rx_queues)
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        rte_free(dev->data->rx_queues[i]);

        if (dev->data->tx_queues)
                for (i = 0; i < dev->data->nb_tx_queues; i++)
                        rte_free(dev->data->tx_queues[i]);

        free(internal->dev_name);
        free(internal->iface_name);
        rte_free(internal);

        dev->data->dev_private = NULL;

        rte_free(vring_states[dev->data->port_id]);
        vring_states[dev->data->port_id] = NULL;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
                return -ENOMEM;
        }

        vq->mb_pool = mb_pool;
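        /* ethdev RX queue N drains the guest TX virtqueue 2*N + 1 */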
        vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
        dev->data->rx_queues[rx_queue_id] = vq;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
                return -ENOMEM;
        }

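        /* ethdev TX queue N feeds the guest RX virtqueue 2*N */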
        vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
        dev->data->tx_queues[tx_queue_id] = vq;

        return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
             struct rte_eth_dev_info *dev_info)
{
        struct pmd_internal *internal;

        internal = dev->data->dev_private;
        if (internal == NULL) {
                VHOST_LOG(ERR, "Invalid device specified\n");
                return -ENODEV;
        }

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = internal->max_queues;
        dev_info->max_tx_queues = internal->max_queues;
        dev_info->min_rx_bufsize = 0;

        dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
                                DEV_TX_OFFLOAD_VLAN_INSERT;
        dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

        return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned i;
        unsigned long rx_total = 0, tx_total = 0;
        unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
        struct vhost_queue *vq;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                stats->q_ipackets[i] = vq->stats.pkts;
                rx_total += stats->q_ipackets[i];

                stats->q_ibytes[i] = vq->stats.bytes;
                rx_total_bytes += stats->q_ibytes[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                stats->q_opackets[i] = vq->stats.pkts;
                tx_total += stats->q_opackets[i];

                stats->q_obytes[i] = vq->stats.bytes;
                tx_total_bytes += stats->q_obytes[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->ibytes = rx_total_bytes;
        stats->obytes = tx_total_bytes;

        return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq;
        unsigned i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
                vq->stats.missed_pkts = 0;
        }

        return 0;
}

static void
eth_queue_release(void *q)
{
        rte_free(q);
}

static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
        /*
         * vHost does not hang onto mbuf. eth_vhost_tx() copies packet data
         * and releases mbuf, so nothing to clean up.
         */
        return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

static uint32_t
eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct vhost_queue *vq;

        vq = dev->data->rx_queues[rx_queue_id];
        if (vq == NULL)
                return 0;

        return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_close = eth_dev_close,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .tx_done_cleanup = eth_tx_done_cleanup,
        .rx_queue_count = eth_rx_queue_count,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .xstats_reset = vhost_dev_xstats_reset,
        .xstats_get = vhost_dev_xstats_get,
        .xstats_get_names = vhost_dev_xstats_get_names,
        .rx_queue_intr_enable = eth_rxq_intr_enable,
        .rx_queue_intr_disable = eth_rxq_intr_disable,
};

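/*
 * Allocate the ethdev, register the vhost-user socket and start the
 * vhost driver. Returns -1 on any failure; the probe path checks this.
 */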
static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
        int16_t queues, const unsigned int numa_node, uint64_t flags,
        uint64_t disable_flags)
{
        const char *name = rte_vdev_device_name(dev);
        struct rte_eth_dev_data *data;
        struct pmd_internal *internal = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct rte_ether_addr *eth_addr = NULL;
        struct rte_vhost_vring_state *vring_state = NULL;
        struct internal_list *list = NULL;

        VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
                numa_node);

        list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
        if (list == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
        if (eth_dev == NULL)
                goto error;
        data = eth_dev->data;

        eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
        if (eth_addr == NULL)
                goto error;
        data->mac_addrs = eth_addr;
        *eth_addr = base_eth_addr;
        eth_addr->addr_bytes[5] = eth_dev->data->port_id;

        vring_state = rte_zmalloc_socket(name,
                        sizeof(*vring_state), 0, numa_node);
        if (vring_state == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internal,
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        internal = eth_dev->data->dev_private;
        internal->dev_name = strdup(name);
        if (internal->dev_name == NULL)
                goto error;
        internal->iface_name = strdup(iface_name);
        if (internal->iface_name == NULL)
                goto error;

        list->eth_dev = eth_dev;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_INSERT_TAIL(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);

        rte_spinlock_init(&vring_state->lock);
        vring_states[eth_dev->data->port_id] = vring_state;

        data->nb_rx_queues = queues;
        data->nb_tx_queues = queues;
        internal->max_queues = queues;
        internal->vid = -1;
        data->dev_link = pmd_link;
        data->dev_flags = RTE_ETH_DEV_INTR_LSC | RTE_ETH_DEV_CLOSE_REMOVE;

        eth_dev->dev_ops = &ops;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_vhost_rx;
        eth_dev->tx_pkt_burst = eth_vhost_tx;

        if (rte_vhost_driver_register(iface_name, flags))
                goto error;

        if (disable_flags) {
                if (rte_vhost_driver_disable_features(iface_name,
                                        disable_flags))
                        goto error;
        }

        if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
                VHOST_LOG(ERR, "Can't register callbacks\n");
                goto error;
        }

        if (rte_vhost_driver_start(iface_name) < 0) {
                VHOST_LOG(ERR, "Failed to start driver for %s\n",
                        iface_name);
                goto error;
        }

        rte_eth_dev_probing_finish(eth_dev);
        return 0;

error:
        if (internal) {
                free(internal->iface_name);
                free(internal->dev_name);
        }
        rte_free(vring_state);
        rte_eth_dev_release_port(eth_dev);
        rte_free(list);

        return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
        const char **iface_name = extra_args;

        if (value == NULL)
                return -1;

        *iface_name = value;

        return 0;
}

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
        uint16_t *n = extra_args;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        errno = 0; /* strtoul only sets errno, it never clears it */
        *n = (uint16_t)strtoul(value, NULL, 0);
        if (*n == USHRT_MAX && errno == ERANGE)
                return -1;

        return 0;
}

static int
rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        char *iface_name;
        uint16_t queues;
        uint64_t flags = 0;
        uint64_t disable_flags = 0;
        int client_mode = 0;
        int dequeue_zero_copy = 0;
        int iommu_support = 0;
        int postcopy_support = 0;
        int tso = 0;
        struct rte_eth_dev *eth_dev;
        const char *name = rte_vdev_device_name(dev);

        VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);

        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                eth_dev = rte_eth_dev_attach_secondary(name);
                if (!eth_dev) {
                        VHOST_LOG(ERR, "Failed to probe %s\n", name);
                        return -1;
                }
                /* TODO: request info from primary to set up Rx and Tx */
                eth_dev->dev_ops = &ops;
                eth_dev->device = &dev->device;
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
        }

        kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
        if (kvlist == NULL)
                return -1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
                                         &open_iface, &iface_name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -1;
                goto out_free;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
                                         &open_int, &queues);
                if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
                        goto out_free;

        } else
                queues = 1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
                                         &open_int, &client_mode);
                if (ret < 0)
                        goto out_free;

                if (client_mode)
                        flags |= RTE_VHOST_USER_CLIENT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
                                         &open_int, &dequeue_zero_copy);
                if (ret < 0)
                        goto out_free;

                if (dequeue_zero_copy)
                        flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
                                         &open_int, &iommu_support);
                if (ret < 0)
                        goto out_free;

                if (iommu_support)
                        flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_POSTCOPY_SUPPORT) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_POSTCOPY_SUPPORT,
                                         &open_int, &postcopy_support);
                if (ret < 0)
                        goto out_free;

                if (postcopy_support)
                        flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_VIRTIO_NET_F_HOST_TSO) == 1) {
                ret = rte_kvargs_process(kvlist,
                                ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
                                &open_int, &tso);
                if (ret < 0)
                        goto out_free;

                if (tso == 0) {
                        disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
                        disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
                }
        }

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        ret = eth_dev_vhost_create(dev, iface_name, queues,
                                   dev->device.numa_node, flags, disable_flags);
        if (ret == -1)
                VHOST_LOG(ERR, "Failed to create %s\n", name);

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
        const char *name;
        struct rte_eth_dev *eth_dev = NULL;

        name = rte_vdev_device_name(dev);
        VHOST_LOG(INFO, "Uninitializing pmd_vhost for %s\n", name);

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return 0;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return rte_eth_dev_release_port(eth_dev);

        eth_dev_close(eth_dev);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_vhost_drv = {
        .probe = rte_pmd_vhost_probe,
        .remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
        "iface=<ifc> "
        "queues=<int> "
        "client=<0|1> "
        "dequeue-zero-copy=<0|1> "
        "iommu-support=<0|1> "
        "postcopy-support=<0|1> "
        "tso=<0|1>");
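
/*
 * Example (illustrative only, not part of this file): attaching the
 * PMD from a DPDK application command line such as testpmd:
 *
 *   --vdev 'net_vhost0,iface=/tmp/vhost0.sock,queues=1,client=1'
 *
 * "iface" names the vhost-user socket; with "client=1" the PMD
 * connects to an existing socket instead of creating one.
 */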

RTE_INIT(vhost_init_log)
{
        vhost_logtype = rte_log_register("pmd.net.vhost");
        if (vhost_logtype >= 0)
                rte_log_set_level(vhost_logtype, RTE_LOG_NOTICE);
}