drivers/net: enable hotplug on secondary process
drivers/net/vhost/rte_eth_vhost.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 IGEL Co., Ltd.
 * Copyright(c) 2016-2018 Intel Corporation
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"

static int vhost_logtype;

#define VHOST_LOG(level, ...) \
        rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)

enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

#define ETH_VHOST_IFACE_ARG             "iface"
#define ETH_VHOST_QUEUES_ARG            "queues"
#define ETH_VHOST_CLIENT_ARG            "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY     "dequeue-zero-copy"
#define ETH_VHOST_IOMMU_SUPPORT         "iommu-support"
#define VHOST_MAX_PKT_BURST 32

static const char *valid_arguments[] = {
        ETH_VHOST_IFACE_ARG,
        ETH_VHOST_QUEUES_ARG,
        ETH_VHOST_CLIENT_ARG,
        ETH_VHOST_DEQUEUE_ZERO_COPY,
        ETH_VHOST_IOMMU_SUPPORT,
        NULL
};
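
/*
 * Typical instantiation (a usage sketch, not part of this driver): the
 * PMD is created through EAL vdev arguments matching the list above,
 * e.g.
 *
 *   --vdev 'net_vhost0,iface=/tmp/sock0,queues=2,client=1'
 *
 * "iface" names the vhost-user socket path, "queues" the number of
 * queue pairs, and the remaining keys map to RTE_VHOST_USER_* flags in
 * rte_pmd_vhost_probe() below. The socket path and queue count are
 * illustrative values.
 */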

static struct ether_addr base_eth_addr = {
        .addr_bytes = {
                0x56 /* V */,
                0x48 /* H */,
                0x4F /* O */,
                0x53 /* S */,
                0x54 /* T */,
                0x00
        }
};

enum vhost_xstats_pkts {
        VHOST_UNDERSIZE_PKT = 0,
        VHOST_64_PKT,
        VHOST_65_TO_127_PKT,
        VHOST_128_TO_255_PKT,
        VHOST_256_TO_511_PKT,
        VHOST_512_TO_1023_PKT,
        VHOST_1024_TO_1522_PKT,
        VHOST_1523_TO_MAX_PKT,
        VHOST_BROADCAST_PKT,
        VHOST_MULTICAST_PKT,
        VHOST_UNICAST_PKT,
        VHOST_ERRORS_PKT,
        VHOST_ERRORS_FRAGMENTED,
        VHOST_ERRORS_JABBER,
        VHOST_UNKNOWN_PROTOCOL,
        VHOST_XSTATS_MAX,
};

struct vhost_stats {
        uint64_t pkts;
        uint64_t bytes;
        uint64_t missed_pkts;
        uint64_t xstats[VHOST_XSTATS_MAX];
};

struct vhost_queue {
        int vid;
        rte_atomic32_t allow_queuing;
        rte_atomic32_t while_queuing;
        struct pmd_internal *internal;
        struct rte_mempool *mb_pool;
        uint16_t port;
        uint16_t virtqueue_id;
        struct vhost_stats stats;
};

struct pmd_internal {
        rte_atomic32_t dev_attached;
        char *dev_name;
        char *iface_name;
        uint16_t max_queues;
        int vid;
        rte_atomic32_t started;
        uint8_t vlan_strip;
};

struct internal_list {
        TAILQ_ENTRY(internal_list) next;
        struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
        TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct rte_eth_link pmd_link = {
                .link_speed = 10000,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = ETH_LINK_DOWN
};

struct rte_vhost_vring_state {
        rte_spinlock_t lock;

        bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
        bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
        unsigned int index;
        unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
        char name[VHOST_XSTATS_NAME_SIZE];
        uint64_t offset;
};

/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
        {"fragmented_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
        {"jabber_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
        {"unknown_protos_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};

/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};

#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
                                sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
                                sizeof(vhost_txport_stat_strings[0]))
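
/*
 * How an application might read these counters (a sketch; port_id is
 * assumed to be a configured vhost port, and error handling is elided):
 *
 *   int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *   struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *   struct rte_eth_xstat *vals = malloc(n * sizeof(*vals));
 *
 *   rte_eth_xstats_get_names(port_id, names, n);
 *   rte_eth_xstats_get(port_id, vals, n);
 *   for (int i = 0; i < n; i++)
 *       printf("%s: %" PRIu64 "\n", names[vals[i].id].name, vals[i].value);
 *
 * The names come out as "rx_..."/"tx_..." per the prefixes noted above.
 */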

static void
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq = NULL;
        unsigned int i = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
}

static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
                           struct rte_eth_xstat_name *xstats_names,
                           unsigned int limit __rte_unused)
{
        unsigned int t = 0;
        int count = 0;
        int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (!xstats_names)
                return nstats;
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "rx_%s", vhost_rxport_stat_strings[t].name);
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "tx_%s", vhost_txport_stat_strings[t].name);
                count++;
        }
        return count;
}

static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        unsigned int i;
        unsigned int t;
        unsigned int count = 0;
        struct vhost_queue *vq = NULL;
        unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (n < nxstats)
                return nxstats;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                + vq->stats.missed_pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        vq = dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_rxport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        vq = dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_txport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        return count;
}

static inline void
vhost_count_multicast_broadcast(struct vhost_queue *vq,
                                struct rte_mbuf *mbuf)
{
        struct ether_addr *ea = NULL;
        struct vhost_stats *pstats = &vq->stats;

        ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
        if (is_multicast_ether_addr(ea)) {
                if (is_broadcast_ether_addr(ea))
                        pstats->xstats[VHOST_BROADCAST_PKT]++;
                else
                        pstats->xstats[VHOST_MULTICAST_PKT]++;
        }
}

static void
vhost_update_packet_xstats(struct vhost_queue *vq,
                           struct rte_mbuf **bufs,
                           uint16_t count)
{
        uint32_t pkt_len = 0;
        uint64_t i = 0;
        uint64_t index;
        struct vhost_stats *pstats = &vq->stats;

        for (i = 0; i < count; i++) {
                pkt_len = bufs[i]->pkt_len;
                if (pkt_len == 64) {
                        pstats->xstats[VHOST_64_PKT]++;
                } else if (pkt_len > 64 && pkt_len < 1024) {
                        index = (sizeof(pkt_len) * 8)
                                - __builtin_clz(pkt_len) - 5;
                        pstats->xstats[index]++;
                } else {
                        if (pkt_len < 64)
                                pstats->xstats[VHOST_UNDERSIZE_PKT]++;
                        else if (pkt_len <= 1522)
                                pstats->xstats[VHOST_1024_TO_1522_PKT]++;
                        else if (pkt_len > 1522)
                                pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
                }
                vhost_count_multicast_broadcast(vq, bufs[i]);
        }
}
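
/*
 * A note on the index computation above: for 64 < pkt_len < 1024 the
 * bucket is derived from the bit width of pkt_len, relying on the
 * VHOST_*_PKT enum ordering. Worked example: pkt_len = 100 gives
 * 32 - __builtin_clz(100) - 5 = 32 - 25 - 5 = 2 = VHOST_65_TO_127_PKT;
 * pkt_len = 512 gives 32 - 22 - 5 = 5 = VHOST_512_TO_1023_PKT.
 */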

static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_rx = 0;
        uint16_t nb_receive = nb_bufs;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Dequeue packets from guest TX queue */
        while (nb_receive) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_receive,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
                                                  r->mb_pool, &bufs[nb_rx],
                                                  num);

                nb_rx += nb_pkts;
                nb_receive -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_rx;

        for (i = 0; likely(i < nb_rx); i++) {
                bufs[i]->port = r->port;
                bufs[i]->vlan_tci = 0;

                if (r->internal->vlan_strip)
                        rte_vlan_strip(bufs[i]);

                r->stats.bytes += bufs[i]->pkt_len;
        }

        vhost_update_packet_xstats(r, bufs, nb_rx);

out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_rx;
}
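
/*
 * Application-side receive path (a minimal polling sketch, not part of
 * this driver; port_id and queue 0 are assumed to have gone through the
 * usual ethdev configure/queue_setup/start sequence):
 *
 *   struct rte_mbuf *pkts[VHOST_MAX_PKT_BURST];
 *   uint16_t n = rte_eth_rx_burst(port_id, 0, pkts, VHOST_MAX_PKT_BURST);
 *   for (uint16_t i = 0; i < n; i++) {
 *       process(pkts[i]);          // application-defined, hypothetical
 *       rte_pktmbuf_free(pkts[i]);
 *   }
 */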

static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_tx = 0;
        uint16_t nb_send = 0;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        for (i = 0; i < nb_bufs; i++) {
                struct rte_mbuf *m = bufs[i];

                /* Do VLAN tag insertion */
                if (m->ol_flags & PKT_TX_VLAN_PKT) {
                        int error = rte_vlan_insert(&m);
                        if (unlikely(error)) {
                                rte_pktmbuf_free(m);
                                continue;
                        }
                }

                bufs[nb_send] = m;
                ++nb_send;
        }

        /* Enqueue packets to guest RX queue */
        while (nb_send) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_send,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
                                                  &bufs[nb_tx], num);

                nb_tx += nb_pkts;
                nb_send -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_tx;
        r->stats.missed_pkts += nb_bufs - nb_tx;

        for (i = 0; likely(i < nb_tx); i++)
                r->stats.bytes += bufs[i]->pkt_len;

        vhost_update_packet_xstats(r, bufs, nb_tx);

        /* According to RFC 2863, the ifHCOutMulticastPkts and
         * ifHCOutBroadcastPkts counters also include packets that were
         * not transmitted successfully, so count the dropped packets
         * here as well.
         */
        for (i = nb_tx; i < nb_bufs; i++)
                vhost_count_multicast_broadcast(r, bufs[i]);

        for (i = 0; likely(i < nb_tx); i++)
                rte_pktmbuf_free(bufs[i]);
out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_tx;
}
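
/*
 * To exercise the VLAN insertion branch above, a sender tags the mbuf
 * before transmitting (a sketch; m is a caller-owned mbuf and 100 an
 * arbitrary VLAN ID):
 *
 *   m->ol_flags |= PKT_TX_VLAN_PKT;
 *   m->vlan_tci = 100;
 *   rte_eth_tx_burst(port_id, 0, &m, 1);
 *
 * rte_vlan_insert() then uses m->vlan_tci to build the 802.1Q header.
 */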

static int
eth_dev_configure(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

        internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);

        return 0;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
        int found = 0;
        struct internal_list *list;
        struct pmd_internal *internal;

        if (ifname == NULL)
                return NULL;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                internal = list->eth_dev->data->dev_private;
                if (!strcmp(internal->iface_name, ifname)) {
                        found = 1;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        if (!found)
                return NULL;

        return list;
}

static int
eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int ret = 0;

        vq = dev->data->rx_queues[qid];
        if (!vq) {
                VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
                return -1;
        }

        ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
        if (ret < 0) {
                VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
                return ret;
        }
        VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
        rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
        rte_wmb();

        return ret;
}

static int
eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int ret = 0;

        vq = dev->data->rx_queues[qid];
        if (!vq) {
                VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
                return -1;
        }

        ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
        if (ret < 0) {
                VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
                return ret;
        }
        VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
        rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
        rte_wmb();

        return 0;
}
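
/*
 * These two callbacks back rte_eth_dev_rx_intr_enable()/_disable(). An
 * interrupt-driven receive loop on the application side could look like
 * the following sketch (per-thread epoll; queue qid on port_id assumed
 * to be configured):
 *
 *   rte_eth_dev_rx_intr_ctl_q(port_id, qid, RTE_EPOLL_PER_THREAD,
 *                             RTE_INTR_EVENT_ADD, NULL);
 *   rte_eth_dev_rx_intr_enable(port_id, qid);
 *
 *   struct rte_epoll_event ev;
 *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *   rte_eth_dev_rx_intr_disable(port_id, qid);
 *   ... drain the queue with rte_eth_rx_burst(), then re-enable ...
 */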

static void
eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
{
        struct rte_intr_handle *intr_handle = dev->intr_handle;

        if (intr_handle) {
                if (intr_handle->intr_vec)
                        free(intr_handle->intr_vec);
                free(intr_handle);
        }

        dev->intr_handle = NULL;
}

static int
eth_vhost_install_intr(struct rte_eth_dev *dev)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int count = 0;
        int nb_rxq = dev->data->nb_rx_queues;
        int i;
        int ret;

        /* uninstall first if we are reconnecting */
        if (dev->intr_handle)
                eth_vhost_uninstall_intr(dev);

        dev->intr_handle = malloc(sizeof(*dev->intr_handle));
        if (!dev->intr_handle) {
                VHOST_LOG(ERR, "Failed to allocate intr_handle\n");
                return -ENOMEM;
        }
        memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));

        dev->intr_handle->efd_counter_size = sizeof(uint64_t);

        dev->intr_handle->intr_vec =
                malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));

        if (!dev->intr_handle->intr_vec) {
                VHOST_LOG(ERR,
                        "Failed to allocate memory for interrupt vector\n");
                free(dev->intr_handle);
                return -ENOMEM;
        }

        VHOST_LOG(INFO, "Prepare intr vec\n");
        for (i = 0; i < nb_rxq; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq) {
                        VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i);
                        continue;
                }

                ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
                if (ret < 0) {
                        VHOST_LOG(INFO,
                                "Failed to get rxq-%d's vring, skip!\n", i);
                        continue;
                }

                if (vring.kickfd < 0) {
                        VHOST_LOG(INFO,
                                "rxq-%d's kickfd is invalid, skip!\n", i);
                        continue;
                }
                dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
                dev->intr_handle->efds[i] = vring.kickfd;
                count++;
                VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
        }

        dev->intr_handle->nb_efd = count;
        dev->intr_handle->max_intr = count + 1;
        dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;

        return 0;
}

static void
update_queuing_status(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        struct vhost_queue *vq;
        unsigned int i;
        int allow_queuing = 1;

        if (!dev->data->rx_queues || !dev->data->tx_queues)
                return;

        if (rte_atomic32_read(&internal->started) == 0 ||
            rte_atomic32_read(&internal->dev_attached) == 0)
                allow_queuing = 0;

        /* Wait until rx/tx_pkt_burst stops accessing vhost device */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }
}
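
/*
 * The allow_queuing/while_queuing pair above forms a lightweight
 * handshake with the datapath: eth_vhost_rx()/eth_vhost_tx() set
 * while_queuing around each burst and re-check allow_queuing inside it,
 * so once the control path clears allow_queuing and then observes
 * while_queuing == 0 for a queue, that queue can no longer touch the
 * vhost device being started, stopped or detached.
 */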

static void
queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
{
        struct vhost_queue *vq;
        int i;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
}

static int
new_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        struct pmd_internal *internal;
        struct rte_eth_conf *dev_conf;
        unsigned int i;
        char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
        int newnode;
#endif

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(INFO, "Invalid interface name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;
        dev_conf = &eth_dev->data->dev_conf;

#ifdef RTE_LIBRTE_VHOST_NUMA
        newnode = rte_vhost_get_numa_node(vid);
        if (newnode >= 0)
                eth_dev->data->numa_node = newnode;
#endif

        internal->vid = vid;
        if (rte_atomic32_read(&internal->started) == 1) {
                queue_setup(eth_dev, internal);

                if (dev_conf->intr_conf.rxq) {
                        if (eth_vhost_install_intr(eth_dev) < 0) {
                                VHOST_LOG(INFO,
                                        "Failed to install interrupt handler.\n");
                                return -1;
                        }
                }
        } else {
                VHOST_LOG(INFO, "RX/TX queues do not exist yet\n");
        }

        for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
                rte_vhost_enable_guest_notification(vid, i, 0);

        rte_vhost_get_mtu(vid, &eth_dev->data->mtu);

        eth_dev->data->dev_link.link_status = ETH_LINK_UP;

        rte_atomic32_set(&internal->dev_attached, 1);
        update_queuing_status(eth_dev);

        VHOST_LOG(INFO, "Vhost device %d created\n", vid);

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

        return 0;
}

static void
destroy_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct pmd_internal *internal;
        struct vhost_queue *vq;
        struct internal_list *list;
        char ifname[PATH_MAX];
        unsigned int i;
        struct rte_vhost_vring_state *state;

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
                return;
        }
        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

        rte_atomic32_set(&internal->dev_attached, 0);
        update_queuing_status(eth_dev);

        eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

        if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                        vq = eth_dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        vq->vid = -1;
                }
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        vq = eth_dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        vq->vid = -1;
                }
        }

        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                state->cur[i] = false;
                state->seen[i] = false;
        }
        state->max_vring = 0;
        rte_spinlock_unlock(&state->lock);

        VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
        eth_vhost_uninstall_intr(eth_dev);

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
        struct rte_vhost_vring_state *state;
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        char ifname[PATH_MAX];

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        /* won't be NULL */
        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        state->cur[vring] = enable;
        state->max_vring = RTE_MAX(vring, state->max_vring);
        rte_spinlock_unlock(&state->lock);

        VHOST_LOG(INFO, "vring%u is %s\n",
                        vring, enable ? "enabled" : "disabled");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

        return 0;
}

static struct vhost_device_ops vhost_ops = {
        .new_device          = new_device,
        .destroy_device      = destroy_device,
        .vring_state_changed = vring_state_changed,
};

int
rte_eth_vhost_get_queue_event(uint16_t port_id,
                struct rte_eth_vhost_queue_event *event)
{
        struct rte_vhost_vring_state *state;
        unsigned int i;
        int idx;

        if (port_id >= RTE_MAX_ETHPORTS) {
                VHOST_LOG(ERR, "Invalid port id\n");
                return -1;
        }

        state = vring_states[port_id];
        if (!state) {
                VHOST_LOG(ERR, "Unused port\n");
                return -1;
        }

        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                idx = state->index++ % (state->max_vring + 1);

                if (state->cur[idx] != state->seen[idx]) {
                        state->seen[idx] = state->cur[idx];
                        event->queue_id = idx / 2;
                        event->rx = idx & 1;
                        event->enable = state->cur[idx];
                        rte_spinlock_unlock(&state->lock);
                        return 0;
                }
        }
        rte_spinlock_unlock(&state->lock);

        return -1;
}
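
/*
 * Intended consumption (a sketch): from the application's
 * RTE_ETH_EVENT_QUEUE_STATE callback, drain all pending events:
 *
 *   struct rte_eth_vhost_queue_event ev;
 *
 *   while (rte_eth_vhost_get_queue_event(port_id, &ev) == 0)
 *       printf("queue %u (%s) %s\n", ev.queue_id,
 *              ev.rx ? "rx" : "tx",
 *              ev.enable ? "enabled" : "disabled");
 */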

int
rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
        struct internal_list *list;
        struct rte_eth_dev *eth_dev;
        struct vhost_queue *vq;
        int vid = -1;

        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                eth_dev = list->eth_dev;
                if (eth_dev->data->port_id == port_id) {
                        vq = eth_dev->data->rx_queues[0];
                        if (vq)
                                vid = vq->vid;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        return vid;
}

static int
eth_dev_start(struct rte_eth_dev *eth_dev)
{
        struct pmd_internal *internal = eth_dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;

        queue_setup(eth_dev, internal);

        if (rte_atomic32_read(&internal->dev_attached) == 1) {
                if (dev_conf->intr_conf.rxq) {
                        if (eth_vhost_install_intr(eth_dev) < 0) {
                                VHOST_LOG(INFO,
                                        "Failed to install interrupt handler.\n");
                                return -1;
                        }
                }
        }

        rte_atomic32_set(&internal->started, 1);
        update_queuing_status(eth_dev);

        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;

        rte_atomic32_set(&internal->started, 0);
        update_queuing_status(dev);
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal;
        struct internal_list *list;
        unsigned int i;

        internal = dev->data->dev_private;
        if (!internal)
                return;

        eth_dev_stop(dev);

        rte_vhost_driver_unregister(internal->iface_name);

        list = find_internal_resource(internal->iface_name);
        if (!list)
                return;

        pthread_mutex_lock(&internal_list_lock);
        TAILQ_REMOVE(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);
        rte_free(list);

        if (dev->data->rx_queues)
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        rte_free(dev->data->rx_queues[i]);

        if (dev->data->tx_queues)
                for (i = 0; i < dev->data->nb_tx_queues; i++)
                        rte_free(dev->data->tx_queues[i]);

        rte_free(dev->data->mac_addrs);
        free(internal->dev_name);
        free(internal->iface_name);
        rte_free(internal);

        dev->data->dev_private = NULL;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
                return -ENOMEM;
        }

        vq->mb_pool = mb_pool;
        vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
        dev->data->rx_queues[rx_queue_id] = vq;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
                return -ENOMEM;
        }

        vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
        dev->data->tx_queues[tx_queue_id] = vq;

        return 0;
}
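
/*
 * Mapping used by the two setup functions above: ethdev rx queue i reads
 * the guest TX ring (virtqueue 2 * i + VIRTIO_TXQ) and ethdev tx queue i
 * writes the guest RX ring (virtqueue 2 * i + VIRTIO_RXQ); e.g. rxq 1 ->
 * virtqueue 3 and txq 1 -> virtqueue 2.
 */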

static void
eth_dev_info(struct rte_eth_dev *dev,
             struct rte_eth_dev_info *dev_info)
{
        struct pmd_internal *internal;

        internal = dev->data->dev_private;
        if (internal == NULL) {
                VHOST_LOG(ERR, "Invalid device specified\n");
                return;
        }

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = internal->max_queues;
        dev_info->max_tx_queues = internal->max_queues;
        dev_info->min_rx_bufsize = 0;

        dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
                                DEV_TX_OFFLOAD_VLAN_INSERT;
        dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned int i;
        unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
        unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
        struct vhost_queue *vq;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                stats->q_ipackets[i] = vq->stats.pkts;
                rx_total += stats->q_ipackets[i];

                stats->q_ibytes[i] = vq->stats.bytes;
                rx_total_bytes += stats->q_ibytes[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                stats->q_opackets[i] = vq->stats.pkts;
                tx_missed_total += vq->stats.missed_pkts;
                tx_total += stats->q_opackets[i];

                stats->q_obytes[i] = vq->stats.bytes;
                tx_total_bytes += stats->q_obytes[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->oerrors = tx_missed_total;
        stats->ibytes = rx_total_bytes;
        stats->obytes = tx_total_bytes;

        return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq;
        unsigned int i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
                vq->stats.missed_pkts = 0;
        }
}

static void
eth_queue_release(void *q)
{
        rte_free(q);
}

static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
        /*
         * vHost does not hang onto mbufs: eth_vhost_tx() copies the packet
         * data into the guest ring and frees the mbuf itself, so there is
         * nothing to clean up here.
         */
        return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

static uint32_t
eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct vhost_queue *vq;

        vq = dev->data->rx_queues[rx_queue_id];
        if (vq == NULL)
                return 0;

        return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_close = eth_dev_close,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .tx_done_cleanup = eth_tx_done_cleanup,
        .rx_queue_count = eth_rx_queue_count,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .xstats_reset = vhost_dev_xstats_reset,
        .xstats_get = vhost_dev_xstats_get,
        .xstats_get_names = vhost_dev_xstats_get_names,
        .rx_queue_intr_enable = eth_rxq_intr_enable,
        .rx_queue_intr_disable = eth_rxq_intr_disable,
};

static struct rte_vdev_driver pmd_vhost_drv;

static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
        int16_t queues, const unsigned int numa_node, uint64_t flags)
{
        const char *name = rte_vdev_device_name(dev);
        struct rte_eth_dev_data *data;
        struct pmd_internal *internal = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct ether_addr *eth_addr = NULL;
        struct rte_vhost_vring_state *vring_state = NULL;
        struct internal_list *list = NULL;

        VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
                numa_node);

        list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
        if (list == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
        if (eth_dev == NULL)
                goto error;

        eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
        if (eth_addr == NULL)
                goto error;
        *eth_addr = base_eth_addr;
        eth_addr->addr_bytes[5] = eth_dev->data->port_id;

        vring_state = rte_zmalloc_socket(name,
                        sizeof(*vring_state), 0, numa_node);
        if (vring_state == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internal,
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        internal = eth_dev->data->dev_private;
        internal->dev_name = strdup(name);
        if (internal->dev_name == NULL)
                goto error;
        internal->iface_name = strdup(iface_name);
        if (internal->iface_name == NULL)
                goto error;

        list->eth_dev = eth_dev;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_INSERT_TAIL(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);

        rte_spinlock_init(&vring_state->lock);
        vring_states[eth_dev->data->port_id] = vring_state;

        data = eth_dev->data;
        data->nb_rx_queues = queues;
        data->nb_tx_queues = queues;
        internal->max_queues = queues;
        internal->vid = -1;
        data->dev_link = pmd_link;
        data->mac_addrs = eth_addr;
        data->dev_flags = RTE_ETH_DEV_INTR_LSC;

        eth_dev->dev_ops = &ops;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_vhost_rx;
        eth_dev->tx_pkt_burst = eth_vhost_tx;

        if (rte_vhost_driver_register(iface_name, flags))
                goto error;

        if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
                VHOST_LOG(ERR, "Can't register callbacks\n");
                goto error;
        }

        if (rte_vhost_driver_start(iface_name) < 0) {
                VHOST_LOG(ERR, "Failed to start driver for %s\n",
                        iface_name);
                goto error;
        }

        rte_eth_dev_probing_finish(eth_dev);
        return data->port_id;

error:
        if (internal) {
                free(internal->iface_name);
                free(internal->dev_name);
        }
        rte_free(vring_state);
        rte_free(eth_addr);
        if (eth_dev)
                rte_eth_dev_release_port(eth_dev);
        rte_free(internal);
        rte_free(list);

        return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
        const char **iface_name = extra_args;

        if (value == NULL)
                return -1;

        *iface_name = value;

        return 0;
}

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
        uint16_t *n = extra_args;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        *n = (uint16_t)strtoul(value, NULL, 0);
        if (*n == USHRT_MAX && errno == ERANGE)
                return -1;

        return 0;
}

static int
rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        char *iface_name;
        uint16_t queues;
        uint64_t flags = 0;
        int client_mode = 0;
        int dequeue_zero_copy = 0;
        int iommu_support = 0;
        struct rte_eth_dev *eth_dev;
        const char *name = rte_vdev_device_name(dev);

        VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);

        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                eth_dev = rte_eth_dev_attach_secondary(name);
                if (!eth_dev) {
                        VHOST_LOG(ERR, "Failed to probe %s\n", name);
                        return -1;
                }
                /* TODO: request info from primary to set up Rx and Tx */
                eth_dev->dev_ops = &ops;
                eth_dev->device = &dev->device;
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
        }

        kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
        if (kvlist == NULL)
                return -1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
                                         &open_iface, &iface_name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -1;
                goto out_free;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
                                         &open_int, &queues);
                if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
                        goto out_free;

        } else
                queues = 1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
                                         &open_int, &client_mode);
                if (ret < 0)
                        goto out_free;

                if (client_mode)
                        flags |= RTE_VHOST_USER_CLIENT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
                                         &open_int, &dequeue_zero_copy);
                if (ret < 0)
                        goto out_free;

                if (dequeue_zero_copy)
                        flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
                                         &open_int, &iommu_support);
                if (ret < 0)
                        goto out_free;

                if (iommu_support)
                        flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
        }

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        eth_dev_vhost_create(dev, iface_name, queues, dev->device.numa_node,
                flags);

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}
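
/*
 * With the secondary-process branch above in place, the port can also be
 * hot-plugged at runtime, e.g. (a sketch; device name and socket path
 * are illustrative):
 *
 *   rte_eal_hotplug_add("vdev", "net_vhost0", "iface=/tmp/sock0,queues=1");
 *
 * When issued from a secondary process, EAL is expected to forward the
 * request so that the primary performs the actual vhost registration.
 */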

static int
rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
        const char *name;
        struct rte_eth_dev *eth_dev = NULL;

        name = rte_vdev_device_name(dev);
        VHOST_LOG(INFO, "Uninitializing pmd_vhost for %s\n", name);

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -ENODEV;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return rte_eth_dev_release_port_secondary(eth_dev);

        eth_dev_close(eth_dev);

        rte_free(vring_states[eth_dev->data->port_id]);
        vring_states[eth_dev->data->port_id] = NULL;

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_vhost_drv = {
        .probe = rte_pmd_vhost_probe,
        .remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
        "iface=<ifc> "
        "queues=<int> "
        "client=<0|1> "
        "dequeue-zero-copy=<0|1> "
        "iommu-support=<0|1>");

RTE_INIT(vhost_init_log)
{
        vhost_logtype = rte_log_register("pmd.net.vhost");
        if (vhost_logtype >= 0)
                rte_log_set_level(vhost_logtype, RTE_LOG_NOTICE);
}