/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 IGEL Co., Ltd.
 * Copyright(c) 2016-2018 Intel Corporation
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>     /* malloc/free, strtoul */
#include <string.h>     /* memset, strcmp, strcpy */
#include <errno.h>      /* errno, ERANGE */
#include <limits.h>     /* PATH_MAX, USHRT_MAX */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"

RTE_LOG_REGISTER(vhost_logtype, pmd.net.vhost, NOTICE);

#define VHOST_LOG(level, ...) \
        rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)

enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

#define ETH_VHOST_IFACE_ARG             "iface"
#define ETH_VHOST_QUEUES_ARG            "queues"
#define ETH_VHOST_CLIENT_ARG            "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY     "dequeue-zero-copy"
#define ETH_VHOST_IOMMU_SUPPORT         "iommu-support"
#define ETH_VHOST_POSTCOPY_SUPPORT      "postcopy-support"
#define ETH_VHOST_VIRTIO_NET_F_HOST_TSO "tso"
#define ETH_VHOST_LINEAR_BUF            "linear-buffer"
#define ETH_VHOST_EXT_BUF               "ext-buffer"
#define VHOST_MAX_PKT_BURST 32

static const char *valid_arguments[] = {
        ETH_VHOST_IFACE_ARG,
        ETH_VHOST_QUEUES_ARG,
        ETH_VHOST_CLIENT_ARG,
        ETH_VHOST_DEQUEUE_ZERO_COPY,
        ETH_VHOST_IOMMU_SUPPORT,
        ETH_VHOST_POSTCOPY_SUPPORT,
        ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
        ETH_VHOST_LINEAR_BUF,
        ETH_VHOST_EXT_BUF,
        NULL
};

static struct rte_ether_addr base_eth_addr = {
        .addr_bytes = {
                0x56 /* V */,
                0x48 /* H */,
                0x4F /* O */,
                0x53 /* S */,
                0x54 /* T */,
                0x00
        }
};

enum vhost_xstats_pkts {
        VHOST_UNDERSIZE_PKT = 0,
        VHOST_64_PKT,
        VHOST_65_TO_127_PKT,
        VHOST_128_TO_255_PKT,
        VHOST_256_TO_511_PKT,
        VHOST_512_TO_1023_PKT,
        VHOST_1024_TO_1522_PKT,
        VHOST_1523_TO_MAX_PKT,
        VHOST_BROADCAST_PKT,
        VHOST_MULTICAST_PKT,
        VHOST_UNICAST_PKT,
        VHOST_ERRORS_PKT,
        VHOST_ERRORS_FRAGMENTED,
        VHOST_ERRORS_JABBER,
        VHOST_UNKNOWN_PROTOCOL,
        VHOST_XSTATS_MAX,
};

struct vhost_stats {
        uint64_t pkts;
        uint64_t bytes;
        uint64_t missed_pkts;
        uint64_t xstats[VHOST_XSTATS_MAX];
};

struct vhost_queue {
        int vid;
        rte_atomic32_t allow_queuing;
        rte_atomic32_t while_queuing;
        struct pmd_internal *internal;
        struct rte_mempool *mb_pool;
        uint16_t port;
        uint16_t virtqueue_id;
        struct vhost_stats stats;
};

struct pmd_internal {
        rte_atomic32_t dev_attached;
        char *iface_name;
        uint64_t flags;
        uint64_t disable_flags;
        uint16_t max_queues;
        int vid;
        rte_atomic32_t started;
        uint8_t vlan_strip;
};

struct internal_list {
        TAILQ_ENTRY(internal_list) next;
        struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
        TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct rte_eth_link pmd_link = {
                .link_speed = 10000,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = ETH_LINK_DOWN
};

struct rte_vhost_vring_state {
        rte_spinlock_t lock;

        bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
        bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
        unsigned int index;
        unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
        char name[VHOST_XSTATS_NAME_SIZE];
        uint64_t offset;
};

/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
        {"fragmented_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
        {"jabber_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
        {"unknown_protos_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};

/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};

#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
                                sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
                                sizeof(vhost_txport_stat_strings[0]))

static int
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq = NULL;
        unsigned int i = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }

        return 0;
}

static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
                           struct rte_eth_xstat_name *xstats_names,
                           unsigned int limit __rte_unused)
{
        unsigned int t = 0;
        int count = 0;
        int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (!xstats_names)
                return nstats;
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "rx_%s", vhost_rxport_stat_strings[t].name);
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "tx_%s", vhost_txport_stat_strings[t].name);
                count++;
        }
        return count;
}

static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        unsigned int i;
        unsigned int t;
        unsigned int count = 0;
        struct vhost_queue *vq = NULL;
        unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (n < nxstats)
                return nxstats;

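        /*
         * Unicast counters are derived rather than counted directly:
         * unicast = good packets - (broadcast + multicast). On the TX side,
         * missed packets are included as well, matching the RFC 2863
         * convention used for the multicast/broadcast counters in
         * eth_vhost_tx().
         */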
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                + vq->stats.missed_pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        vq = dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_rxport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        vq = dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_txport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        return count;
}

static inline void
vhost_count_multicast_broadcast(struct vhost_queue *vq,
                                struct rte_mbuf *mbuf)
{
        struct rte_ether_addr *ea = NULL;
        struct vhost_stats *pstats = &vq->stats;

        ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
        if (rte_is_multicast_ether_addr(ea)) {
                if (rte_is_broadcast_ether_addr(ea))
                        pstats->xstats[VHOST_BROADCAST_PKT]++;
                else
                        pstats->xstats[VHOST_MULTICAST_PKT]++;
        }
}

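/*
 * Update the per-queue size histogram and multicast/broadcast counters
 * for a burst of packets. Packets of 65-1023 bytes are binned by their
 * most significant bit: index = 32 - clz(pkt_len) - 5, which maps the
 * 65-127 ... 512-1023 byte ranges onto VHOST_65_TO_127_PKT ...
 * VHOST_512_TO_1023_PKT.
 */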
static void
vhost_update_packet_xstats(struct vhost_queue *vq,
                           struct rte_mbuf **bufs,
                           uint16_t count)
{
        uint32_t pkt_len = 0;
        uint64_t i = 0;
        uint64_t index;
        struct vhost_stats *pstats = &vq->stats;

        for (i = 0; i < count; i++) {
                pkt_len = bufs[i]->pkt_len;
                if (pkt_len == 64) {
                        pstats->xstats[VHOST_64_PKT]++;
                } else if (pkt_len > 64 && pkt_len < 1024) {
                        index = (sizeof(pkt_len) * 8)
                                - __builtin_clz(pkt_len) - 5;
                        pstats->xstats[index]++;
                } else {
                        if (pkt_len < 64)
                                pstats->xstats[VHOST_UNDERSIZE_PKT]++;
                        else if (pkt_len <= 1522)
                                pstats->xstats[VHOST_1024_TO_1522_PKT]++;
                        else if (pkt_len > 1522)
                                pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
                }
                vhost_count_multicast_broadcast(vq, bufs[i]);
        }
}

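/*
 * RX burst: dequeue packets that the guest placed on its TX virtqueue.
 * The allow_queuing/while_queuing pair implements a lock-free handshake
 * with update_queuing_status() so the control path can detach the vhost
 * device while bursts are in flight.
 */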
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_rx = 0;
        uint16_t nb_receive = nb_bufs;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Dequeue packets from guest TX queue */
        while (nb_receive) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_receive,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
                                                  r->mb_pool, &bufs[nb_rx],
                                                  num);

                nb_rx += nb_pkts;
                nb_receive -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_rx;

        for (i = 0; likely(i < nb_rx); i++) {
                bufs[i]->port = r->port;
                bufs[i]->vlan_tci = 0;

                if (r->internal->vlan_strip)
                        rte_vlan_strip(bufs[i]);

                r->stats.bytes += bufs[i]->pkt_len;
        }

        vhost_update_packet_xstats(r, bufs, nb_rx);

out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_rx;
}

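/*
 * TX burst: perform software VLAN insertion where requested, then copy
 * the packets into the guest RX virtqueue. Successfully sent mbufs are
 * freed here; the shortfall between nb_bufs and nb_tx is accounted as
 * missed packets.
 */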
static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_tx = 0;
        uint16_t nb_send = 0;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        for (i = 0; i < nb_bufs; i++) {
                struct rte_mbuf *m = bufs[i];

                /* Do VLAN tag insertion */
                if (m->ol_flags & PKT_TX_VLAN_PKT) {
                        int error = rte_vlan_insert(&m);
                        if (unlikely(error)) {
                                rte_pktmbuf_free(m);
                                continue;
                        }
                }

                bufs[nb_send] = m;
                ++nb_send;
        }

        /* Enqueue packets to guest RX queue */
        while (nb_send) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_send,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
                                                  &bufs[nb_tx], num);

                nb_tx += nb_pkts;
                nb_send -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_tx;
        r->stats.missed_pkts += nb_bufs - nb_tx;

        for (i = 0; likely(i < nb_tx); i++)
                r->stats.bytes += bufs[i]->pkt_len;

        vhost_update_packet_xstats(r, bufs, nb_tx);

        /* According to RFC 2863, sections ifHCOutMulticastPkts and
         * ifHCOutBroadcastPkts (page 42), the "multicast" and "broadcast"
         * counters are incremented even for packets that are not
         * transmitted successfully.
         */
        for (i = nb_tx; i < nb_bufs; i++)
                vhost_count_multicast_broadcast(r, bufs[i]);

        for (i = 0; likely(i < nb_tx); i++)
                rte_pktmbuf_free(bufs[i]);
out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_tx;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
        int found = 0;
        struct internal_list *list;
        struct pmd_internal *internal;

        if (ifname == NULL)
                return NULL;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                internal = list->eth_dev->data->dev_private;
                if (!strcmp(internal->iface_name, ifname)) {
                        found = 1;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        if (!found)
                return NULL;

        return list;
}

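/*
 * RX interrupt control: RX queue qid is backed by guest TX vring
 * (qid << 1) + 1, and guest notification on that vring is what
 * generates events on the kickfd wired into the interrupt handle.
 */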
static int
eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int ret = 0;

        vq = dev->data->rx_queues[qid];
        if (!vq) {
                VHOST_LOG(ERR, "rxq%d is not set up yet\n", qid);
                return -1;
        }

        ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
        if (ret < 0) {
                VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
                return ret;
        }
        VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
        rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
        rte_wmb();

        return ret;
}

static int
eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int ret = 0;

        vq = dev->data->rx_queues[qid];
        if (!vq) {
                VHOST_LOG(ERR, "rxq%d is not set up yet\n", qid);
                return -1;
        }

        ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
        if (ret < 0) {
                VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
                return ret;
        }
        VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
        rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
        rte_wmb();

        return 0;
}

static void
eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
{
        struct rte_intr_handle *intr_handle = dev->intr_handle;

        if (intr_handle) {
                if (intr_handle->intr_vec)
                        free(intr_handle->intr_vec);
                free(intr_handle);
        }

        dev->intr_handle = NULL;
}

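/*
 * Build a vdev interrupt handle backed by the vrings' kickfds so that
 * applications can wait on RX interrupts through the ethdev interrupt
 * API. Queues whose vring or kickfd is not ready yet are skipped; their
 * kickfd is refreshed later by vring_conf_update().
 */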
static int
eth_vhost_install_intr(struct rte_eth_dev *dev)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int count = 0;
        int nb_rxq = dev->data->nb_rx_queues;
        int i;
        int ret;

        /* uninstall first if we are reconnecting */
        if (dev->intr_handle)
                eth_vhost_uninstall_intr(dev);

        dev->intr_handle = malloc(sizeof(*dev->intr_handle));
        if (!dev->intr_handle) {
                VHOST_LOG(ERR, "Failed to allocate intr_handle\n");
                return -ENOMEM;
        }
        memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));

        dev->intr_handle->efd_counter_size = sizeof(uint64_t);

        dev->intr_handle->intr_vec =
                malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));

        if (!dev->intr_handle->intr_vec) {
                VHOST_LOG(ERR,
                        "Failed to allocate memory for interrupt vector\n");
                free(dev->intr_handle);
                dev->intr_handle = NULL;
                return -ENOMEM;
        }

        VHOST_LOG(INFO, "Prepare intr vec\n");
        for (i = 0; i < nb_rxq; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq) {
                        VHOST_LOG(INFO, "rxq-%d not set up yet, skip!\n", i);
                        continue;
                }

                ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
                if (ret < 0) {
                        VHOST_LOG(INFO,
                                "Failed to get rxq-%d's vring, skip!\n", i);
                        continue;
                }

                if (vring.kickfd < 0) {
                        VHOST_LOG(INFO,
                                "rxq-%d's kickfd is invalid, skip!\n", i);
                        continue;
                }
                dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
                dev->intr_handle->efds[i] = vring.kickfd;
                count++;
                VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
        }

        dev->intr_handle->nb_efd = count;
        dev->intr_handle->max_intr = count + 1;
        dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;

        return 0;
}

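/*
 * Quiesce or re-enable the datapath: publish allow_queuing for every
 * queue and spin until no rx/tx burst is left inside its while_queuing
 * critical section, so the vhost device can be attached or detached
 * safely.
 */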
static void
update_queuing_status(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        struct vhost_queue *vq;
        unsigned int i;
        int allow_queuing = 1;

        if (!dev->data->rx_queues || !dev->data->tx_queues)
                return;

        if (rte_atomic32_read(&internal->started) == 0 ||
            rte_atomic32_read(&internal->dev_attached) == 0)
                allow_queuing = 0;

        /* Wait until rx/tx_pkt_burst stops accessing vhost device */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }
}

static void
queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
{
        struct vhost_queue *vq;
        int i;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
}

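/*
 * vhost-user callback: a frontend (e.g. QEMU or virtio-user) connected.
 * Bind the vhost device to the ethdev, re-install RX interrupts if the
 * port was already started and report link up.
 */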
static int
new_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        struct pmd_internal *internal;
        struct rte_eth_conf *dev_conf;
        unsigned i;
        char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
        int newnode;
#endif

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;
        dev_conf = &eth_dev->data->dev_conf;

#ifdef RTE_LIBRTE_VHOST_NUMA
        newnode = rte_vhost_get_numa_node(vid);
        if (newnode >= 0)
                eth_dev->data->numa_node = newnode;
#endif

        internal->vid = vid;
        if (rte_atomic32_read(&internal->started) == 1) {
                queue_setup(eth_dev, internal);

                if (dev_conf->intr_conf.rxq) {
                        if (eth_vhost_install_intr(eth_dev) < 0) {
                                VHOST_LOG(ERR,
                                        "Failed to install interrupt handler.\n");
                                return -1;
                        }
                }
        } else {
                VHOST_LOG(INFO, "RX/TX queues do not exist yet\n");
        }

        for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
                rte_vhost_enable_guest_notification(vid, i, 0);

        rte_vhost_get_mtu(vid, &eth_dev->data->mtu);

        eth_dev->data->dev_link.link_status = ETH_LINK_UP;

        rte_atomic32_set(&internal->dev_attached, 1);
        update_queuing_status(eth_dev);

        VHOST_LOG(INFO, "Vhost device %d created\n", vid);

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

        return 0;
}

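/*
 * vhost-user callback: the frontend disconnected. Quiesce the datapath,
 * mark all queues as detached (vid = -1), reset the per-port vring
 * state tracking and report link down.
 */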
static void
destroy_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct pmd_internal *internal;
        struct vhost_queue *vq;
        struct internal_list *list;
        char ifname[PATH_MAX];
        unsigned i;
        struct rte_vhost_vring_state *state;

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
                return;
        }
        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

        rte_atomic32_set(&internal->dev_attached, 0);
        update_queuing_status(eth_dev);

        eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

        if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                        vq = eth_dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        vq->vid = -1;
                }
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        vq = eth_dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        vq->vid = -1;
                }
        }

        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                state->cur[i] = false;
                state->seen[i] = false;
        }
        state->max_vring = 0;
        rte_spinlock_unlock(&state->lock);

        VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
        eth_vhost_uninstall_intr(eth_dev);

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

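/*
 * Odd vring ids are guest TX vrings, i.e. this PMD's RX queues:
 * rx_idx = (vring_id - 1) / 2. Even ids (the TX side) need no kickfd
 * bookkeeping and yield rx_idx = -1 below.
 */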
static int
vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id)
{
        struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
        struct pmd_internal *internal = eth_dev->data->dev_private;
        struct rte_vhost_vring vring;
        int rx_idx = vring_id % 2 ? (vring_id - 1) >> 1 : -1;
        int ret = 0;

        /*
         * The vring kickfd may be changed after the new device notification.
         * Update it when the vring state is updated.
         */
        if (rx_idx >= 0 && rx_idx < eth_dev->data->nb_rx_queues &&
            rte_atomic32_read(&internal->dev_attached) &&
            rte_atomic32_read(&internal->started) &&
            dev_conf->intr_conf.rxq) {
                ret = rte_vhost_get_vhost_vring(vid, vring_id, &vring);
                if (ret) {
                        VHOST_LOG(ERR, "Failed to get vring %d information.\n",
                                        vring_id);
                        return ret;
                }

                if (vring.kickfd != eth_dev->intr_handle->efds[rx_idx]) {
                        VHOST_LOG(INFO, "kickfd for rxq-%d was changed.\n",
                                          rx_idx);
                        eth_dev->intr_handle->efds[rx_idx] = vring.kickfd;
                }
        }

        return ret;
}

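/*
 * vhost-user callback: a vring was enabled or disabled. Record the new
 * state and signal the application via the QUEUE_STATE event so it can
 * fetch the change with rte_eth_vhost_get_queue_event().
 */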
static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
        struct rte_vhost_vring_state *state;
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        char ifname[PATH_MAX];

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        /* won't be NULL */
        state = vring_states[eth_dev->data->port_id];

        if (enable && vring_conf_update(vid, eth_dev, vring))
                VHOST_LOG(INFO, "Failed to update vring-%d configuration.\n",
                          (int)vring);

        rte_spinlock_lock(&state->lock);
        if (state->cur[vring] == enable) {
                rte_spinlock_unlock(&state->lock);
                return 0;
        }
        state->cur[vring] = enable;
        state->max_vring = RTE_MAX(vring, state->max_vring);
        rte_spinlock_unlock(&state->lock);

        VHOST_LOG(INFO, "vring%u is %s\n",
                        vring, enable ? "enabled" : "disabled");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

        return 0;
}

static struct vhost_device_ops vhost_ops = {
        .new_device          = new_device,
        .destroy_device      = destroy_device,
        .vring_state_changed = vring_state_changed,
};

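/*
 * One-time per-port setup: register the vhost-user socket, apply the
 * disabled virtio features, hook up the callbacks above and start the
 * vhost driver for this interface.
 */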
static int
vhost_driver_setup(struct rte_eth_dev *eth_dev)
{
        struct pmd_internal *internal = eth_dev->data->dev_private;
        struct internal_list *list = NULL;
        struct rte_vhost_vring_state *vring_state = NULL;
        unsigned int numa_node = eth_dev->device->numa_node;
        const char *name = eth_dev->device->name;

        /* Don't try to set up again if it has already been done. */
        list = find_internal_resource(internal->iface_name);
        if (list)
                return 0;

        list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
        if (list == NULL)
                return -1;

        vring_state = rte_zmalloc_socket(name, sizeof(*vring_state),
                                         0, numa_node);
        if (vring_state == NULL)
                goto free_list;

        list->eth_dev = eth_dev;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_INSERT_TAIL(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);

        rte_spinlock_init(&vring_state->lock);
        vring_states[eth_dev->data->port_id] = vring_state;

        if (rte_vhost_driver_register(internal->iface_name, internal->flags))
                goto list_remove;

        if (internal->disable_flags) {
                if (rte_vhost_driver_disable_features(internal->iface_name,
                                                      internal->disable_flags))
                        goto drv_unreg;
        }

        if (rte_vhost_driver_callback_register(internal->iface_name,
                                               &vhost_ops) < 0) {
                VHOST_LOG(ERR, "Can't register callbacks\n");
                goto drv_unreg;
        }

        if (rte_vhost_driver_start(internal->iface_name) < 0) {
                VHOST_LOG(ERR, "Failed to start driver for %s\n",
                          internal->iface_name);
                goto drv_unreg;
        }

        return 0;

drv_unreg:
        rte_vhost_driver_unregister(internal->iface_name);
list_remove:
        vring_states[eth_dev->data->port_id] = NULL;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_REMOVE(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);
        rte_free(vring_state);
free_list:
        rte_free(list);

        return -1;
}

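/*
 * Report the next not-yet-seen vring state change. Vrings are scanned
 * round-robin from a persistent index; odd vring ids are guest TX
 * vrings, hence event->rx = idx & 1 and event->queue_id = idx / 2.
 */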
int
rte_eth_vhost_get_queue_event(uint16_t port_id,
                struct rte_eth_vhost_queue_event *event)
{
        struct rte_vhost_vring_state *state;
        unsigned int i;
        int idx;

        if (port_id >= RTE_MAX_ETHPORTS) {
                VHOST_LOG(ERR, "Invalid port id\n");
                return -1;
        }

        state = vring_states[port_id];
        if (!state) {
                VHOST_LOG(ERR, "Unused port\n");
                return -1;
        }

        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                idx = state->index++ % (state->max_vring + 1);

                if (state->cur[idx] != state->seen[idx]) {
                        state->seen[idx] = state->cur[idx];
                        event->queue_id = idx / 2;
                        event->rx = idx & 1;
                        event->enable = state->cur[idx];
                        rte_spinlock_unlock(&state->lock);
                        return 0;
                }
        }
        rte_spinlock_unlock(&state->lock);

        return -1;
}

int
rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
        struct internal_list *list;
        struct rte_eth_dev *eth_dev;
        struct vhost_queue *vq;
        int vid = -1;

        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                eth_dev = list->eth_dev;
                if (eth_dev->data->port_id == port_id) {
                        vq = eth_dev->data->rx_queues[0];
                        if (vq)
                                vid = vq->vid;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        return vid;
}

static int
eth_dev_configure(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

        /* NOTE: the same process has to operate a vhost interface
         * from beginning to end (from eth_dev configure to eth_dev close).
         * It is the user's responsibility at the moment.
         */
        if (vhost_driver_setup(dev) < 0)
                return -1;

        internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);

        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *eth_dev)
{
        struct pmd_internal *internal = eth_dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;

        queue_setup(eth_dev, internal);

        if (rte_atomic32_read(&internal->dev_attached) == 1) {
                if (dev_conf->intr_conf.rxq) {
                        if (eth_vhost_install_intr(eth_dev) < 0) {
                                VHOST_LOG(ERR,
                                        "Failed to install interrupt handler.\n");
                                return -1;
                        }
                }
        }

        rte_atomic32_set(&internal->started, 1);
        update_queuing_status(eth_dev);

        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;

        rte_atomic32_set(&internal->started, 0);
        update_queuing_status(dev);
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal;
        struct internal_list *list;
        unsigned int i;

        internal = dev->data->dev_private;
        if (!internal)
                return;

        eth_dev_stop(dev);

        list = find_internal_resource(internal->iface_name);
        if (list) {
                rte_vhost_driver_unregister(internal->iface_name);
                pthread_mutex_lock(&internal_list_lock);
                TAILQ_REMOVE(&internal_list, list, next);
                pthread_mutex_unlock(&internal_list_lock);
                rte_free(list);
        }

        if (dev->data->rx_queues)
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        rte_free(dev->data->rx_queues[i]);

        if (dev->data->tx_queues)
                for (i = 0; i < dev->data->nb_tx_queues; i++)
                        rte_free(dev->data->tx_queues[i]);

        rte_free(internal->iface_name);
        rte_free(internal);

        dev->data->dev_private = NULL;

        rte_free(vring_states[dev->data->port_id]);
        vring_states[dev->data->port_id] = NULL;
}

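/*
 * RX queue i is backed by guest TX virtqueue 2 * i + VIRTIO_TXQ and TX
 * queue i by guest RX virtqueue 2 * i + VIRTIO_RXQ. Descriptor counts
 * are dictated by the frontend, so nb_rx_desc/nb_tx_desc are ignored.
 */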
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
                return -ENOMEM;
        }

        vq->mb_pool = mb_pool;
        vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
        dev->data->rx_queues[rx_queue_id] = vq;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
                return -ENOMEM;
        }

        vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
        dev->data->tx_queues[tx_queue_id] = vq;

        return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
             struct rte_eth_dev_info *dev_info)
{
        struct pmd_internal *internal;

        internal = dev->data->dev_private;
        if (internal == NULL) {
                VHOST_LOG(ERR, "Invalid device specified\n");
                return -ENODEV;
        }

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = internal->max_queues;
        dev_info->max_tx_queues = internal->max_queues;
        dev_info->min_rx_bufsize = 0;

        dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
                                DEV_TX_OFFLOAD_VLAN_INSERT;
        dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

        return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned i;
        unsigned long rx_total = 0, tx_total = 0;
        unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
        struct vhost_queue *vq;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                stats->q_ipackets[i] = vq->stats.pkts;
                rx_total += stats->q_ipackets[i];

                stats->q_ibytes[i] = vq->stats.bytes;
                rx_total_bytes += stats->q_ibytes[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                stats->q_opackets[i] = vq->stats.pkts;
                tx_total += stats->q_opackets[i];

                stats->q_obytes[i] = vq->stats.bytes;
                tx_total_bytes += stats->q_obytes[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->ibytes = rx_total_bytes;
        stats->obytes = tx_total_bytes;

        return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq;
        unsigned i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
                vq->stats.missed_pkts = 0;
        }

        return 0;
}

static void
eth_queue_release(void *q)
{
        rte_free(q);
}

static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
        /*
         * vHost does not hang onto mbufs: eth_vhost_tx() copies the packet
         * data and releases the mbuf, so there is nothing to clean up.
         */
        return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

static uint32_t
eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct vhost_queue *vq;

        vq = dev->data->rx_queues[rx_queue_id];
        if (vq == NULL)
                return 0;

        return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_close = eth_dev_close,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .tx_done_cleanup = eth_tx_done_cleanup,
        .rx_queue_count = eth_rx_queue_count,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .xstats_reset = vhost_dev_xstats_reset,
        .xstats_get = vhost_dev_xstats_get,
        .xstats_get_names = vhost_dev_xstats_get_names,
        .rx_queue_intr_enable = eth_rxq_intr_enable,
        .rx_queue_intr_disable = eth_rxq_intr_disable,
};

static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
        int16_t queues, const unsigned int numa_node, uint64_t flags,
        uint64_t disable_flags)
{
        const char *name = rte_vdev_device_name(dev);
        struct rte_eth_dev_data *data;
        struct pmd_internal *internal = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct rte_ether_addr *eth_addr = NULL;

        VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
                numa_node);

        /* reserve an ethdev entry */
        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
        if (eth_dev == NULL)
                goto error;
        data = eth_dev->data;

        eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
        if (eth_addr == NULL)
                goto error;
        data->mac_addrs = eth_addr;
        *eth_addr = base_eth_addr;
        eth_addr->addr_bytes[5] = eth_dev->data->port_id;

        /* now put it all together
         * - store queue data in internal,
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        internal = eth_dev->data->dev_private;
        internal->iface_name = rte_malloc_socket(name, strlen(iface_name) + 1,
                                                 0, numa_node);
        if (internal->iface_name == NULL)
                goto error;
        strcpy(internal->iface_name, iface_name);

        data->nb_rx_queues = queues;
        data->nb_tx_queues = queues;
        internal->max_queues = queues;
        internal->vid = -1;
        internal->flags = flags;
        internal->disable_flags = disable_flags;
        data->dev_link = pmd_link;
        data->dev_flags = RTE_ETH_DEV_INTR_LSC | RTE_ETH_DEV_CLOSE_REMOVE;
        data->promiscuous = 1;
        data->all_multicast = 1;

        eth_dev->dev_ops = &ops;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_vhost_rx;
        eth_dev->tx_pkt_burst = eth_vhost_tx;

        rte_eth_dev_probing_finish(eth_dev);
        return 0;

error:
        if (internal)
                rte_free(internal->iface_name);
        rte_eth_dev_release_port(eth_dev);

        return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
        const char **iface_name = extra_args;

        if (value == NULL)
                return -1;

        *iface_name = value;

        return 0;
}

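/* Devarg parser: convert a numeric string into a uint16_t, treating
 * strtoul() overflow (USHRT_MAX with ERANGE) as an error.
 */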
static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
        uint16_t *n = extra_args;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        *n = (uint16_t)strtoul(value, NULL, 0);
        if (*n == USHRT_MAX && errno == ERANGE)
                return -1;

        return 0;
}

static int
rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        char *iface_name;
        uint16_t queues;
        uint64_t flags = 0;
        uint64_t disable_flags = 0;
        int client_mode = 0;
        int dequeue_zero_copy = 0;
        int iommu_support = 0;
        int postcopy_support = 0;
        int tso = 0;
        int linear_buf = 0;
        int ext_buf = 0;
        struct rte_eth_dev *eth_dev;
        const char *name = rte_vdev_device_name(dev);

        VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);

        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                eth_dev = rte_eth_dev_attach_secondary(name);
                if (!eth_dev) {
                        VHOST_LOG(ERR, "Failed to probe %s\n", name);
                        return -1;
                }
                eth_dev->rx_pkt_burst = eth_vhost_rx;
                eth_dev->tx_pkt_burst = eth_vhost_tx;
                eth_dev->dev_ops = &ops;
                if (dev->device.numa_node == SOCKET_ID_ANY)
                        dev->device.numa_node = rte_socket_id();
                eth_dev->device = &dev->device;
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
        }

        kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
        if (kvlist == NULL)
                return -1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
                                         &open_iface, &iface_name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -1;
                goto out_free;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
                                         &open_int, &queues);
                if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
                        goto out_free;

        } else
                queues = 1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
                                         &open_int, &client_mode);
                if (ret < 0)
                        goto out_free;

                if (client_mode)
                        flags |= RTE_VHOST_USER_CLIENT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
                                         &open_int, &dequeue_zero_copy);
                if (ret < 0)
                        goto out_free;

                if (dequeue_zero_copy)
                        flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
                                         &open_int, &iommu_support);
                if (ret < 0)
                        goto out_free;

                if (iommu_support)
                        flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_POSTCOPY_SUPPORT) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_POSTCOPY_SUPPORT,
                                         &open_int, &postcopy_support);
                if (ret < 0)
                        goto out_free;

                if (postcopy_support)
                        flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_VIRTIO_NET_F_HOST_TSO) == 1) {
                ret = rte_kvargs_process(kvlist,
                                ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
                                &open_int, &tso);
                if (ret < 0)
                        goto out_free;

                if (tso == 0) {
                        disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
                        disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
                }
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_LINEAR_BUF) == 1) {
                ret = rte_kvargs_process(kvlist,
                                ETH_VHOST_LINEAR_BUF,
                                &open_int, &linear_buf);
                if (ret < 0)
                        goto out_free;

                if (linear_buf == 1)
                        flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_EXT_BUF) == 1) {
                ret = rte_kvargs_process(kvlist,
                                ETH_VHOST_EXT_BUF,
                                &open_int, &ext_buf);
                if (ret < 0)
                        goto out_free;

                if (ext_buf == 1)
                        flags |= RTE_VHOST_USER_EXTBUF_SUPPORT;
        }

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        ret = eth_dev_vhost_create(dev, iface_name, queues,
                                   dev->device.numa_node, flags, disable_flags);
        if (ret == -1)
                VHOST_LOG(ERR, "Failed to create %s\n", name);

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
        const char *name;
        struct rte_eth_dev *eth_dev = NULL;

        name = rte_vdev_device_name(dev);
        VHOST_LOG(INFO, "Un-Initializing pmd_vhost for %s\n", name);

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return 0;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return rte_eth_dev_release_port(eth_dev);

        eth_dev_close(eth_dev);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_vhost_drv = {
        .probe = rte_pmd_vhost_probe,
        .remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
        "iface=<ifc> "
        "queues=<int> "
        "client=<0|1> "
        "dequeue-zero-copy=<0|1> "
        "iommu-support=<0|1> "
        "postcopy-support=<0|1> "
        "tso=<0|1> "
        "linear-buffer=<0|1> "
        "ext-buffer=<0|1>");