net/vhost: remove include of numaif.h
drivers/net/vhost/rte_eth_vhost.c (dpdk.git)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 IGEL Co., Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <errno.h>      /* errno, checked by open_int() */
#include <limits.h>     /* USHRT_MAX, used by open_int() */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>      /* snprintf() */
#include <stdlib.h>     /* strtoul(), free() */
#include <string.h>     /* strcmp(), strdup(), memset(), memmove() */
#include <unistd.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_virtio_net.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"

#define ETH_VHOST_IFACE_ARG             "iface"
#define ETH_VHOST_QUEUES_ARG            "queues"
#define ETH_VHOST_CLIENT_ARG            "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY     "dequeue-zero-copy"
#define VHOST_MAX_PKT_BURST 32

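/*
 * Device arguments understood by this PMD. A vhost port is typically
 * created with a vdev string such as (illustrative example, not taken
 * from this file):
 *   --vdev 'net_vhost0,iface=/tmp/sock0,queues=1,client=0'
 */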
static const char *valid_arguments[] = {
        ETH_VHOST_IFACE_ARG,
        ETH_VHOST_QUEUES_ARG,
        ETH_VHOST_CLIENT_ARG,
        ETH_VHOST_DEQUEUE_ZERO_COPY,
        NULL
};

static struct ether_addr base_eth_addr = {
        .addr_bytes = {
                0x56 /* V */,
                0x48 /* H */,
                0x4F /* O */,
                0x53 /* S */,
                0x54 /* T */,
                0x00
        }
};

enum vhost_xstats_pkts {
        VHOST_UNDERSIZE_PKT = 0,
        VHOST_64_PKT,
        VHOST_65_TO_127_PKT,
        VHOST_128_TO_255_PKT,
        VHOST_256_TO_511_PKT,
        VHOST_512_TO_1023_PKT,
        VHOST_1024_TO_1522_PKT,
        VHOST_1523_TO_MAX_PKT,
        VHOST_BROADCAST_PKT,
        VHOST_MULTICAST_PKT,
        VHOST_UNICAST_PKT,
        VHOST_ERRORS_PKT,
        VHOST_ERRORS_FRAGMENTED,
        VHOST_ERRORS_JABBER,
        VHOST_UNKNOWN_PROTOCOL,
        VHOST_XSTATS_MAX,
};

struct vhost_stats {
        uint64_t pkts;
        uint64_t bytes;
        uint64_t missed_pkts;
        uint64_t xstats[VHOST_XSTATS_MAX];
};

struct vhost_queue {
        int vid;
        rte_atomic32_t allow_queuing;
        rte_atomic32_t while_queuing;
        struct pmd_internal *internal;
        struct rte_mempool *mb_pool;
        uint8_t port;
        uint16_t virtqueue_id;
        struct vhost_stats stats;
};

struct pmd_internal {
        rte_atomic32_t dev_attached;
        char *dev_name;
        char *iface_name;
        uint16_t max_queues;
        rte_atomic32_t started;
};

struct internal_list {
        TAILQ_ENTRY(internal_list) next;
        struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
        TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static rte_atomic16_t nb_started_ports;
static pthread_t session_th;

static struct rte_eth_link pmd_link = {
                .link_speed = 10000,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = ETH_LINK_DOWN
};

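/*
 * Per-port vring enable/disable state. Each ethdev queue maps to two
 * virtqueues (one guest RX and one guest TX ring), hence the "* 2"
 * sizing below. "cur" holds the latest state reported by the vhost
 * library; "seen" tracks what the application has already consumed via
 * rte_eth_vhost_get_queue_event().
 */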
struct rte_vhost_vring_state {
        rte_spinlock_t lock;

        bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
        bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
        unsigned int index;
        unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
        char name[VHOST_XSTATS_NAME_SIZE];
        uint64_t offset;
};

/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
        {"fragmented_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
        {"jabber_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
        {"unknown_protos_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};

/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};

#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
                                sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
                                sizeof(vhost_txport_stat_strings[0]))

static void
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq = NULL;
        unsigned int i = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
}

static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
                           struct rte_eth_xstat_name *xstats_names,
                           unsigned int limit __rte_unused)
{
        unsigned int t = 0;
        int count = 0;
        int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (!xstats_names)
                return nstats;
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "rx_%s", vhost_rxport_stat_strings[t].name);
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "tx_%s", vhost_txport_stat_strings[t].name);
                count++;
        }
        return count;
}

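/*
 * The datapath only counts broadcast and multicast frames, so the
 * unicast counter is derived lazily at read time: pkts (plus
 * missed_pkts on the TX side) minus the broadcast and multicast counts.
 */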
static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        unsigned int i;
        unsigned int t;
        unsigned int count = 0;
        struct vhost_queue *vq = NULL;
        unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (n < nxstats)
                return nxstats;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                + vq->stats.missed_pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        vq = dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_rxport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        vq = dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_txport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        return count;
}

static inline void
vhost_count_multicast_broadcast(struct vhost_queue *vq,
                                struct rte_mbuf *mbuf)
{
        struct ether_addr *ea = NULL;
        struct vhost_stats *pstats = &vq->stats;

        ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
        if (is_multicast_ether_addr(ea)) {
                if (is_broadcast_ether_addr(ea))
                        pstats->xstats[VHOST_BROADCAST_PKT]++;
                else
                        pstats->xstats[VHOST_MULTICAST_PKT]++;
        }
}

static void
vhost_update_packet_xstats(struct vhost_queue *vq,
                           struct rte_mbuf **bufs,
                           uint16_t count)
{
        uint32_t pkt_len = 0;
        uint64_t i = 0;
        uint64_t index;
        struct vhost_stats *pstats = &vq->stats;

        for (i = 0; i < count; i++) {
                pkt_len = bufs[i]->pkt_len;
                if (pkt_len == 64) {
                        pstats->xstats[VHOST_64_PKT]++;
                } else if (pkt_len > 64 && pkt_len < 1024) {
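                        /*
                         * For 64 < pkt_len < 1024 the size class follows
                         * from the position of the highest set bit:
                         * 65..127 maps to VHOST_65_TO_127_PKT, 128..255
                         * to VHOST_128_TO_255_PKT, and so on.
                         */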
                        index = (sizeof(pkt_len) * 8)
                                - __builtin_clz(pkt_len) - 5;
                        pstats->xstats[index]++;
                } else {
                        if (pkt_len < 64)
                                pstats->xstats[VHOST_UNDERSIZE_PKT]++;
                        else if (pkt_len <= 1522)
                                pstats->xstats[VHOST_1024_TO_1522_PKT]++;
                        else if (pkt_len > 1522)
                                pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
                }
                vhost_count_multicast_broadcast(vq, bufs[i]);
        }
}

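/*
 * RX burst: drain the guest TX virtqueue in chunks of at most
 * VHOST_MAX_PKT_BURST mbufs. The allow_queuing/while_queuing handshake
 * keeps the burst from touching a vhost device that is being detached.
 */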
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_rx = 0;
        uint16_t nb_receive = nb_bufs;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Dequeue packets from guest TX queue */
        while (nb_receive) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_receive,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
                                                  r->mb_pool, &bufs[nb_rx],
                                                  num);

                nb_rx += nb_pkts;
                nb_receive -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_rx;

        for (i = 0; likely(i < nb_rx); i++) {
                bufs[i]->port = r->port;
                r->stats.bytes += bufs[i]->pkt_len;
        }

        vhost_update_packet_xstats(r, bufs, nb_rx);

out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_rx;
}

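/*
 * TX burst: copy mbufs into the guest RX virtqueue. Packets the guest
 * cannot accept are counted as missed; only successfully enqueued mbufs
 * are freed here, the rest remain owned by the caller.
 */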
static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_tx = 0;
        uint16_t nb_send = nb_bufs;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Enqueue packets to guest RX queue */
        while (nb_send) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_send,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
                                                  &bufs[nb_tx], num);

                nb_tx += nb_pkts;
                nb_send -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_tx;
        r->stats.missed_pkts += nb_bufs - nb_tx;

        for (i = 0; likely(i < nb_tx); i++)
                r->stats.bytes += bufs[i]->pkt_len;

        vhost_update_packet_xstats(r, bufs, nb_tx);

        /* According to RFC 2863 (ifHCOutMulticastPkts and
         * ifHCOutBroadcastPkts), the "multicast" and "broadcast" counters
         * are incremented even for packets that were not transmitted
         * successfully.
         */
        for (i = nb_tx; i < nb_bufs; i++)
                vhost_count_multicast_broadcast(r, bufs[i]);

        for (i = 0; likely(i < nb_tx); i++)
                rte_pktmbuf_free(bufs[i]);
out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
        int found = 0;
        struct internal_list *list;
        struct pmd_internal *internal;

        if (ifname == NULL)
                return NULL;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                internal = list->eth_dev->data->dev_private;
                if (!strcmp(internal->iface_name, ifname)) {
                        found = 1;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        if (!found)
                return NULL;

        return list;
}

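/*
 * Publish the new queuing policy and then wait for every in-flight RX/TX
 * burst to leave its while_queuing critical section, so the caller can
 * safely detach or stop the device afterwards.
 */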
static void
update_queuing_status(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        struct vhost_queue *vq;
        unsigned int i;
        int allow_queuing = 1;

        if (rte_atomic32_read(&internal->started) == 0 ||
            rte_atomic32_read(&internal->dev_attached) == 0)
                allow_queuing = 0;

        /* Wait until rx/tx_pkt_burst stops accessing vhost device */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }
}

static int
new_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        struct pmd_internal *internal;
        struct vhost_queue *vq;
        unsigned int i;
        char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
        int newnode;
#endif

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

#ifdef RTE_LIBRTE_VHOST_NUMA
        newnode = rte_vhost_get_numa_node(vid);
        if (newnode >= 0)
                eth_dev->data->numa_node = newnode;
#endif

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }

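        /* The PMD polls the rings, so ask the guest not to send kick
         * notifications for any virtqueue.
         */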
        for (i = 0; i < rte_vhost_get_queue_num(vid) * VIRTIO_QNUM; i++)
                rte_vhost_enable_guest_notification(vid, i, 0);

        eth_dev->data->dev_link.link_status = ETH_LINK_UP;

        rte_atomic32_set(&internal->dev_attached, 1);
        update_queuing_status(eth_dev);

        RTE_LOG(INFO, PMD, "New connection established\n");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

        return 0;
}

static void
destroy_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct pmd_internal *internal;
        struct vhost_queue *vq;
        struct internal_list *list;
        char ifname[PATH_MAX];
        unsigned int i;
        struct rte_vhost_vring_state *state;

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
                return;
        }
        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

        rte_atomic32_set(&internal->dev_attached, 0);
        update_queuing_status(eth_dev);

        eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = -1;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = -1;
        }

        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                state->cur[i] = false;
                state->seen[i] = false;
        }
        state->max_vring = 0;
        rte_spinlock_unlock(&state->lock);

        RTE_LOG(INFO, PMD, "Connection closed\n");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
        struct rte_vhost_vring_state *state;
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        char ifname[PATH_MAX];

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        /* won't be NULL */
        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        state->cur[vring] = enable;
        state->max_vring = RTE_MAX(vring, state->max_vring);
        rte_spinlock_unlock(&state->lock);

        RTE_LOG(INFO, PMD, "vring%u is %s\n",
                        vring, enable ? "enabled" : "disabled");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

        return 0;
}

int
rte_eth_vhost_get_queue_event(uint8_t port_id,
                struct rte_eth_vhost_queue_event *event)
{
        struct rte_vhost_vring_state *state;
        unsigned int i;
        int idx;

        if (port_id >= RTE_MAX_ETHPORTS) {
                RTE_LOG(ERR, PMD, "Invalid port id\n");
                return -1;
        }

        state = vring_states[port_id];
        if (!state) {
                RTE_LOG(ERR, PMD, "Unused port\n");
                return -1;
        }

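        /* Round-robin scan over the vrings: index idx decodes as
         * queue_id = idx / 2, and odd indices (the guest TX rings,
         * i.e. the PMD's RX side) are reported with event->rx = 1.
         */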
        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                idx = state->index++ % (state->max_vring + 1);

                if (state->cur[idx] != state->seen[idx]) {
                        state->seen[idx] = state->cur[idx];
                        event->queue_id = idx / 2;
                        event->rx = idx & 1;
                        event->enable = state->cur[idx];
                        rte_spinlock_unlock(&state->lock);
                        return 0;
                }
        }
        rte_spinlock_unlock(&state->lock);

        return -1;
}

int
rte_eth_vhost_get_vid_from_port_id(uint8_t port_id)
{
        struct internal_list *list;
        struct rte_eth_dev *eth_dev;
        struct vhost_queue *vq;
        int vid = -1;

        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                eth_dev = list->eth_dev;
                if (eth_dev->data->port_id == port_id) {
                        vq = eth_dev->data->rx_queues[0];
                        if (vq)
                                vid = vq->vid;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        return vid;
}

static void *
vhost_driver_session(void *param __rte_unused)
{
        static struct virtio_net_device_ops vhost_ops;

        /* set vhost arguments */
        vhost_ops.new_device = new_device;
        vhost_ops.destroy_device = destroy_device;
        vhost_ops.vring_state_changed = vring_state_changed;
        if (rte_vhost_driver_callback_register(&vhost_ops) < 0)
                RTE_LOG(ERR, PMD, "Can't register callbacks\n");

        /* start event handling */
        rte_vhost_driver_session_start();

        return NULL;
}

static int
vhost_driver_session_start(void)
{
        int ret;

        ret = pthread_create(&session_th,
                        NULL, vhost_driver_session, NULL);
        if (ret)
                RTE_LOG(ERR, PMD, "Can't create a thread\n");

        return ret;
}

static void
vhost_driver_session_stop(void)
{
        int ret;

        ret = pthread_cancel(session_th);
        if (ret)
                RTE_LOG(ERR, PMD, "Can't cancel the thread\n");

        ret = pthread_join(session_th, NULL);
        if (ret)
                RTE_LOG(ERR, PMD, "Can't join the thread\n");
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;

        rte_atomic32_set(&internal->started, 1);
        update_queuing_status(dev);

        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;

        rte_atomic32_set(&internal->started, 0);
        update_queuing_status(dev);
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal;
        struct internal_list *list;

        internal = dev->data->dev_private;
        if (!internal)
                return;

        rte_vhost_driver_unregister(internal->iface_name);

        list = find_internal_resource(internal->iface_name);
        if (!list)
                return;

        pthread_mutex_lock(&internal_list_lock);
        TAILQ_REMOVE(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);
        rte_free(list);

        free(internal->dev_name);
        free(internal->iface_name);
        rte_free(internal);
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n");
                return -ENOMEM;
        }

        vq->mb_pool = mb_pool;
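        /* The PMD's RX queue drains the guest's TX virtqueue. */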
        vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
        dev->data->rx_queues[rx_queue_id] = vq;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n");
                return -ENOMEM;
        }

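        /* The PMD's TX queue feeds the guest's RX virtqueue. */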
        vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
        dev->data->tx_queues[tx_queue_id] = vq;

        return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
             struct rte_eth_dev_info *dev_info)
{
        struct pmd_internal *internal;

        internal = dev->data->dev_private;
        if (internal == NULL) {
                RTE_LOG(ERR, PMD, "Invalid device specified\n");
                return;
        }

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = internal->max_queues;
        dev_info->max_tx_queues = internal->max_queues;
        dev_info->min_rx_bufsize = 0;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned int i;
        unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
        unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
        struct vhost_queue *vq;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                stats->q_ipackets[i] = vq->stats.pkts;
                rx_total += stats->q_ipackets[i];

                stats->q_ibytes[i] = vq->stats.bytes;
                rx_total_bytes += stats->q_ibytes[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                stats->q_opackets[i] = vq->stats.pkts;
                tx_missed_total += vq->stats.missed_pkts;
                tx_total += stats->q_opackets[i];

                stats->q_obytes[i] = vq->stats.bytes;
                tx_total_bytes += stats->q_obytes[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->oerrors = tx_missed_total;
        stats->ibytes = rx_total_bytes;
        stats->obytes = tx_total_bytes;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq;
        unsigned int i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
                vq->stats.missed_pkts = 0;
        }
}

static void
eth_queue_release(void *q)
{
        rte_free(q);
}

static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
        /*
         * vHost does not hang onto mbufs: eth_vhost_tx() copies packet
         * data into the guest ring and frees the mbuf, so there is
         * nothing to clean up.
         */
        return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

/**
 * Disable features in feature_mask. Returns 0 on success.
 */
int
rte_eth_vhost_feature_disable(uint64_t feature_mask)
{
        return rte_vhost_feature_disable(feature_mask);
}

/**
 * Enable features in feature_mask. Returns 0 on success.
 */
int
rte_eth_vhost_feature_enable(uint64_t feature_mask)
{
        return rte_vhost_feature_enable(feature_mask);
}

/* Returns currently supported vhost features */
uint64_t
rte_eth_vhost_feature_get(void)
{
        return rte_vhost_feature_get();
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_close = eth_dev_close,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .tx_done_cleanup = eth_tx_done_cleanup,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .xstats_reset = vhost_dev_xstats_reset,
        .xstats_get = vhost_dev_xstats_get,
        .xstats_get_names = vhost_dev_xstats_get_names,
};

static struct rte_vdev_driver pmd_vhost_drv;

static int
eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
                     const unsigned int numa_node, uint64_t flags)
{
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internal *internal = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct ether_addr *eth_addr = NULL;
        struct rte_vhost_vring_state *vring_state = NULL;
        struct internal_list *list = NULL;

        RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
                numa_node);

        /* now do all data allocation - for the eth_dev structure and
         * internal (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        internal = rte_zmalloc_socket(name, sizeof(*internal), 0, numa_node);
        if (internal == NULL)
                goto error;

        list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
        if (list == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name);
        if (eth_dev == NULL)
                goto error;

        eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
        if (eth_addr == NULL)
                goto error;
        *eth_addr = base_eth_addr;
        eth_addr->addr_bytes[5] = eth_dev->data->port_id;

        vring_state = rte_zmalloc_socket(name,
                        sizeof(*vring_state), 0, numa_node);
        if (vring_state == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internal,
         * - store numa_node info in ethdev data
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        internal->dev_name = strdup(name);
        if (internal->dev_name == NULL)
                goto error;
        internal->iface_name = strdup(iface_name);
        if (internal->iface_name == NULL)
                goto error;

        list->eth_dev = eth_dev;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_INSERT_TAIL(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);

        rte_spinlock_init(&vring_state->lock);
        vring_states[eth_dev->data->port_id] = vring_state;

        data->dev_private = internal;
        data->port_id = eth_dev->data->port_id;
        memmove(data->name, eth_dev->data->name, sizeof(data->name));
        data->nb_rx_queues = queues;
        data->nb_tx_queues = queues;
        internal->max_queues = queues;
        data->dev_link = pmd_link;
        data->mac_addrs = eth_addr;

        /* We'll replace the 'data' originally allocated by eth_dev, so the
         * vhost PMD's resources won't be shared between multiple processes.
         */
        eth_dev->data = data;
        eth_dev->dev_ops = &ops;
        eth_dev->driver = NULL;
        data->dev_flags =
                RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
        data->kdrv = RTE_KDRV_NONE;
        data->drv_name = pmd_vhost_drv.driver.name;
        data->numa_node = numa_node;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_vhost_rx;
        eth_dev->tx_pkt_burst = eth_vhost_tx;

        if (rte_vhost_driver_register(iface_name, flags))
                goto error;

        /* We need only one message handling thread */
        if (rte_atomic16_add_return(&nb_started_ports, 1) == 1) {
                if (vhost_driver_session_start())
                        goto error;
        }

        return data->port_id;

error:
        if (internal)
                free(internal->dev_name);
        rte_free(vring_state);
        rte_free(eth_addr);
        if (eth_dev)
                rte_eth_dev_release_port(eth_dev);
        rte_free(internal);
        rte_free(list);
        rte_free(data);

        return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
        const char **iface_name = extra_args;

        if (value == NULL)
                return -1;

        *iface_name = value;

        return 0;
}

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
        uint16_t *n = extra_args;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        *n = (uint16_t)strtoul(value, NULL, 0);
        if (*n == USHRT_MAX && errno == ERANGE)
                return -1;

        return 0;
}

static int
rte_pmd_vhost_probe(const char *name, const char *params)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        char *iface_name;
        uint16_t queues;
        uint64_t flags = 0;
        uint16_t client_mode = 0;       /* open_int() writes a uint16_t */
        uint16_t dequeue_zero_copy = 0;

        RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n", name);

        kvlist = rte_kvargs_parse(params, valid_arguments);
        if (kvlist == NULL)
                return -1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
                                         &open_iface, &iface_name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -1;
                goto out_free;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
                                         &open_int, &queues);
                if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
                        goto out_free;

        } else
                queues = 1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
                                         &open_int, &client_mode);
                if (ret < 0)
                        goto out_free;

                if (client_mode)
                        flags |= RTE_VHOST_USER_CLIENT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
                                         &open_int, &dequeue_zero_copy);
                if (ret < 0)
                        goto out_free;

                if (dequeue_zero_copy)
                        flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
        }

        if (eth_dev_vhost_create(name, iface_name, queues,
                                 rte_socket_id(), flags) < 0)
                ret = -1;

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_vhost_remove(const char *name)
{
        struct rte_eth_dev *eth_dev = NULL;
        unsigned int i;

        RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_dev_stop(eth_dev);

        eth_dev_close(eth_dev);

        if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)
                vhost_driver_session_stop();

        rte_free(vring_states[eth_dev->data->port_id]);
        vring_states[eth_dev->data->port_id] = NULL;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
                rte_free(eth_dev->data->rx_queues[i]);
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                rte_free(eth_dev->data->tx_queues[i]);

        rte_free(eth_dev->data->mac_addrs);
        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_vhost_drv = {
        .probe = rte_pmd_vhost_probe,
        .remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
        "iface=<ifc> "
        "queues=<int> "
        "client=<0|1> "
        "dequeue-zero-copy=<0|1>");