dpdk.git: drivers/net/vhost/rte_eth_vhost.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 IGEL Co., Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_virtio_net.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"

#define ETH_VHOST_IFACE_ARG		"iface"
#define ETH_VHOST_QUEUES_ARG		"queues"

static const char *drivername = "VHOST PMD";

static const char *valid_arguments[] = {
	ETH_VHOST_IFACE_ARG,
	ETH_VHOST_QUEUES_ARG,
	NULL
};

static struct ether_addr base_eth_addr = {
	.addr_bytes = {
		0x56 /* V */,
		0x48 /* H */,
		0x4F /* O */,
		0x53 /* S */,
		0x54 /* T */,
		0x00
	}
};

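/*
 * Per-queue context. allow_queuing and while_queuing implement a simple
 * lock-free handshake between the datapath and the vhost connection
 * callbacks: the control path clears allow_queuing and then spins until
 * while_queuing drops to zero, guaranteeing that no rx/tx burst is still
 * touching the vhost device before it is torn down.
 */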
struct vhost_queue {
	rte_atomic32_t allow_queuing;
	rte_atomic32_t while_queuing;
	struct virtio_net *device;
	struct pmd_internal *internal;
	struct rte_mempool *mb_pool;
	uint8_t port;
	uint16_t virtqueue_id;
	uint64_t rx_pkts;
	uint64_t tx_pkts;
	uint64_t missed_pkts;
	uint64_t rx_bytes;
	uint64_t tx_bytes;
};

struct pmd_internal {
	char *dev_name;
	char *iface_name;
	uint16_t max_queues;

	volatile uint16_t once;
};

struct internal_list {
	TAILQ_ENTRY(internal_list) next;
	struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
	TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static rte_atomic16_t nb_started_ports;
static pthread_t session_th;

static struct rte_eth_link pmd_link = {
		.link_speed = 10000,
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_status = ETH_LINK_DOWN
};

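/*
 * Per-port vring enable state. cur[] holds the state last reported by
 * the vhost library, seen[] the state last consumed through
 * rte_eth_vhost_get_queue_event(); a mismatch between the two marks a
 * pending event. Even slots are the guest's RX rings (this PMD's TX
 * side), odd slots the guest's TX rings (this PMD's RX side).
 */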
struct rte_vhost_vring_state {
	rte_spinlock_t lock;

	bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
	bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
	unsigned int index;
	unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

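/*
 * RX burst: a packet received on this port is one the guest transmitted,
 * so we dequeue from the guest TX virtqueue. The allow_queuing check is
 * repeated after raising while_queuing to close the race with a
 * concurrent destroy_device().
 */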
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct vhost_queue *r = q;
	uint16_t i, nb_rx = 0;

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		return 0;

	rte_atomic32_set(&r->while_queuing, 1);

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		goto out;

	/* Dequeue packets from guest TX queue */
	nb_rx = rte_vhost_dequeue_burst(r->device,
			r->virtqueue_id, r->mb_pool, bufs, nb_bufs);

	r->rx_pkts += nb_rx;

	for (i = 0; likely(i < nb_rx); i++) {
		bufs[i]->port = r->port;
		r->rx_bytes += bufs[i]->pkt_len;
	}

out:
	rte_atomic32_set(&r->while_queuing, 0);

	return nb_rx;
}

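/*
 * TX burst: enqueue to the guest RX virtqueue. Successfully enqueued
 * mbufs are freed here, since vhost copies the data into guest buffers;
 * anything beyond nb_tx is left to the caller, matching the usual
 * ethdev tx_burst contract. Packets the guest had no room for are
 * counted in missed_pkts.
 */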
static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct vhost_queue *r = q;
	uint16_t i, nb_tx = 0;

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		return 0;

	rte_atomic32_set(&r->while_queuing, 1);

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		goto out;

	/* Enqueue packets to guest RX queue */
	nb_tx = rte_vhost_enqueue_burst(r->device,
			r->virtqueue_id, bufs, nb_bufs);

	r->tx_pkts += nb_tx;
	r->missed_pkts += nb_bufs - nb_tx;

	for (i = 0; likely(i < nb_tx); i++)
		r->tx_bytes += bufs[i]->pkt_len;

	for (i = 0; likely(i < nb_tx); i++)
		rte_pktmbuf_free(bufs[i]);
out:
	rte_atomic32_set(&r->while_queuing, 0);

	return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
	int found = 0;
	struct internal_list *list;
	struct pmd_internal *internal;

	if (ifname == NULL)
		return NULL;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		internal = list->eth_dev->data->dev_private;
		if (!strcmp(internal->iface_name, ifname)) {
			found = 1;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	if (!found)
		return NULL;

	return list;
}

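/*
 * vhost callback, invoked when a guest connects to the socket: bind the
 * vhost device to the ethdev that registered this interface name, wire
 * every rx/tx queue to it, report link up, and only then open the
 * queuing gates so the datapath may run.
 */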
static int
new_device(struct virtio_net *dev)
{
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	struct pmd_internal *internal;
	struct vhost_queue *vq;
	unsigned i;
	char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
	int newnode;
#endif

	if (dev == NULL) {
		RTE_LOG(INFO, PMD, "Invalid argument\n");
		return -1;
	}

	rte_vhost_get_ifname(dev->vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
		return -1;
	}

	eth_dev = list->eth_dev;
	internal = eth_dev->data->dev_private;

#ifdef RTE_LIBRTE_VHOST_NUMA
	newnode = rte_vhost_get_numa_node(dev->vid);
	if (newnode >= 0)
		eth_dev->data->numa_node = newnode;
#endif

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		vq = eth_dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		vq->device = dev;
		vq->internal = internal;
		vq->port = eth_dev->data->port_id;
	}
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		vq = eth_dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		vq->device = dev;
		vq->internal = internal;
		vq->port = eth_dev->data->port_id;
	}

	for (i = 0; i < rte_vhost_get_queue_num(dev->vid) * VIRTIO_QNUM; i++)
		rte_vhost_enable_guest_notification(dev, i, 0);

	dev->priv = eth_dev;
	eth_dev->data->dev_link.link_status = ETH_LINK_UP;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		vq = eth_dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, 1);
	}
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		vq = eth_dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, 1);
	}

	RTE_LOG(INFO, PMD, "New connection established\n");

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC);

	return 0;
}

static void
destroy_device(volatile struct virtio_net *dev)
{
	struct rte_eth_dev *eth_dev;
	struct vhost_queue *vq;
	unsigned i;

	if (dev == NULL) {
		RTE_LOG(INFO, PMD, "Invalid argument\n");
		return;
	}

	eth_dev = (struct rte_eth_dev *)dev->priv;
	if (eth_dev == NULL) {
		RTE_LOG(INFO, PMD, "Failed to find an ethdev\n");
		return;
	}

	/* Wait until rx/tx_pkt_burst stops accessing vhost device */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		vq = eth_dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, 0);
		while (rte_atomic32_read(&vq->while_queuing))
			rte_pause();
	}
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		vq = eth_dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, 0);
		while (rte_atomic32_read(&vq->while_queuing))
			rte_pause();
	}

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	dev->priv = NULL;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		vq = eth_dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		vq->device = NULL;
	}
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		vq = eth_dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		vq->device = NULL;
	}

	RTE_LOG(INFO, PMD, "Connection closed\n");

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC);
}

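/*
 * vhost callback, invoked when the guest enables or disables a single
 * vring. The new state is recorded under the port's spinlock and the
 * application is notified through the QUEUE_STATE ethdev event; it can
 * then drain the details with rte_eth_vhost_get_queue_event().
 */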
static int
vring_state_changed(struct virtio_net *dev, uint16_t vring, int enable)
{
	struct rte_vhost_vring_state *state;
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	char ifname[PATH_MAX];

	if (dev == NULL) {
		RTE_LOG(ERR, PMD, "Invalid argument\n");
		return -1;
	}

	rte_vhost_get_ifname(dev->vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
		return -1;
	}

	eth_dev = list->eth_dev;
	/* won't be NULL */
	state = vring_states[eth_dev->data->port_id];
	rte_spinlock_lock(&state->lock);
	state->cur[vring] = enable;
	state->max_vring = RTE_MAX(vring, state->max_vring);
	rte_spinlock_unlock(&state->lock);

	RTE_LOG(INFO, PMD, "vring%u is %s\n",
			vring, enable ? "enabled" : "disabled");

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE);

	return 0;
}

int
rte_eth_vhost_get_queue_event(uint8_t port_id,
		struct rte_eth_vhost_queue_event *event)
{
	struct rte_vhost_vring_state *state;
	unsigned int i;
	int idx;

	if (port_id >= RTE_MAX_ETHPORTS) {
		RTE_LOG(ERR, PMD, "Invalid port id\n");
		return -1;
	}

	state = vring_states[port_id];
	if (!state) {
		RTE_LOG(ERR, PMD, "Unused port\n");
		return -1;
	}

	rte_spinlock_lock(&state->lock);
	for (i = 0; i <= state->max_vring; i++) {
		idx = state->index++ % (state->max_vring + 1);

		if (state->cur[idx] != state->seen[idx]) {
			state->seen[idx] = state->cur[idx];
			event->queue_id = idx / 2;
			event->rx = idx & 1;
			event->enable = state->cur[idx];
			rte_spinlock_unlock(&state->lock);
			return 0;
		}
	}
	rte_spinlock_unlock(&state->lock);

	return -1;
}
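
/*
 * Example application usage (a sketch, not part of this driver): drain
 * all pending events from a RTE_ETH_EVENT_QUEUE_STATE callback. The
 * handler name is hypothetical.
 *
 *	static void
 *	queue_state_handler(uint8_t port_id, enum rte_eth_event_type type,
 *			    void *param)
 *	{
 *		struct rte_eth_vhost_queue_event event;
 *
 *		while (rte_eth_vhost_get_queue_event(port_id, &event) == 0)
 *			printf("port %u %s queue %u %s\n", port_id,
 *			       event.rx ? "rx" : "tx", event.queue_id,
 *			       event.enable ? "enabled" : "disabled");
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_QUEUE_STATE,
 *				      queue_state_handler, NULL);
 */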

static void *
vhost_driver_session(void *param __rte_unused)
{
	static struct virtio_net_device_ops vhost_ops;

	/* set vhost arguments */
	vhost_ops.new_device = new_device;
	vhost_ops.destroy_device = destroy_device;
	vhost_ops.vring_state_changed = vring_state_changed;
	if (rte_vhost_driver_callback_register(&vhost_ops) < 0)
		RTE_LOG(ERR, PMD, "Can't register callbacks\n");

	/* start event handling */
	rte_vhost_driver_session_start();

	return NULL;
}

static int
vhost_driver_session_start(void)
{
	int ret;

	ret = pthread_create(&session_th,
			NULL, vhost_driver_session, NULL);
	if (ret)
		RTE_LOG(ERR, PMD, "Can't create a thread\n");

	return ret;
}

static void
vhost_driver_session_stop(void)
{
	int ret;

	ret = pthread_cancel(session_th);
	if (ret)
		RTE_LOG(ERR, PMD, "Can't cancel the thread\n");

	ret = pthread_join(session_th, NULL);
	if (ret)
		RTE_LOG(ERR, PMD, "Can't join the thread\n");
}

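/*
 * The vhost socket is registered on the first start of this port (the
 * 'once' guard), and a single session thread handles vhost events for
 * all vhost ports: it is created when the first port starts and torn
 * down when the last one stops.
 */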
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;
	int ret = 0;

	if (rte_atomic16_cmpset(&internal->once, 0, 1)) {
		ret = rte_vhost_driver_register(internal->iface_name);
		if (ret)
			return ret;
	}

	/* We need only one message handling thread */
	if (rte_atomic16_add_return(&nb_started_ports, 1) == 1)
		ret = vhost_driver_session_start();

	return ret;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;

	if (rte_atomic16_cmpset(&internal->once, 1, 0))
		rte_vhost_driver_unregister(internal->iface_name);

	if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)
		vhost_driver_session_stop();
}

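/*
 * Queue setup. Note the cross-over in virtqueue numbering: the PMD's
 * RX queue N reads from the guest's TX virtqueue (N * VIRTIO_QNUM +
 * VIRTIO_TXQ) and its TX queue N writes to the guest's RX virtqueue
 * (N * VIRTIO_QNUM + VIRTIO_RXQ).
 */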
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct vhost_queue *vq;

	vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (vq == NULL) {
		RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n");
		return -ENOMEM;
	}

	vq->mb_pool = mb_pool;
	vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
	dev->data->rx_queues[rx_queue_id] = vq;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct vhost_queue *vq;

	vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (vq == NULL) {
		RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n");
		return -ENOMEM;
	}

	vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
	dev->data->tx_queues[tx_queue_id] = vq;

	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
	     struct rte_eth_dev_info *dev_info)
{
	struct pmd_internal *internal;

	internal = dev->data->dev_private;
	if (internal == NULL) {
		RTE_LOG(ERR, PMD, "Invalid device specified\n");
		return;
	}

	dev_info->driver_name = drivername;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = internal->max_queues;
	dev_info->max_tx_queues = internal->max_queues;
	dev_info->min_rx_bufsize = 0;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned i;
	unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
	unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
	struct vhost_queue *vq;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;
		vq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = vq->rx_pkts;
		rx_total += stats->q_ipackets[i];

		stats->q_ibytes[i] = vq->rx_bytes;
		rx_total_bytes += stats->q_ibytes[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;
		vq = dev->data->tx_queues[i];
		stats->q_opackets[i] = vq->tx_pkts;
		tx_missed_total += vq->missed_pkts;
		tx_total += stats->q_opackets[i];

		stats->q_obytes[i] = vq->tx_bytes;
		tx_total_bytes += stats->q_obytes[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;
	stats->imissed = tx_missed_total;
	stats->ibytes = rx_total_bytes;
	stats->obytes = tx_total_bytes;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	struct vhost_queue *vq;
	unsigned i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;
		vq = dev->data->rx_queues[i];
		vq->rx_pkts = 0;
		vq->rx_bytes = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;
		vq = dev->data->tx_queues[i];
		vq->tx_pkts = 0;
		vq->tx_bytes = 0;
		vq->missed_pkts = 0;
	}
}

static void
eth_queue_release(void *q)
{
	rte_free(q);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

/**
 * Disable features in feature_mask. Returns 0 on success.
 */
int
rte_eth_vhost_feature_disable(uint64_t feature_mask)
{
	return rte_vhost_feature_disable(feature_mask);
}

/**
 * Enable features in feature_mask. Returns 0 on success.
 */
int
rte_eth_vhost_feature_enable(uint64_t feature_mask)
{
	return rte_vhost_feature_enable(feature_mask);
}

/* Returns currently supported vhost features */
uint64_t
rte_eth_vhost_feature_get(void)
{
	return rte_vhost_feature_get();
}
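
/*
 * Example application usage (a sketch): the negotiated feature set can
 * be trimmed before any guest connects, e.g. to turn off mergeable RX
 * buffers:
 *
 *	rte_eth_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
 *
 * Note that these wrappers act globally on the vhost library, not per
 * port.
 */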

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};

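/*
 * Allocate and register one vhost ethdev: private data, a MAC derived
 * from "VHOST" plus the port id, per-port vring state, and an entry in
 * internal_list so the vhost callbacks can find the port by interface
 * name.
 */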
static int
eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
		     const unsigned numa_node)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internal *internal = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct ether_addr *eth_addr = NULL;
	struct rte_vhost_vring_state *vring_state = NULL;
	struct internal_list *list = NULL;

	RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
		numa_node);

	/* now do all data allocation - for eth_dev structure and
	 * internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	internal = rte_zmalloc_socket(name, sizeof(*internal), 0, numa_node);
	if (internal == NULL)
		goto error;

	list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
	if (list == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
	if (eth_dev == NULL)
		goto error;

	eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
	if (eth_addr == NULL)
		goto error;
	*eth_addr = base_eth_addr;
	eth_addr->addr_bytes[5] = eth_dev->data->port_id;

	vring_state = rte_zmalloc_socket(name,
			sizeof(*vring_state), 0, numa_node);
	if (vring_state == NULL)
		goto error;

	TAILQ_INIT(&eth_dev->link_intr_cbs);

	/* now put it all together
	 * - store queue data in internal,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	internal->dev_name = strdup(name);
	if (internal->dev_name == NULL)
		goto error;
	internal->iface_name = strdup(iface_name);
	if (internal->iface_name == NULL)
		goto error;

	list->eth_dev = eth_dev;
	pthread_mutex_lock(&internal_list_lock);
	TAILQ_INSERT_TAIL(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_spinlock_init(&vring_state->lock);
	vring_states[eth_dev->data->port_id] = vring_state;

	data->dev_private = internal;
	data->port_id = eth_dev->data->port_id;
	memmove(data->name, eth_dev->data->name, sizeof(data->name));
	data->nb_rx_queues = queues;
	data->nb_tx_queues = queues;
	internal->max_queues = queues;
	data->dev_link = pmd_link;
	data->mac_addrs = eth_addr;

	/* We'll replace the 'data' originally allocated by eth_dev, so the
	 * vhost PMD resources won't be shared between multiple processes.
	 */
	eth_dev->data = data;
	eth_dev->dev_ops = &ops;
	eth_dev->driver = NULL;
	data->dev_flags =
		RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = internal->dev_name;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_vhost_rx;
	eth_dev->tx_pkt_burst = eth_vhost_tx;

	return data->port_id;

error:
	if (internal)
		free(internal->dev_name);
	rte_free(vring_state);
	rte_free(eth_addr);
	if (eth_dev)
		rte_eth_dev_release_port(eth_dev);
	rte_free(internal);
	rte_free(list);
	rte_free(data);

	return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
	const char **iface_name = extra_args;

	if (value == NULL)
		return -1;

	*iface_name = value;

	return 0;
}

static inline int
open_queues(const char *key __rte_unused, const char *value, void *extra_args)
{
	uint16_t *q = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	*q = (uint16_t)strtoul(value, NULL, 0);
	if (*q == USHRT_MAX && errno == ERANGE)
		return -1;

	if (*q > RTE_MAX_QUEUES_PER_PORT)
		return -1;

	return 0;
}

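/*
 * Virtual device init, invoked once per --vdev instance. Typical EAL
 * usage (testpmd shown as an example):
 *
 *	testpmd ... --vdev 'eth_vhost0,iface=/tmp/sock0,queues=2'
 *
 * "iface" (mandatory) is the vhost-user socket path; "queues"
 * (optional, default 1) is the number of queue pairs.
 */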
static int
rte_pmd_vhost_devinit(const char *name, const char *params)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	char *iface_name;
	uint16_t queues;

	RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n", name);

	kvlist = rte_kvargs_parse(params, valid_arguments);
	if (kvlist == NULL)
		return -1;

	if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
					 &open_iface, &iface_name);
		if (ret < 0)
			goto out_free;
	} else {
		ret = -1;
		goto out_free;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
					 &open_queues, &queues);
		if (ret < 0)
			goto out_free;

	} else
		queues = 1;

	eth_dev_vhost_create(name, iface_name, queues, rte_socket_id());

out_free:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_vhost_devuninit(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internal *internal;
	struct internal_list *list;
	unsigned int i;

	RTE_LOG(INFO, PMD, "Un-initializing pmd_vhost for %s\n", name);

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -ENODEV;

	internal = eth_dev->data->dev_private;
	if (internal == NULL)
		return -ENODEV;

	list = find_internal_resource(internal->iface_name);
	if (list == NULL)
		return -ENODEV;

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_REMOVE(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);
	rte_free(list);

	eth_dev_stop(eth_dev);

	rte_free(vring_states[eth_dev->data->port_id]);
	vring_states[eth_dev->data->port_id] = NULL;

	free(internal->dev_name);
	free(internal->iface_name);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
		rte_free(eth_dev->data->rx_queues[i]);
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		rte_free(eth_dev->data->tx_queues[i]);

	rte_free(eth_dev->data->mac_addrs);
	rte_free(eth_dev->data);
	rte_free(internal);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_driver pmd_vhost_drv = {
	.name = "eth_vhost",
	.type = PMD_VDEV,
	.init = rte_pmd_vhost_devinit,
	.uninit = rte_pmd_vhost_devuninit,
};

PMD_REGISTER_DRIVER(pmd_vhost_drv);