dpdk.git / drivers/net/vhost/rte_eth_vhost.c (commit b1c39a5c7dc68fd06b38b6f37c2059a62d4d087b)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 IGEL Co., Ltd.
 * Copyright(c) 2016-2018 Intel Corporation
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>   /* snprintf */
#include <stdlib.h>  /* strtoul */
#include <string.h>  /* memset, strcmp, strcpy */
#include <errno.h>   /* errno, ERANGE */
#include <limits.h>  /* PATH_MAX, USHRT_MAX */
#include <sys/epoll.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"

RTE_LOG_REGISTER_DEFAULT(vhost_logtype, NOTICE);

#define VHOST_LOG(level, ...) \
        rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)

enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

#define ETH_VHOST_IFACE_ARG             "iface"
#define ETH_VHOST_QUEUES_ARG            "queues"
#define ETH_VHOST_CLIENT_ARG            "client"
#define ETH_VHOST_IOMMU_SUPPORT         "iommu-support"
#define ETH_VHOST_POSTCOPY_SUPPORT      "postcopy-support"
#define ETH_VHOST_VIRTIO_NET_F_HOST_TSO "tso"
#define ETH_VHOST_LINEAR_BUF  "linear-buffer"
#define ETH_VHOST_EXT_BUF  "ext-buffer"
#define VHOST_MAX_PKT_BURST 32

static const char *valid_arguments[] = {
        ETH_VHOST_IFACE_ARG,
        ETH_VHOST_QUEUES_ARG,
        ETH_VHOST_CLIENT_ARG,
        ETH_VHOST_IOMMU_SUPPORT,
        ETH_VHOST_POSTCOPY_SUPPORT,
        ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
        ETH_VHOST_LINEAR_BUF,
        ETH_VHOST_EXT_BUF,
        NULL
};

static struct rte_ether_addr base_eth_addr = {
        .addr_bytes = {
                0x56 /* V */,
                0x48 /* H */,
                0x4F /* O */,
                0x53 /* S */,
                0x54 /* T */,
                0x00
        }
};

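/*
 * Per-queue extended statistics. Packets are bucketed by frame size and by
 * destination MAC type (unicast/multicast/broadcast), roughly following the
 * RFC 2863 interface counters and the RMON-style size histogram.
 */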
enum vhost_xstats_pkts {
        VHOST_UNDERSIZE_PKT = 0,
        VHOST_64_PKT,
        VHOST_65_TO_127_PKT,
        VHOST_128_TO_255_PKT,
        VHOST_256_TO_511_PKT,
        VHOST_512_TO_1023_PKT,
        VHOST_1024_TO_1522_PKT,
        VHOST_1523_TO_MAX_PKT,
        VHOST_BROADCAST_PKT,
        VHOST_MULTICAST_PKT,
        VHOST_UNICAST_PKT,
        VHOST_PKT,
        VHOST_BYTE,
        VHOST_MISSED_PKT,
        VHOST_ERRORS_PKT,
        VHOST_ERRORS_FRAGMENTED,
        VHOST_ERRORS_JABBER,
        VHOST_UNKNOWN_PROTOCOL,
        VHOST_XSTATS_MAX,
};

struct vhost_stats {
        uint64_t pkts;
        uint64_t bytes;
        uint64_t missed_pkts;
        uint64_t xstats[VHOST_XSTATS_MAX];
};

struct vhost_queue {
        int vid;
        rte_atomic32_t allow_queuing;
        rte_atomic32_t while_queuing;
        struct pmd_internal *internal;
        struct rte_mempool *mb_pool;
        uint16_t port;
        uint16_t virtqueue_id;
        struct vhost_stats stats;
        int intr_enable;
        rte_spinlock_t intr_lock;
};

struct pmd_internal {
        rte_atomic32_t dev_attached;
        char *iface_name;
        uint64_t flags;
        uint64_t disable_flags;
        uint16_t max_queues;
        int vid;
        rte_atomic32_t started;
        uint8_t vlan_strip;
};

struct internal_list {
        TAILQ_ENTRY(internal_list) next;
        struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
        TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct rte_eth_link pmd_link = {
                .link_speed = 10000,
                .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
                .link_status = RTE_ETH_LINK_DOWN
};

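/*
 * Tracks per-vring enable/disable state as reported by the vhost library.
 * "cur" is the latest state, "seen" is what the application has already
 * consumed via rte_eth_vhost_get_queue_event().
 */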
struct rte_vhost_vring_state {
        rte_spinlock_t lock;

        bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
        bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
        unsigned int index;
        unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
        char name[VHOST_XSTATS_NAME_SIZE];
        uint64_t offset;
};

/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
        {"fragmented_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
        {"jabber_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
        {"unknown_protos_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};

/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};

#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
                                sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
                                sizeof(vhost_txport_stat_strings[0]))

static int
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq = NULL;
        unsigned int i = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }

        return 0;
}

static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
                           struct rte_eth_xstat_name *xstats_names,
                           unsigned int limit __rte_unused)
{
        unsigned int t = 0;
        int count = 0;
        int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (!xstats_names)
                return nstats;
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "rx_%s", vhost_rxport_stat_strings[t].name);
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "tx_%s", vhost_txport_stat_strings[t].name);
                count++;
        }
        return count;
}

static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        unsigned int i;
        unsigned int t;
        unsigned int count = 0;
        struct vhost_queue *vq = NULL;
        unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (n < nxstats)
                return nxstats;

        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        vq = dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_rxport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        vq = dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_txport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        return count;
}

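/*
 * Classify a packet as broadcast/multicast/unicast from its destination
 * MAC address and bump the matching xstats counter.
 */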
static inline void
vhost_count_xcast_packets(struct vhost_queue *vq,
                                struct rte_mbuf *mbuf)
{
        struct rte_ether_addr *ea = NULL;
        struct vhost_stats *pstats = &vq->stats;

        ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
        if (rte_is_multicast_ether_addr(ea)) {
                if (rte_is_broadcast_ether_addr(ea))
                        pstats->xstats[VHOST_BROADCAST_PKT]++;
                else
                        pstats->xstats[VHOST_MULTICAST_PKT]++;
        } else {
                pstats->xstats[VHOST_UNICAST_PKT]++;
        }
}

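/*
 * Update the size-bucket counters for one packet. For 64 < pkt_len < 1024
 * the bucket index is computed from the position of the most significant
 * bit: index = 32 - __builtin_clz(pkt_len) - 5, which lands on
 * VHOST_65_TO_127_PKT for 65..127, VHOST_128_TO_255_PKT for 128..255,
 * and so on up to VHOST_512_TO_1023_PKT.
 */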
static __rte_always_inline void
vhost_update_single_packet_xstats(struct vhost_queue *vq, struct rte_mbuf *buf)
{
        uint32_t pkt_len = 0;
        uint64_t index;
        struct vhost_stats *pstats = &vq->stats;

        pstats->xstats[VHOST_PKT]++;
        pkt_len = buf->pkt_len;
        if (pkt_len == 64) {
                pstats->xstats[VHOST_64_PKT]++;
        } else if (pkt_len > 64 && pkt_len < 1024) {
                index = (sizeof(pkt_len) * 8)
                        - __builtin_clz(pkt_len) - 5;
                pstats->xstats[index]++;
        } else {
                if (pkt_len < 64)
                        pstats->xstats[VHOST_UNDERSIZE_PKT]++;
                else if (pkt_len <= 1522)
                        pstats->xstats[VHOST_1024_TO_1522_PKT]++;
                else if (pkt_len > 1522)
                        pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
        }
        vhost_count_xcast_packets(vq, buf);
}

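/*
 * RX burst: dequeue packets that the guest placed on its TX virtqueue.
 * The allow_queuing/while_queuing pair is a lock-free handshake with
 * update_queuing_status(): the control path clears allow_queuing and then
 * spins until while_queuing drops back to zero.
 */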
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_rx = 0;
        uint16_t nb_receive = nb_bufs;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Dequeue packets from guest TX queue */
        while (nb_receive) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_receive,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
                                                  r->mb_pool, &bufs[nb_rx],
                                                  num);

                nb_rx += nb_pkts;
                nb_receive -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_rx;

        for (i = 0; likely(i < nb_rx); i++) {
                bufs[i]->port = r->port;
                bufs[i]->vlan_tci = 0;

                if (r->internal->vlan_strip)
                        rte_vlan_strip(bufs[i]);

                r->stats.bytes += bufs[i]->pkt_len;
                r->stats.xstats[VHOST_BYTE] += bufs[i]->pkt_len;

                vhost_update_single_packet_xstats(r, bufs[i]);
        }

out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_rx;
}

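/*
 * TX burst: copy packets into the guest's RX virtqueue, inserting a VLAN
 * tag first when the mbuf requests it. Mbufs that were successfully
 * enqueued are freed here (rte_vhost_enqueue_burst() copies the data);
 * packets the guest could not accept are counted as missed and remain
 * owned by the caller, per the usual tx_burst contract.
 */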
static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_tx = 0;
        uint16_t nb_send = 0;
        uint64_t nb_bytes = 0;
        uint64_t nb_missed = 0;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        for (i = 0; i < nb_bufs; i++) {
                struct rte_mbuf *m = bufs[i];

                /* Do VLAN tag insertion */
                if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
                        int error = rte_vlan_insert(&m);
                        if (unlikely(error)) {
                                rte_pktmbuf_free(m);
                                continue;
                        }
                }

                bufs[nb_send] = m;
                ++nb_send;
        }

        /* Enqueue packets to guest RX queue */
        while (nb_send) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_send,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
                                                  &bufs[nb_tx], num);

                nb_tx += nb_pkts;
                nb_send -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        for (i = 0; likely(i < nb_tx); i++) {
                nb_bytes += bufs[i]->pkt_len;
                vhost_update_single_packet_xstats(r, bufs[i]);
        }

        nb_missed = nb_bufs - nb_tx;

        r->stats.pkts += nb_tx;
        r->stats.bytes += nb_bytes;
        r->stats.missed_pkts += nb_missed;

        r->stats.xstats[VHOST_BYTE] += nb_bytes;
        r->stats.xstats[VHOST_MISSED_PKT] += nb_missed;
        r->stats.xstats[VHOST_UNICAST_PKT] += nb_missed;

        /* According to RFC2863, ifHCOutUcastPkts, ifHCOutMulticastPkts and
         * ifHCOutBroadcastPkts counters are increased when packets are not
         * transmitted successfully.
         */
        for (i = nb_tx; i < nb_bufs; i++)
                vhost_count_xcast_packets(r, bufs[i]);

        for (i = 0; likely(i < nb_tx); i++)
                rte_pktmbuf_free(bufs[i]);
out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_tx;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
        int found = 0;
        struct internal_list *list;
        struct pmd_internal *internal;

        if (ifname == NULL)
                return NULL;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                internal = list->eth_dev->data->dev_private;
                if (!strcmp(internal->iface_name, ifname)) {
                        found = 1;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        if (!found)
                return NULL;

        return list;
}

static int
eth_vhost_update_intr(struct rte_eth_dev *eth_dev, uint16_t rxq_idx)
{
        struct rte_intr_handle *handle = eth_dev->intr_handle;
        struct rte_epoll_event rev;
        int epfd, ret;

        if (!handle)
                return 0;

        if (handle->efds[rxq_idx] == handle->elist[rxq_idx].fd)
                return 0;

        VHOST_LOG(INFO, "kickfd for rxq-%d was changed, updating handler.\n",
                        rxq_idx);

        if (handle->elist[rxq_idx].fd != -1)
                VHOST_LOG(ERR, "Unexpected previous kickfd value (Got %d, expected -1).\n",
                                handle->elist[rxq_idx].fd);

        /*
         * First remove invalid epoll event, and then install
         * the new one. May be solved with a proper API in the
         * future.
         */
        epfd = handle->elist[rxq_idx].epfd;
        rev = handle->elist[rxq_idx];
        ret = rte_epoll_ctl(epfd, EPOLL_CTL_DEL, rev.fd,
                        &handle->elist[rxq_idx]);
        if (ret) {
                VHOST_LOG(ERR, "Delete epoll event failed.\n");
                return ret;
        }

        rev.fd = handle->efds[rxq_idx];
        handle->elist[rxq_idx] = rev;
        ret = rte_epoll_ctl(epfd, EPOLL_CTL_ADD, rev.fd,
                        &handle->elist[rxq_idx]);
        if (ret) {
                VHOST_LOG(ERR, "Add epoll event failed.\n");
                return ret;
        }

        return 0;
}

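/*
 * Ethdev RX queue qid maps to guest TX vring (qid << 1) + 1 (VIRTIO_TXQ
 * offset): even vring ids are guest RX rings (our TX side), odd ids are
 * guest TX rings (our RX side). RX interrupts therefore toggle guest
 * notification on the odd ring and poll its kickfd.
 */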
static int
eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int old_intr_enable, ret = 0;

        vq = dev->data->rx_queues[qid];
        if (!vq) {
                VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
                return -1;
        }

        rte_spinlock_lock(&vq->intr_lock);
        old_intr_enable = vq->intr_enable;
        vq->intr_enable = 1;
        ret = eth_vhost_update_intr(dev, qid);
        rte_spinlock_unlock(&vq->intr_lock);

        if (ret < 0) {
                VHOST_LOG(ERR, "Failed to update rxq%d's intr\n", qid);
                vq->intr_enable = old_intr_enable;
                return ret;
        }

        ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
        if (ret < 0) {
                VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
                return ret;
        }
        VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
        rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
        rte_wmb();

        return ret;
}

static int
eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int ret = 0;

        vq = dev->data->rx_queues[qid];
        if (!vq) {
                VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
                return -1;
        }

        ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
        if (ret < 0) {
                VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
                return ret;
        }
        VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
        rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
        rte_wmb();

        vq->intr_enable = 0;

        return 0;
}

static void
eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
{
        struct rte_intr_handle *intr_handle = dev->intr_handle;

        if (intr_handle) {
                if (intr_handle->intr_vec)
                        free(intr_handle->intr_vec);
                free(intr_handle);
        }

        dev->intr_handle = NULL;
}

static int
eth_vhost_install_intr(struct rte_eth_dev *dev)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int nb_rxq = dev->data->nb_rx_queues;
        int i;
        int ret;

        /* uninstall first if we are reconnecting */
        if (dev->intr_handle)
                eth_vhost_uninstall_intr(dev);

        dev->intr_handle = malloc(sizeof(*dev->intr_handle));
        if (!dev->intr_handle) {
                VHOST_LOG(ERR, "Failed to allocate intr_handle\n");
                return -ENOMEM;
        }
        memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));

        dev->intr_handle->efd_counter_size = sizeof(uint64_t);

        dev->intr_handle->intr_vec =
                malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));

        if (!dev->intr_handle->intr_vec) {
                VHOST_LOG(ERR,
                        "Failed to allocate memory for interrupt vector\n");
                free(dev->intr_handle);
                return -ENOMEM;
        }

        VHOST_LOG(INFO, "Prepare intr vec\n");
        for (i = 0; i < nb_rxq; i++) {
                dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
                dev->intr_handle->efds[i] = -1;
                vq = dev->data->rx_queues[i];
                if (!vq) {
                        VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i);
                        continue;
                }

                ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
                if (ret < 0) {
                        VHOST_LOG(INFO,
                                "Failed to get rxq-%d's vring, skip!\n", i);
                        continue;
                }

                if (vring.kickfd < 0) {
                        VHOST_LOG(INFO,
                                "rxq-%d's kickfd is invalid, skip!\n", i);
                        continue;
                }
                dev->intr_handle->efds[i] = vring.kickfd;
                VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
        }

        dev->intr_handle->nb_efd = nb_rxq;
        dev->intr_handle->max_intr = nb_rxq + 1;
        dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;

        return 0;
}

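/*
 * Atomically publish whether the data path may touch the vhost device,
 * then wait for any in-flight rx/tx burst to drain; while_queuing acts
 * as a per-queue busy flag for that handshake.
 */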
static void
update_queuing_status(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        struct vhost_queue *vq;
        unsigned int i;
        int allow_queuing = 1;

        if (!dev->data->rx_queues || !dev->data->tx_queues)
                return;

        if (rte_atomic32_read(&internal->started) == 0 ||
            rte_atomic32_read(&internal->dev_attached) == 0)
                allow_queuing = 0;

        /* Wait until rx/tx_pkt_burst stops accessing vhost device */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }
}

static void
queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
{
        struct vhost_queue *vq;
        int i;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
}

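/*
 * vhost library callback: a vhost-user frontend (e.g. QEMU virtio-net)
 * has connected and finished negotiation. Bind the vhost device id to
 * the ethdev queues, bring the link up and fire the LSC event.
 */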
static int
new_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        struct pmd_internal *internal;
        struct rte_eth_conf *dev_conf;
        unsigned i;
        char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
        int newnode;
#endif

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;
        dev_conf = &eth_dev->data->dev_conf;

#ifdef RTE_LIBRTE_VHOST_NUMA
        newnode = rte_vhost_get_numa_node(vid);
        if (newnode >= 0)
                eth_dev->data->numa_node = newnode;
#endif

        internal->vid = vid;
        if (rte_atomic32_read(&internal->started) == 1) {
                queue_setup(eth_dev, internal);

                if (dev_conf->intr_conf.rxq) {
                        if (eth_vhost_install_intr(eth_dev) < 0) {
                                VHOST_LOG(INFO,
                                        "Failed to install interrupt handler.\n");
                                return -1;
                        }
                }
        } else {
                VHOST_LOG(INFO, "RX/TX queues do not exist yet\n");
        }

        for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
                rte_vhost_enable_guest_notification(vid, i, 0);

        rte_vhost_get_mtu(vid, &eth_dev->data->mtu);

        eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;

        rte_atomic32_set(&internal->dev_attached, 1);
        update_queuing_status(eth_dev);

        VHOST_LOG(INFO, "Vhost device %d created\n", vid);

        rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

        return 0;
}

static void
destroy_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct pmd_internal *internal;
        struct vhost_queue *vq;
        struct internal_list *list;
        char ifname[PATH_MAX];
        unsigned i;
        struct rte_vhost_vring_state *state;

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
                return;
        }
        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

        rte_atomic32_set(&internal->dev_attached, 0);
        update_queuing_status(eth_dev);

        eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

        if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                        vq = eth_dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        vq->vid = -1;
                }
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        vq = eth_dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        vq->vid = -1;
                }
        }

        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                state->cur[i] = false;
                state->seen[i] = false;
        }
        state->max_vring = 0;
        rte_spinlock_unlock(&state->lock);

        VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
        eth_vhost_uninstall_intr(eth_dev);

        rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

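/*
 * Called when a vring state change may have replaced the kickfd behind an
 * RX queue: refresh the fd stored in the interrupt handle and, if
 * interrupts are enabled on that queue, re-register it with epoll.
 */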
static int
vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id)
{
        struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
        struct pmd_internal *internal = eth_dev->data->dev_private;
        struct vhost_queue *vq;
        struct rte_vhost_vring vring;
        int rx_idx = vring_id % 2 ? (vring_id - 1) >> 1 : -1;
        int ret = 0;

        /*
         * The vring kickfd may be changed after the new device notification.
         * Update it when the vring state is updated.
         */
        if (rx_idx >= 0 && rx_idx < eth_dev->data->nb_rx_queues &&
            rte_atomic32_read(&internal->dev_attached) &&
            rte_atomic32_read(&internal->started) &&
            dev_conf->intr_conf.rxq) {
                ret = rte_vhost_get_vhost_vring(vid, vring_id, &vring);
                if (ret) {
                        VHOST_LOG(ERR, "Failed to get vring %d information.\n",
                                        vring_id);
                        return ret;
                }
                eth_dev->intr_handle->efds[rx_idx] = vring.kickfd;

                vq = eth_dev->data->rx_queues[rx_idx];
                if (!vq) {
                        VHOST_LOG(ERR, "rxq%d is not setup yet\n", rx_idx);
                        return -1;
                }

                rte_spinlock_lock(&vq->intr_lock);
                if (vq->intr_enable)
                        ret = eth_vhost_update_intr(eth_dev, rx_idx);
                rte_spinlock_unlock(&vq->intr_lock);
        }

        return ret;
}

static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
        struct rte_vhost_vring_state *state;
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        char ifname[PATH_MAX];

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        /* won't be NULL */
        state = vring_states[eth_dev->data->port_id];

        if (enable && vring_conf_update(vid, eth_dev, vring))
                VHOST_LOG(INFO, "Failed to update vring-%d configuration.\n",
                          (int)vring);

        rte_spinlock_lock(&state->lock);
        if (state->cur[vring] == enable) {
                rte_spinlock_unlock(&state->lock);
                return 0;
        }
        state->cur[vring] = enable;
        state->max_vring = RTE_MAX(vring, state->max_vring);
        rte_spinlock_unlock(&state->lock);

        VHOST_LOG(INFO, "vring%u is %s\n",
                        vring, enable ? "enabled" : "disabled");

        rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

        return 0;
}

static struct vhost_device_ops vhost_ops = {
        .new_device          = new_device,
        .destroy_device      = destroy_device,
        .vring_state_changed = vring_state_changed,
};

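/*
 * One-time per-socket setup: register the interface with the vhost
 * library, hook up the callbacks above and start the vhost session.
 * Runs from the first eth_dev_configure() on this port; subsequent
 * calls find the entry on the internal list and return early.
 */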
static int
vhost_driver_setup(struct rte_eth_dev *eth_dev)
{
        struct pmd_internal *internal = eth_dev->data->dev_private;
        struct internal_list *list = NULL;
        struct rte_vhost_vring_state *vring_state = NULL;
        unsigned int numa_node = eth_dev->device->numa_node;
        const char *name = eth_dev->device->name;

        /* Don't try to setup again if it has already been done. */
        list = find_internal_resource(internal->iface_name);
        if (list)
                return 0;

        list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
        if (list == NULL)
                return -1;

        vring_state = rte_zmalloc_socket(name, sizeof(*vring_state),
                                         0, numa_node);
        if (vring_state == NULL)
                goto free_list;

        list->eth_dev = eth_dev;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_INSERT_TAIL(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);

        rte_spinlock_init(&vring_state->lock);
        vring_states[eth_dev->data->port_id] = vring_state;

        if (rte_vhost_driver_register(internal->iface_name, internal->flags))
                goto list_remove;

        if (internal->disable_flags) {
                if (rte_vhost_driver_disable_features(internal->iface_name,
                                                      internal->disable_flags))
                        goto drv_unreg;
        }

        if (rte_vhost_driver_callback_register(internal->iface_name,
                                               &vhost_ops) < 0) {
                VHOST_LOG(ERR, "Can't register callbacks\n");
                goto drv_unreg;
        }

        if (rte_vhost_driver_start(internal->iface_name) < 0) {
                VHOST_LOG(ERR, "Failed to start driver for %s\n",
                          internal->iface_name);
                goto drv_unreg;
        }

        return 0;

drv_unreg:
        rte_vhost_driver_unregister(internal->iface_name);
list_remove:
        vring_states[eth_dev->data->port_id] = NULL;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_REMOVE(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);
        rte_free(vring_state);
free_list:
        rte_free(list);

        return -1;
}

int
rte_eth_vhost_get_queue_event(uint16_t port_id,
                struct rte_eth_vhost_queue_event *event)
{
        struct rte_vhost_vring_state *state;
        unsigned int i;
        int idx;

        if (port_id >= RTE_MAX_ETHPORTS) {
                VHOST_LOG(ERR, "Invalid port id\n");
                return -1;
        }

        state = vring_states[port_id];
        if (!state) {
                VHOST_LOG(ERR, "Unused port\n");
                return -1;
        }

        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                idx = state->index++ % (state->max_vring + 1);

                if (state->cur[idx] != state->seen[idx]) {
                        state->seen[idx] = state->cur[idx];
                        event->queue_id = idx / 2;
                        event->rx = idx & 1;
                        event->enable = state->cur[idx];
                        rte_spinlock_unlock(&state->lock);
                        return 0;
                }
        }
        rte_spinlock_unlock(&state->lock);

        return -1;
}

int
rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
        struct internal_list *list;
        struct rte_eth_dev *eth_dev;
        struct vhost_queue *vq;
        int vid = -1;

        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                eth_dev = list->eth_dev;
                if (eth_dev->data->port_id == port_id) {
                        vq = eth_dev->data->rx_queues[0];
                        if (vq)
                                vid = vq->vid;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        return vid;
}

static int
eth_dev_configure(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

        /* NOTE: the same process has to operate a vhost interface
         * from beginning to end (from eth_dev configure to eth_dev close).
         * It is the user's responsibility at the moment.
         */
        if (vhost_driver_setup(dev) < 0)
                return -1;

        internal->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);

        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *eth_dev)
{
        struct pmd_internal *internal = eth_dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;

        queue_setup(eth_dev, internal);

        if (rte_atomic32_read(&internal->dev_attached) == 1) {
                if (dev_conf->intr_conf.rxq) {
                        if (eth_vhost_install_intr(eth_dev) < 0) {
                                VHOST_LOG(INFO,
                                        "Failed to install interrupt handler.\n");
                                return -1;
                        }
                }
        }

        rte_atomic32_set(&internal->started, 1);
        update_queuing_status(eth_dev);

        return 0;
}

static int
eth_dev_stop(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;

        dev->data->dev_started = 0;
        rte_atomic32_set(&internal->started, 0);
        update_queuing_status(dev);

        return 0;
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal;
        struct internal_list *list;
        unsigned int i;
        int ret;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        internal = dev->data->dev_private;
        if (!internal)
                return 0;

        ret = eth_dev_stop(dev);

        list = find_internal_resource(internal->iface_name);
        if (list) {
                rte_vhost_driver_unregister(internal->iface_name);
                pthread_mutex_lock(&internal_list_lock);
                TAILQ_REMOVE(&internal_list, list, next);
                pthread_mutex_unlock(&internal_list_lock);
                rte_free(list);
        }

        if (dev->data->rx_queues)
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        rte_free(dev->data->rx_queues[i]);

        if (dev->data->tx_queues)
                for (i = 0; i < dev->data->nb_tx_queues; i++)
                        rte_free(dev->data->tx_queues[i]);

        rte_free(internal->iface_name);
        rte_free(internal);

        dev->data->dev_private = NULL;

        rte_free(vring_states[dev->data->port_id]);
        vring_states[dev->data->port_id] = NULL;

        return ret;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
                return -ENOMEM;
        }

        vq->mb_pool = mb_pool;
        vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
        rte_spinlock_init(&vq->intr_lock);
        dev->data->rx_queues[rx_queue_id] = vq;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
                return -ENOMEM;
        }

        vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
        rte_spinlock_init(&vq->intr_lock);
        dev->data->tx_queues[tx_queue_id] = vq;

        return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
             struct rte_eth_dev_info *dev_info)
{
        struct pmd_internal *internal;

        internal = dev->data->dev_private;
        if (internal == NULL) {
                VHOST_LOG(ERR, "Invalid device specified\n");
                return -ENODEV;
        }

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = internal->max_queues;
        dev_info->max_tx_queues = internal->max_queues;
        dev_info->min_rx_bufsize = 0;

        dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
                                RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
        dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;

        return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned i;
        unsigned long rx_total = 0, tx_total = 0;
        unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
        struct vhost_queue *vq;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                stats->q_ipackets[i] = vq->stats.pkts;
                rx_total += stats->q_ipackets[i];

                stats->q_ibytes[i] = vq->stats.bytes;
                rx_total_bytes += stats->q_ibytes[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                stats->q_opackets[i] = vq->stats.pkts;
                tx_total += stats->q_opackets[i];

                stats->q_obytes[i] = vq->stats.bytes;
                tx_total_bytes += stats->q_obytes[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->ibytes = rx_total_bytes;
        stats->obytes = tx_total_bytes;

        return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq;
        unsigned i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
                vq->stats.missed_pkts = 0;
        }

        return 0;
}

static void
eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        rte_free(dev->data->rx_queues[qid]);
}

static void
eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        rte_free(dev->data->tx_queues[qid]);
}

static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
        /*
         * vHost does not hang onto mbuf. eth_vhost_tx() copies packet data
         * and releases mbuf, so nothing to cleanup.
         */
        return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

static uint32_t
eth_rx_queue_count(void *rx_queue)
{
        struct vhost_queue *vq;

        vq = rx_queue;
        if (vq == NULL)
                return 0;

        return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_close = eth_dev_close,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_rx_queue_release,
        .tx_queue_release = eth_tx_queue_release,
        .tx_done_cleanup = eth_tx_done_cleanup,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .xstats_reset = vhost_dev_xstats_reset,
        .xstats_get = vhost_dev_xstats_get,
        .xstats_get_names = vhost_dev_xstats_get_names,
        .rx_queue_intr_enable = eth_rxq_intr_enable,
        .rx_queue_intr_disable = eth_rxq_intr_disable,
};

static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
        int16_t queues, const unsigned int numa_node, uint64_t flags,
        uint64_t disable_flags)
{
        const char *name = rte_vdev_device_name(dev);
        struct rte_eth_dev_data *data;
        struct pmd_internal *internal = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct rte_ether_addr *eth_addr = NULL;

        VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
                numa_node);

        /* reserve an ethdev entry */
        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
        if (eth_dev == NULL)
                goto error;
        data = eth_dev->data;

        eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
        if (eth_addr == NULL)
                goto error;
        data->mac_addrs = eth_addr;
        *eth_addr = base_eth_addr;
        eth_addr->addr_bytes[5] = eth_dev->data->port_id;

        /* now put it all together
         * - store queue data in internal,
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        internal = eth_dev->data->dev_private;
        internal->iface_name = rte_malloc_socket(name, strlen(iface_name) + 1,
                                                 0, numa_node);
        if (internal->iface_name == NULL)
                goto error;
        strcpy(internal->iface_name, iface_name);

        data->nb_rx_queues = queues;
        data->nb_tx_queues = queues;
        internal->max_queues = queues;
        internal->vid = -1;
        internal->flags = flags;
        internal->disable_flags = disable_flags;
        data->dev_link = pmd_link;
        data->dev_flags = RTE_ETH_DEV_INTR_LSC |
                                RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        data->promiscuous = 1;
        data->all_multicast = 1;

        eth_dev->dev_ops = &ops;
        eth_dev->rx_queue_count = eth_rx_queue_count;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_vhost_rx;
        eth_dev->tx_pkt_burst = eth_vhost_tx;

        rte_eth_dev_probing_finish(eth_dev);
        return 0;

error:
        if (internal)
                rte_free(internal->iface_name);
        rte_eth_dev_release_port(eth_dev);

        return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
        const char **iface_name = extra_args;

        if (value == NULL)
                return -1;

        *iface_name = value;

        return 0;
}

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
        uint16_t *n = extra_args;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        *n = (uint16_t)strtoul(value, NULL, 0);
        if (*n == USHRT_MAX && errno == ERANGE)
                return -1;

        return 0;
}

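/*
 * Probe a vdev created with devargs such as:
 *
 *   --vdev 'net_vhost0,iface=/tmp/vhost.sock,queues=2,client=1'
 *
 * (see the full parameter string registered at the bottom of this file).
 */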
static int
rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        char *iface_name;
        uint16_t queues;
        uint64_t flags = 0;
        uint64_t disable_flags = 0;
        int client_mode = 0;
        int iommu_support = 0;
        int postcopy_support = 0;
        int tso = 0;
        int linear_buf = 0;
        int ext_buf = 0;
        struct rte_eth_dev *eth_dev;
        const char *name = rte_vdev_device_name(dev);

        VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);

        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                eth_dev = rte_eth_dev_attach_secondary(name);
                if (!eth_dev) {
                        VHOST_LOG(ERR, "Failed to probe %s\n", name);
                        return -1;
                }
                eth_dev->rx_pkt_burst = eth_vhost_rx;
                eth_dev->tx_pkt_burst = eth_vhost_tx;
                eth_dev->dev_ops = &ops;
                if (dev->device.numa_node == SOCKET_ID_ANY)
                        dev->device.numa_node = rte_socket_id();
                eth_dev->device = &dev->device;
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
        }

        kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
        if (kvlist == NULL)
                return -1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
                                         &open_iface, &iface_name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -1;
                goto out_free;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
                                         &open_int, &queues);
                if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
                        goto out_free;

        } else
                queues = 1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
                                         &open_int, &client_mode);
                if (ret < 0)
                        goto out_free;

                if (client_mode)
                        flags |= RTE_VHOST_USER_CLIENT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
                                         &open_int, &iommu_support);
                if (ret < 0)
                        goto out_free;

                if (iommu_support)
                        flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_POSTCOPY_SUPPORT) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_POSTCOPY_SUPPORT,
                                         &open_int, &postcopy_support);
                if (ret < 0)
                        goto out_free;

                if (postcopy_support)
                        flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_VIRTIO_NET_F_HOST_TSO) == 1) {
                ret = rte_kvargs_process(kvlist,
                                ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
                                &open_int, &tso);
                if (ret < 0)
                        goto out_free;

                if (tso == 0) {
                        disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
                        disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
                }
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_LINEAR_BUF) == 1) {
                ret = rte_kvargs_process(kvlist,
                                ETH_VHOST_LINEAR_BUF,
                                &open_int, &linear_buf);
                if (ret < 0)
                        goto out_free;

                if (linear_buf == 1)
                        flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_EXT_BUF) == 1) {
                ret = rte_kvargs_process(kvlist,
                                ETH_VHOST_EXT_BUF,
                                &open_int, &ext_buf);
                if (ret < 0)
                        goto out_free;

                if (ext_buf == 1)
                        flags |= RTE_VHOST_USER_EXTBUF_SUPPORT;
        }

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        ret = eth_dev_vhost_create(dev, iface_name, queues,
                                   dev->device.numa_node, flags, disable_flags);
        if (ret == -1)
                VHOST_LOG(ERR, "Failed to create %s\n", name);

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
        const char *name;
        struct rte_eth_dev *eth_dev = NULL;

        name = rte_vdev_device_name(dev);
        VHOST_LOG(INFO, "Un-Initializing pmd_vhost for %s\n", name);

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return 0;

        eth_dev_close(eth_dev);
        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_vhost_drv = {
        .probe = rte_pmd_vhost_probe,
        .remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
        "iface=<ifc> "
        "queues=<int> "
        "client=<0|1> "
        "iommu-support=<0|1> "
        "postcopy-support=<0|1> "
        "tso=<0|1> "
        "linear-buffer=<0|1> "
        "ext-buffer=<0|1>");