net/vhost: remove dequeue zero-copy support
[dpdk.git] / drivers / net / vhost / rte_eth_vhost.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 IGEL Co., Ltd.
 * Copyright(c) 2016-2018 Intel Corporation
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>
#include <sys/epoll.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"

RTE_LOG_REGISTER(vhost_logtype, pmd.net.vhost, NOTICE);

#define VHOST_LOG(level, ...) \
        rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)

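/*
 * Virtqueue numbering follows the virtio-net convention: each queue pair
 * is (RX, TX) from the guest's point of view, so queue pair q uses ring
 * 2*q for guest RX and 2*q + 1 for guest TX. The host side is mirrored:
 * this PMD's RX path dequeues from the guest TX ring, and its TX path
 * enqueues into the guest RX ring.
 */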
enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

#define ETH_VHOST_IFACE_ARG             "iface"
#define ETH_VHOST_QUEUES_ARG            "queues"
#define ETH_VHOST_CLIENT_ARG            "client"
#define ETH_VHOST_IOMMU_SUPPORT         "iommu-support"
#define ETH_VHOST_POSTCOPY_SUPPORT      "postcopy-support"
#define ETH_VHOST_VIRTIO_NET_F_HOST_TSO "tso"
#define ETH_VHOST_LINEAR_BUF            "linear-buffer"
#define ETH_VHOST_EXT_BUF               "ext-buffer"
#define VHOST_MAX_PKT_BURST 32

static const char *valid_arguments[] = {
        ETH_VHOST_IFACE_ARG,
        ETH_VHOST_QUEUES_ARG,
        ETH_VHOST_CLIENT_ARG,
        ETH_VHOST_IOMMU_SUPPORT,
        ETH_VHOST_POSTCOPY_SUPPORT,
        ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
        ETH_VHOST_LINEAR_BUF,
        ETH_VHOST_EXT_BUF,
        NULL
};

static struct rte_ether_addr base_eth_addr = {
        .addr_bytes = {
                0x56 /* V */,
                0x48 /* H */,
                0x4F /* O */,
                0x53 /* S */,
                0x54 /* T */,
                0x00
        }
};

enum vhost_xstats_pkts {
        VHOST_UNDERSIZE_PKT = 0,
        VHOST_64_PKT,
        VHOST_65_TO_127_PKT,
        VHOST_128_TO_255_PKT,
        VHOST_256_TO_511_PKT,
        VHOST_512_TO_1023_PKT,
        VHOST_1024_TO_1522_PKT,
        VHOST_1523_TO_MAX_PKT,
        VHOST_BROADCAST_PKT,
        VHOST_MULTICAST_PKT,
        VHOST_UNICAST_PKT,
        VHOST_ERRORS_PKT,
        VHOST_ERRORS_FRAGMENTED,
        VHOST_ERRORS_JABBER,
        VHOST_UNKNOWN_PROTOCOL,
        VHOST_XSTATS_MAX,
};

struct vhost_stats {
        uint64_t pkts;
        uint64_t bytes;
        uint64_t missed_pkts;
        uint64_t xstats[VHOST_XSTATS_MAX];
};

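/*
 * Per-queue state. allow_queuing/while_queuing implement a lightweight
 * handshake with the control path: update_queuing_status() clears
 * allow_queuing and then spins until while_queuing drops to 0, which
 * guarantees no rx/tx burst is still touching the vhost device.
 */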
struct vhost_queue {
        int vid;
        rte_atomic32_t allow_queuing;
        rte_atomic32_t while_queuing;
        struct pmd_internal *internal;
        struct rte_mempool *mb_pool;
        uint16_t port;
        uint16_t virtqueue_id;
        struct vhost_stats stats;
        int intr_enable;
        rte_spinlock_t intr_lock;
};

struct pmd_internal {
        rte_atomic32_t dev_attached;
        char *iface_name;
        uint64_t flags;
        uint64_t disable_flags;
        uint16_t max_queues;
        int vid;
        rte_atomic32_t started;
        uint8_t vlan_strip;
};

struct internal_list {
        TAILQ_ENTRY(internal_list) next;
        struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
        TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct rte_eth_link pmd_link = {
                .link_speed = 10000,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = ETH_LINK_DOWN
};

struct rte_vhost_vring_state {
        rte_spinlock_t lock;

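        /*
         * cur[] is the vring state last reported by the vhost library;
         * seen[] is what the application has already consumed through
         * rte_eth_vhost_get_queue_event().
         */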
        bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
        bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
        unsigned int index;
        unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
        char name[VHOST_XSTATS_NAME_SIZE];
        uint64_t offset;
};

/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
        {"fragmented_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
        {"jabber_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
        {"unknown_protos_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};

/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};

#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
                                sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
                                sizeof(vhost_txport_stat_strings[0]))

static int
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq = NULL;
        unsigned int i = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }

        return 0;
}

static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
                           struct rte_eth_xstat_name *xstats_names,
                           unsigned int limit __rte_unused)
{
        unsigned int t = 0;
        int count = 0;
        int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (!xstats_names)
                return nstats;
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "rx_%s", vhost_rxport_stat_strings[t].name);
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "tx_%s", vhost_txport_stat_strings[t].name);
                count++;
        }
        return count;
}

static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        unsigned int i;
        unsigned int t;
        unsigned int count = 0;
        struct vhost_queue *vq = NULL;
        unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (n < nxstats)
                return nxstats;

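        /*
         * Unicast counters are derived on the fly: everything that is not
         * broadcast or multicast. On the TX side, missed packets are
         * included as well, since vhost_count_multicast_broadcast() also
         * runs on packets that could not be transmitted.
         */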
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                + vq->stats.missed_pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        vq = dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_rxport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        vq = dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_txport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        return count;
}

static inline void
vhost_count_multicast_broadcast(struct vhost_queue *vq,
                                struct rte_mbuf *mbuf)
{
        struct rte_ether_addr *ea = NULL;
        struct vhost_stats *pstats = &vq->stats;

        ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
        if (rte_is_multicast_ether_addr(ea)) {
                if (rte_is_broadcast_ether_addr(ea))
                        pstats->xstats[VHOST_BROADCAST_PKT]++;
                else
                        pstats->xstats[VHOST_MULTICAST_PKT]++;
        }
}

static void
vhost_update_packet_xstats(struct vhost_queue *vq,
                           struct rte_mbuf **bufs,
                           uint16_t count)
{
        uint32_t pkt_len = 0;
        uint64_t i = 0;
        uint64_t index;
        struct vhost_stats *pstats = &vq->stats;

        for (i = 0; i < count; i++) {
                pkt_len = bufs[i]->pkt_len;
                if (pkt_len == 64) {
                        pstats->xstats[VHOST_64_PKT]++;
                } else if (pkt_len > 64 && pkt_len < 1024) {
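                        /*
                         * For 64 < pkt_len < 1024 the bucket index follows
                         * from the bit length of pkt_len:
                         * 32 - clz(pkt_len) - 5 maps 65..127 to
                         * VHOST_65_TO_127_PKT, 128..255 to
                         * VHOST_128_TO_255_PKT, and so on.
                         */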
                        index = (sizeof(pkt_len) * 8)
                                - __builtin_clz(pkt_len) - 5;
                        pstats->xstats[index]++;
                } else {
                        if (pkt_len < 64)
                                pstats->xstats[VHOST_UNDERSIZE_PKT]++;
                        else if (pkt_len <= 1522)
                                pstats->xstats[VHOST_1024_TO_1522_PKT]++;
                        else if (pkt_len > 1522)
                                pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
                }
                vhost_count_multicast_broadcast(vq, bufs[i]);
        }
}

static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_rx = 0;
        uint16_t nb_receive = nb_bufs;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Dequeue packets from guest TX queue */
        while (nb_receive) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_receive,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
                                                  r->mb_pool, &bufs[nb_rx],
                                                  num);

                nb_rx += nb_pkts;
                nb_receive -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_rx;

        for (i = 0; likely(i < nb_rx); i++) {
                bufs[i]->port = r->port;
                bufs[i]->vlan_tci = 0;

                if (r->internal->vlan_strip)
                        rte_vlan_strip(bufs[i]);

                r->stats.bytes += bufs[i]->pkt_len;
        }

        vhost_update_packet_xstats(r, bufs, nb_rx);

out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_rx;
}

static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_tx = 0;
        uint16_t nb_send = 0;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        for (i = 0; i < nb_bufs; i++) {
                struct rte_mbuf *m = bufs[i];

                /* Do VLAN tag insertion */
                if (m->ol_flags & PKT_TX_VLAN_PKT) {
                        int error = rte_vlan_insert(&m);
                        if (unlikely(error)) {
                                rte_pktmbuf_free(m);
                                continue;
                        }
                }

                bufs[nb_send] = m;
                ++nb_send;
        }

        /* Enqueue packets to guest RX queue */
        while (nb_send) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_send,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
                                                  &bufs[nb_tx], num);

                nb_tx += nb_pkts;
                nb_send -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_tx;
        r->stats.missed_pkts += nb_bufs - nb_tx;

        for (i = 0; likely(i < nb_tx); i++)
                r->stats.bytes += bufs[i]->pkt_len;

        vhost_update_packet_xstats(r, bufs, nb_tx);

        /* According to RFC 2863, page 42, the ifHCOutMulticastPkts and
         * ifHCOutBroadcastPkts counters also cover packets that were not
         * transmitted successfully, so the "multicast" and "broadcast"
         * counters are increased for the missed packets as well.
         */
        for (i = nb_tx; i < nb_bufs; i++)
                vhost_count_multicast_broadcast(r, bufs[i]);

        for (i = 0; likely(i < nb_tx); i++)
                rte_pktmbuf_free(bufs[i]);
out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_tx;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
        int found = 0;
        struct internal_list *list;
        struct pmd_internal *internal;

        if (ifname == NULL)
                return NULL;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                internal = list->eth_dev->data->dev_private;
                if (!strcmp(internal->iface_name, ifname)) {
                        found = 1;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        if (!found)
                return NULL;

        return list;
}

static int
eth_vhost_update_intr(struct rte_eth_dev *eth_dev, uint16_t rxq_idx)
{
        struct rte_intr_handle *handle = eth_dev->intr_handle;
        struct rte_epoll_event rev;
        int epfd, ret;

        if (!handle)
                return 0;

        if (handle->efds[rxq_idx] == handle->elist[rxq_idx].fd)
                return 0;

        VHOST_LOG(INFO, "kickfd for rxq-%d was changed, updating handler.\n",
                        rxq_idx);

        if (handle->elist[rxq_idx].fd != -1)
                VHOST_LOG(ERR, "Unexpected previous kickfd value (Got %d, expected -1).\n",
                                handle->elist[rxq_idx].fd);

        /*
         * First remove the stale epoll event, then install the new one.
         * May be solved with a proper API in the future.
         */
        epfd = handle->elist[rxq_idx].epfd;
        rev = handle->elist[rxq_idx];
        ret = rte_epoll_ctl(epfd, EPOLL_CTL_DEL, rev.fd,
                        &handle->elist[rxq_idx]);
        if (ret) {
                VHOST_LOG(ERR, "Delete epoll event failed.\n");
                return ret;
        }

        rev.fd = handle->efds[rxq_idx];
        handle->elist[rxq_idx] = rev;
        ret = rte_epoll_ctl(epfd, EPOLL_CTL_ADD, rev.fd,
                        &handle->elist[rxq_idx]);
        if (ret) {
                VHOST_LOG(ERR, "Add epoll event failed.\n");
                return ret;
        }

        return 0;
}

static int
eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int old_intr_enable, ret = 0;

        vq = dev->data->rx_queues[qid];
        if (!vq) {
                VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
                return -1;
        }

        rte_spinlock_lock(&vq->intr_lock);
        old_intr_enable = vq->intr_enable;
        vq->intr_enable = 1;
        ret = eth_vhost_update_intr(dev, qid);
        rte_spinlock_unlock(&vq->intr_lock);

        if (ret < 0) {
                VHOST_LOG(ERR, "Failed to update rxq%d's intr\n", qid);
                vq->intr_enable = old_intr_enable;
                return ret;
        }

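        /* Ring (qid << 1) + 1 is the guest TX ring that backs RX queue qid. */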
        ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
        if (ret < 0) {
                VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
                return ret;
        }
        VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
        rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
        rte_wmb();

        return ret;
}

static int
eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int ret = 0;

        vq = dev->data->rx_queues[qid];
        if (!vq) {
                VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
                return -1;
        }

        ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
        if (ret < 0) {
                VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
                return ret;
        }
        VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
        rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
        rte_wmb();

        vq->intr_enable = 0;

        return 0;
}

static void
eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
{
        struct rte_intr_handle *intr_handle = dev->intr_handle;

        if (intr_handle) {
                if (intr_handle->intr_vec)
                        free(intr_handle->intr_vec);
                free(intr_handle);
        }

        dev->intr_handle = NULL;
}

static int
eth_vhost_install_intr(struct rte_eth_dev *dev)
{
        struct rte_vhost_vring vring;
        struct vhost_queue *vq;
        int nb_rxq = dev->data->nb_rx_queues;
        int i;
        int ret;

        /* uninstall first if we are reconnecting */
        if (dev->intr_handle)
                eth_vhost_uninstall_intr(dev);

        dev->intr_handle = malloc(sizeof(*dev->intr_handle));
        if (!dev->intr_handle) {
                VHOST_LOG(ERR, "Failed to allocate intr_handle\n");
                return -ENOMEM;
        }
        memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));

        dev->intr_handle->efd_counter_size = sizeof(uint64_t);

        dev->intr_handle->intr_vec =
                malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));

        if (!dev->intr_handle->intr_vec) {
                VHOST_LOG(ERR,
                        "Failed to allocate memory for interrupt vector\n");
                free(dev->intr_handle);
                return -ENOMEM;
        }

        VHOST_LOG(INFO, "Prepare intr vec\n");
        for (i = 0; i < nb_rxq; i++) {
                dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
                dev->intr_handle->efds[i] = -1;
                vq = dev->data->rx_queues[i];
                if (!vq) {
                        VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i);
                        continue;
                }

                ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
                if (ret < 0) {
                        VHOST_LOG(INFO,
                                "Failed to get rxq-%d's vring, skip!\n", i);
                        continue;
                }

                if (vring.kickfd < 0) {
                        VHOST_LOG(INFO,
                                "rxq-%d's kickfd is invalid, skip!\n", i);
                        continue;
                }
                dev->intr_handle->efds[i] = vring.kickfd;
                VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
        }

        dev->intr_handle->nb_efd = nb_rxq;
        dev->intr_handle->max_intr = nb_rxq + 1;
        dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;

        return 0;
}

static void
update_queuing_status(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        struct vhost_queue *vq;
        unsigned int i;
        int allow_queuing = 1;

        if (!dev->data->rx_queues || !dev->data->tx_queues)
                return;

        if (rte_atomic32_read(&internal->started) == 0 ||
            rte_atomic32_read(&internal->dev_attached) == 0)
                allow_queuing = 0;

        /* Wait until rx/tx_pkt_burst stops accessing vhost device */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }
}

static void
queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
{
        struct vhost_queue *vq;
        int i;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
}

static int
new_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        struct pmd_internal *internal;
        struct rte_eth_conf *dev_conf;
        unsigned i;
        char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
        int newnode;
#endif

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;
        dev_conf = &eth_dev->data->dev_conf;

#ifdef RTE_LIBRTE_VHOST_NUMA
        newnode = rte_vhost_get_numa_node(vid);
        if (newnode >= 0)
                eth_dev->data->numa_node = newnode;
#endif

        internal->vid = vid;
        if (rte_atomic32_read(&internal->started) == 1) {
                queue_setup(eth_dev, internal);

                if (dev_conf->intr_conf.rxq) {
                        if (eth_vhost_install_intr(eth_dev) < 0) {
                                VHOST_LOG(INFO,
                                        "Failed to install interrupt handler.\n");
                                return -1;
                        }
                }
        } else {
                VHOST_LOG(INFO, "RX/TX queues do not exist yet\n");
        }

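        /*
         * The PMD polls the rings, so guest notifications (kicks) are
         * disabled by default; they are re-enabled per queue only through
         * rx_queue_intr_enable().
         */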
        for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
                rte_vhost_enable_guest_notification(vid, i, 0);

        rte_vhost_get_mtu(vid, &eth_dev->data->mtu);

        eth_dev->data->dev_link.link_status = ETH_LINK_UP;

        rte_atomic32_set(&internal->dev_attached, 1);
        update_queuing_status(eth_dev);

        VHOST_LOG(INFO, "Vhost device %d created\n", vid);

        rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

        return 0;
}

static void
destroy_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct pmd_internal *internal;
        struct vhost_queue *vq;
        struct internal_list *list;
        char ifname[PATH_MAX];
        unsigned i;
        struct rte_vhost_vring_state *state;

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
                return;
        }
        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

        rte_atomic32_set(&internal->dev_attached, 0);
        update_queuing_status(eth_dev);

        eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

        if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                        vq = eth_dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        vq->vid = -1;
                }
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        vq = eth_dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        vq->vid = -1;
                }
        }

        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                state->cur[i] = false;
                state->seen[i] = false;
        }
        state->max_vring = 0;
        rte_spinlock_unlock(&state->lock);

        VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
        eth_vhost_uninstall_intr(eth_dev);

        rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static int
vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id)
{
        struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
        struct pmd_internal *internal = eth_dev->data->dev_private;
        struct vhost_queue *vq;
        struct rte_vhost_vring vring;
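        /* Odd vring ids are guest TX rings, i.e. this PMD's RX queues. */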
        int rx_idx = vring_id % 2 ? (vring_id - 1) >> 1 : -1;
        int ret = 0;

        /*
         * The vring kickfd may be changed after the new device notification.
         * Update it when the vring state is updated.
         */
        if (rx_idx >= 0 && rx_idx < eth_dev->data->nb_rx_queues &&
            rte_atomic32_read(&internal->dev_attached) &&
            rte_atomic32_read(&internal->started) &&
            dev_conf->intr_conf.rxq) {
                ret = rte_vhost_get_vhost_vring(vid, vring_id, &vring);
                if (ret) {
                        VHOST_LOG(ERR, "Failed to get vring %d information.\n",
                                        vring_id);
                        return ret;
                }
                eth_dev->intr_handle->efds[rx_idx] = vring.kickfd;

                vq = eth_dev->data->rx_queues[rx_idx];
                if (!vq) {
                        VHOST_LOG(ERR, "rxq%d is not setup yet\n", rx_idx);
                        return -1;
                }

                rte_spinlock_lock(&vq->intr_lock);
                if (vq->intr_enable)
                        ret = eth_vhost_update_intr(eth_dev, rx_idx);
                rte_spinlock_unlock(&vq->intr_lock);
        }

        return ret;
}

static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
        struct rte_vhost_vring_state *state;
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        char ifname[PATH_MAX];

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        /* won't be NULL */
        state = vring_states[eth_dev->data->port_id];

        if (enable && vring_conf_update(vid, eth_dev, vring))
                VHOST_LOG(INFO, "Failed to update vring-%d configuration.\n",
                          (int)vring);

        rte_spinlock_lock(&state->lock);
        if (state->cur[vring] == enable) {
                rte_spinlock_unlock(&state->lock);
                return 0;
        }
        state->cur[vring] = enable;
        state->max_vring = RTE_MAX(vring, state->max_vring);
        rte_spinlock_unlock(&state->lock);

        VHOST_LOG(INFO, "vring%u is %s\n",
                        vring, enable ? "enabled" : "disabled");

        rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

        return 0;
}

static struct vhost_device_ops vhost_ops = {
        .new_device          = new_device,
        .destroy_device      = destroy_device,
        .vring_state_changed = vring_state_changed,
};

static int
vhost_driver_setup(struct rte_eth_dev *eth_dev)
{
        struct pmd_internal *internal = eth_dev->data->dev_private;
        struct internal_list *list = NULL;
        struct rte_vhost_vring_state *vring_state = NULL;
        unsigned int numa_node = eth_dev->device->numa_node;
        const char *name = eth_dev->device->name;

        /* Don't try to set up again if it has already been done. */
        list = find_internal_resource(internal->iface_name);
        if (list)
                return 0;

        list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
        if (list == NULL)
                return -1;

        vring_state = rte_zmalloc_socket(name, sizeof(*vring_state),
                                         0, numa_node);
        if (vring_state == NULL)
                goto free_list;

        list->eth_dev = eth_dev;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_INSERT_TAIL(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);

        rte_spinlock_init(&vring_state->lock);
        vring_states[eth_dev->data->port_id] = vring_state;

        if (rte_vhost_driver_register(internal->iface_name, internal->flags))
                goto list_remove;

        if (internal->disable_flags) {
                if (rte_vhost_driver_disable_features(internal->iface_name,
                                                      internal->disable_flags))
                        goto drv_unreg;
        }

        if (rte_vhost_driver_callback_register(internal->iface_name,
                                               &vhost_ops) < 0) {
                VHOST_LOG(ERR, "Can't register callbacks\n");
                goto drv_unreg;
        }

        if (rte_vhost_driver_start(internal->iface_name) < 0) {
                VHOST_LOG(ERR, "Failed to start driver for %s\n",
                          internal->iface_name);
                goto drv_unreg;
        }

        return 0;

drv_unreg:
        rte_vhost_driver_unregister(internal->iface_name);
list_remove:
        vring_states[eth_dev->data->port_id] = NULL;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_REMOVE(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);
        rte_free(vring_state);
free_list:
        rte_free(list);

        return -1;
}

int
rte_eth_vhost_get_queue_event(uint16_t port_id,
                struct rte_eth_vhost_queue_event *event)
{
        struct rte_vhost_vring_state *state;
        unsigned int i;
        int idx;

        if (port_id >= RTE_MAX_ETHPORTS) {
                VHOST_LOG(ERR, "Invalid port id\n");
                return -1;
        }

        state = vring_states[port_id];
        if (!state) {
                VHOST_LOG(ERR, "Unused port\n");
                return -1;
        }

        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                idx = state->index++ % (state->max_vring + 1);

                if (state->cur[idx] != state->seen[idx]) {
                        state->seen[idx] = state->cur[idx];
                        event->queue_id = idx / 2;
                        event->rx = idx & 1;
                        event->enable = state->cur[idx];
                        rte_spinlock_unlock(&state->lock);
                        return 0;
                }
        }
        rte_spinlock_unlock(&state->lock);

        return -1;
}

int
rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
        struct internal_list *list;
        struct rte_eth_dev *eth_dev;
        struct vhost_queue *vq;
        int vid = -1;

        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                eth_dev = list->eth_dev;
                if (eth_dev->data->port_id == port_id) {
                        vq = eth_dev->data->rx_queues[0];
                        if (vq)
                                vid = vq->vid;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        return vid;
}

static int
eth_dev_configure(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

        /* NOTE: the same process has to operate a vhost interface
         * from beginning to end (from eth_dev configure to eth_dev close).
         * It is the user's responsibility at the moment.
         */
        if (vhost_driver_setup(dev) < 0)
                return -1;

        internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);

        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *eth_dev)
{
        struct pmd_internal *internal = eth_dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;

        queue_setup(eth_dev, internal);

        if (rte_atomic32_read(&internal->dev_attached) == 1) {
                if (dev_conf->intr_conf.rxq) {
                        if (eth_vhost_install_intr(eth_dev) < 0) {
                                VHOST_LOG(INFO,
                                        "Failed to install interrupt handler.\n");
                                return -1;
                        }
                }
        }

        rte_atomic32_set(&internal->started, 1);
        update_queuing_status(eth_dev);

        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;

        rte_atomic32_set(&internal->started, 0);
        update_queuing_status(dev);
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal;
        struct internal_list *list;
        unsigned int i;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        internal = dev->data->dev_private;
        if (!internal)
                return 0;

        eth_dev_stop(dev);

        list = find_internal_resource(internal->iface_name);
        if (list) {
                rte_vhost_driver_unregister(internal->iface_name);
                pthread_mutex_lock(&internal_list_lock);
                TAILQ_REMOVE(&internal_list, list, next);
                pthread_mutex_unlock(&internal_list_lock);
                rte_free(list);
        }

        if (dev->data->rx_queues)
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        rte_free(dev->data->rx_queues[i]);

        if (dev->data->tx_queues)
                for (i = 0; i < dev->data->nb_tx_queues; i++)
                        rte_free(dev->data->tx_queues[i]);

        rte_free(internal->iface_name);
        rte_free(internal);

        dev->data->dev_private = NULL;

        rte_free(vring_states[dev->data->port_id]);
        vring_states[dev->data->port_id] = NULL;

        return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
                return -ENOMEM;
        }

        vq->mb_pool = mb_pool;
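        /* An RX queue reads from the guest TX ring of the same queue pair. */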
        vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
        rte_spinlock_init(&vq->intr_lock);
        dev->data->rx_queues[rx_queue_id] = vq;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
                return -ENOMEM;
        }

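        /* A TX queue writes into the guest RX ring of the same queue pair. */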
        vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
        rte_spinlock_init(&vq->intr_lock);
        dev->data->tx_queues[tx_queue_id] = vq;

        return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
             struct rte_eth_dev_info *dev_info)
{
        struct pmd_internal *internal;

        internal = dev->data->dev_private;
        if (internal == NULL) {
                VHOST_LOG(ERR, "Invalid device specified\n");
                return -ENODEV;
        }

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = internal->max_queues;
        dev_info->max_tx_queues = internal->max_queues;
        dev_info->min_rx_bufsize = 0;

        dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
                                DEV_TX_OFFLOAD_VLAN_INSERT;
        dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

        return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned i;
        unsigned long rx_total = 0, tx_total = 0;
        unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
        struct vhost_queue *vq;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                stats->q_ipackets[i] = vq->stats.pkts;
                rx_total += stats->q_ipackets[i];

                stats->q_ibytes[i] = vq->stats.bytes;
                rx_total_bytes += stats->q_ibytes[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                stats->q_opackets[i] = vq->stats.pkts;
                tx_total += stats->q_opackets[i];

                stats->q_obytes[i] = vq->stats.bytes;
                tx_total_bytes += stats->q_obytes[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->ibytes = rx_total_bytes;
        stats->obytes = tx_total_bytes;

        return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq;
        unsigned i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
                vq->stats.missed_pkts = 0;
        }

        return 0;
}

static void
eth_queue_release(void *q)
{
        rte_free(q);
}

static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
        /*
         * vhost does not hang onto mbufs: eth_vhost_tx() copies the packet
         * data into the guest ring and frees the mbuf, so there is nothing
         * to clean up.
         */
        return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

static uint32_t
eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct vhost_queue *vq;

        vq = dev->data->rx_queues[rx_queue_id];
        if (vq == NULL)
                return 0;

        return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_close = eth_dev_close,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .tx_done_cleanup = eth_tx_done_cleanup,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .xstats_reset = vhost_dev_xstats_reset,
        .xstats_get = vhost_dev_xstats_get,
        .xstats_get_names = vhost_dev_xstats_get_names,
        .rx_queue_intr_enable = eth_rxq_intr_enable,
        .rx_queue_intr_disable = eth_rxq_intr_disable,
};

static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
        int16_t queues, const unsigned int numa_node, uint64_t flags,
        uint64_t disable_flags)
{
        const char *name = rte_vdev_device_name(dev);
        struct rte_eth_dev_data *data;
        struct pmd_internal *internal = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct rte_ether_addr *eth_addr = NULL;

        VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
                numa_node);

        /* reserve an ethdev entry */
        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
        if (eth_dev == NULL)
                goto error;
        data = eth_dev->data;

        eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
        if (eth_addr == NULL)
                goto error;
        data->mac_addrs = eth_addr;
        *eth_addr = base_eth_addr;
        eth_addr->addr_bytes[5] = eth_dev->data->port_id;

        /* now put it all together
         * - store queue data in internal,
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        internal = eth_dev->data->dev_private;
        internal->iface_name = rte_malloc_socket(name, strlen(iface_name) + 1,
                                                 0, numa_node);
        if (internal->iface_name == NULL)
                goto error;
        strcpy(internal->iface_name, iface_name);

        data->nb_rx_queues = queues;
        data->nb_tx_queues = queues;
        internal->max_queues = queues;
        internal->vid = -1;
        internal->flags = flags;
        internal->disable_flags = disable_flags;
        data->dev_link = pmd_link;
        data->dev_flags = RTE_ETH_DEV_INTR_LSC;
        data->promiscuous = 1;
        data->all_multicast = 1;

        eth_dev->dev_ops = &ops;
        eth_dev->rx_queue_count = eth_rx_queue_count;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_vhost_rx;
        eth_dev->tx_pkt_burst = eth_vhost_tx;

        rte_eth_dev_probing_finish(eth_dev);
        return 0;

error:
        if (internal)
                rte_free(internal->iface_name);
        rte_eth_dev_release_port(eth_dev);

        return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
        const char **iface_name = extra_args;

        if (value == NULL)
                return -1;

        *iface_name = value;

        return 0;
}

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
        uint16_t *n = extra_args;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        *n = (uint16_t)strtoul(value, NULL, 0);
        if (*n == USHRT_MAX && errno == ERANGE)
                return -1;

        return 0;
}

static int
rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        char *iface_name;
        uint16_t queues;
        uint64_t flags = 0;
        uint64_t disable_flags = 0;
        int client_mode = 0;
        int iommu_support = 0;
        int postcopy_support = 0;
        int tso = 0;
        int linear_buf = 0;
        int ext_buf = 0;
        struct rte_eth_dev *eth_dev;
        const char *name = rte_vdev_device_name(dev);

        VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);

        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                eth_dev = rte_eth_dev_attach_secondary(name);
                if (!eth_dev) {
                        VHOST_LOG(ERR, "Failed to probe %s\n", name);
                        return -1;
                }
                eth_dev->rx_pkt_burst = eth_vhost_rx;
                eth_dev->tx_pkt_burst = eth_vhost_tx;
                eth_dev->dev_ops = &ops;
                if (dev->device.numa_node == SOCKET_ID_ANY)
                        dev->device.numa_node = rte_socket_id();
                eth_dev->device = &dev->device;
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
        }

        kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
        if (kvlist == NULL)
                return -1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
                                         &open_iface, &iface_name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -1;
                goto out_free;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
                                         &open_int, &queues);
                if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
                        goto out_free;
        } else {
                queues = 1;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
                                         &open_int, &client_mode);
                if (ret < 0)
                        goto out_free;

                if (client_mode)
                        flags |= RTE_VHOST_USER_CLIENT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
                                         &open_int, &iommu_support);
                if (ret < 0)
                        goto out_free;

                if (iommu_support)
                        flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_POSTCOPY_SUPPORT) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_POSTCOPY_SUPPORT,
                                         &open_int, &postcopy_support);
                if (ret < 0)
                        goto out_free;

                if (postcopy_support)
                        flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_VIRTIO_NET_F_HOST_TSO) == 1) {
                ret = rte_kvargs_process(kvlist,
                                ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
                                &open_int, &tso);
                if (ret < 0)
                        goto out_free;

                if (tso == 0) {
                        disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
                        disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
                }
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_LINEAR_BUF) == 1) {
                ret = rte_kvargs_process(kvlist,
                                ETH_VHOST_LINEAR_BUF,
                                &open_int, &linear_buf);
                if (ret < 0)
                        goto out_free;

                if (linear_buf == 1)
                        flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_EXT_BUF) == 1) {
                ret = rte_kvargs_process(kvlist,
                                ETH_VHOST_EXT_BUF,
                                &open_int, &ext_buf);
                if (ret < 0)
                        goto out_free;

                if (ext_buf == 1)
                        flags |= RTE_VHOST_USER_EXTBUF_SUPPORT;
        }

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        ret = eth_dev_vhost_create(dev, iface_name, queues,
                                   dev->device.numa_node, flags, disable_flags);
        if (ret == -1)
                VHOST_LOG(ERR, "Failed to create %s\n", name);

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
        const char *name;
        struct rte_eth_dev *eth_dev = NULL;

        name = rte_vdev_device_name(dev);
        VHOST_LOG(INFO, "Un-Initializing pmd_vhost for %s\n", name);

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return 0;

        eth_dev_close(eth_dev);
        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_vhost_drv = {
        .probe = rte_pmd_vhost_probe,
        .remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
        "iface=<ifc> "
        "queues=<int> "
        "client=<0|1> "
        "iommu-support=<0|1> "
        "postcopy-support=<0|1> "
        "tso=<0|1> "
        "linear-buffer=<0|1> "
        "ext-buffer=<0|1>");
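
/*
 * Example invocation (a sketch, assuming a vhost-user socket path of
 * /tmp/sock0; adjust to your setup):
 *
 *   dpdk-testpmd -l 0-1 \
 *       --vdev 'net_vhost0,iface=/tmp/sock0,queues=1,client=1' -- -i
 */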