1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 IGEL Co., Ltd.
3 * Copyright(c) 2016-2018 Intel Corporation
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_memcpy.h>
14 #include <rte_bus_vdev.h>
15 #include <rte_kvargs.h>
16 #include <rte_vhost.h>
17 #include <rte_spinlock.h>
19 #include "rte_eth_vhost.h"
21 static int vhost_logtype;
23 #define VHOST_LOG(level, ...) \
24 rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)
26 enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
28 #define ETH_VHOST_IFACE_ARG "iface"
29 #define ETH_VHOST_QUEUES_ARG "queues"
30 #define ETH_VHOST_CLIENT_ARG "client"
31 #define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
32 #define ETH_VHOST_IOMMU_SUPPORT "iommu-support"
33 #define ETH_VHOST_POSTCOPY_SUPPORT "postcopy-support"
34 #define ETH_VHOST_VIRTIO_NET_F_HOST_TSO "tso"
35 #define ETH_VHOST_LINEAR_BUF "linear-buffer"
36 #define ETH_VHOST_EXT_BUF "ext-buffer"
37 #define VHOST_MAX_PKT_BURST 32
39 static const char *valid_arguments[] = {
43 ETH_VHOST_DEQUEUE_ZERO_COPY,
44 ETH_VHOST_IOMMU_SUPPORT,
45 ETH_VHOST_POSTCOPY_SUPPORT,
46 ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
52 static struct rte_ether_addr base_eth_addr = {
63 enum vhost_xstats_pkts {
64 VHOST_UNDERSIZE_PKT = 0,
69 VHOST_512_TO_1023_PKT,
70 VHOST_1024_TO_1522_PKT,
71 VHOST_1523_TO_MAX_PKT,
76 VHOST_ERRORS_FRAGMENTED,
78 VHOST_UNKNOWN_PROTOCOL,
86 uint64_t xstats[VHOST_XSTATS_MAX];
91 rte_atomic32_t allow_queuing;
92 rte_atomic32_t while_queuing;
93 struct pmd_internal *internal;
94 struct rte_mempool *mb_pool;
96 uint16_t virtqueue_id;
97 struct vhost_stats stats;
100 struct pmd_internal {
101 rte_atomic32_t dev_attached;
104 uint64_t disable_flags;
107 rte_atomic32_t started;
111 struct internal_list {
112 TAILQ_ENTRY(internal_list) next;
113 struct rte_eth_dev *eth_dev;
116 TAILQ_HEAD(internal_list_head, internal_list);
117 static struct internal_list_head internal_list =
118 TAILQ_HEAD_INITIALIZER(internal_list);
120 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
122 static struct rte_eth_link pmd_link = {
124 .link_duplex = ETH_LINK_FULL_DUPLEX,
125 .link_status = ETH_LINK_DOWN
128 struct rte_vhost_vring_state {
131 bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
132 bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
134 unsigned int max_vring;
137 static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
139 #define VHOST_XSTATS_NAME_SIZE 64
141 struct vhost_xstats_name_off {
142 char name[VHOST_XSTATS_NAME_SIZE];
146 /* [rx]_ is prepended to the name string here */
147 static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
149 offsetof(struct vhost_queue, stats.pkts)},
151 offsetof(struct vhost_queue, stats.bytes)},
153 offsetof(struct vhost_queue, stats.missed_pkts)},
154 {"broadcast_packets",
155 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
156 {"multicast_packets",
157 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
159 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
160 {"undersize_packets",
161 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
163 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
164 {"size_65_to_127_packets",
165 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
166 {"size_128_to_255_packets",
167 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
168 {"size_256_to_511_packets",
169 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
170 {"size_512_to_1023_packets",
171 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
172 {"size_1024_to_1522_packets",
173 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
174 {"size_1523_to_max_packets",
175 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
176 {"errors_with_bad_CRC",
177 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
178 {"fragmented_errors",
179 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
181 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
182 {"unknown_protos_packets",
183 offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
186 /* [tx]_ is prepended to the name string here */
187 static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
189 offsetof(struct vhost_queue, stats.pkts)},
191 offsetof(struct vhost_queue, stats.bytes)},
193 offsetof(struct vhost_queue, stats.missed_pkts)},
194 {"broadcast_packets",
195 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
196 {"multicast_packets",
197 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
199 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
200 {"undersize_packets",
201 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
203 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
204 {"size_65_to_127_packets",
205 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
206 {"size_128_to_255_packets",
207 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
208 {"size_256_to_511_packets",
209 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
210 {"size_512_to_1023_packets",
211 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
212 {"size_1024_to_1522_packets",
213 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
214 {"size_1523_to_max_packets",
215 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
216 {"errors_with_bad_CRC",
217 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
220 #define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
221 sizeof(vhost_rxport_stat_strings[0]))
223 #define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
224 sizeof(vhost_txport_stat_strings[0]))
227 vhost_dev_xstats_reset(struct rte_eth_dev *dev)
229 struct vhost_queue *vq = NULL;
232 for (i = 0; i < dev->data->nb_rx_queues; i++) {
233 vq = dev->data->rx_queues[i];
236 memset(&vq->stats, 0, sizeof(vq->stats));
238 for (i = 0; i < dev->data->nb_tx_queues; i++) {
239 vq = dev->data->tx_queues[i];
242 memset(&vq->stats, 0, sizeof(vq->stats));
249 vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
250 struct rte_eth_xstat_name *xstats_names,
251 unsigned int limit __rte_unused)
255 int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
259 for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
260 snprintf(xstats_names[count].name,
261 sizeof(xstats_names[count].name),
262 "rx_%s", vhost_rxport_stat_strings[t].name);
265 for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
266 snprintf(xstats_names[count].name,
267 sizeof(xstats_names[count].name),
268 "tx_%s", vhost_txport_stat_strings[t].name);
275 vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
280 unsigned int count = 0;
281 struct vhost_queue *vq = NULL;
282 unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
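/* Unicast packets are not counted per packet; derive them below as total
 * packets minus broadcast and multicast. On the Tx side, missed packets
 * are folded into the total as well.
 */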
287 for (i = 0; i < dev->data->nb_rx_queues; i++) {
288 vq = dev->data->rx_queues[i];
291 vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
292 - (vq->stats.xstats[VHOST_BROADCAST_PKT]
293 + vq->stats.xstats[VHOST_MULTICAST_PKT]);
295 for (i = 0; i < dev->data->nb_tx_queues; i++) {
296 vq = dev->data->tx_queues[i];
299 vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
300 + vq->stats.missed_pkts
301 - (vq->stats.xstats[VHOST_BROADCAST_PKT]
302 + vq->stats.xstats[VHOST_MULTICAST_PKT]);
304 for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
305 xstats[count].value = 0;
306 for (i = 0; i < dev->data->nb_rx_queues; i++) {
307 vq = dev->data->rx_queues[i];
310 xstats[count].value +=
311 *(uint64_t *)(((char *)vq)
312 + vhost_rxport_stat_strings[t].offset);
314 xstats[count].id = count;
317 for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
318 xstats[count].value = 0;
319 for (i = 0; i < dev->data->nb_tx_queues; i++) {
320 vq = dev->data->tx_queues[i];
323 xstats[count].value +=
324 *(uint64_t *)(((char *)vq)
325 + vhost_txport_stat_strings[t].offset);
327 xstats[count].id = count;
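/* Inspect the destination MAC of a packet and bump the broadcast or
 * multicast xstats counter accordingly.
 */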
334 vhost_count_multicast_broadcast(struct vhost_queue *vq,
335 struct rte_mbuf *mbuf)
337 struct rte_ether_addr *ea = NULL;
338 struct vhost_stats *pstats = &vq->stats;
340 ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
341 if (rte_is_multicast_ether_addr(ea)) {
342 if (rte_is_broadcast_ether_addr(ea))
343 pstats->xstats[VHOST_BROADCAST_PKT]++;
345 pstats->xstats[VHOST_MULTICAST_PKT]++;
350 vhost_update_packet_xstats(struct vhost_queue *vq,
351 struct rte_mbuf **bufs,
354 uint32_t pkt_len = 0;
357 struct vhost_stats *pstats = &vq->stats;
359 for (i = 0; i < count ; i++) {
360 pkt_len = bufs[i]->pkt_len;
362 pstats->xstats[VHOST_64_PKT]++;
363 } else if (pkt_len > 64 && pkt_len < 1024) {
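/* Map packets of 65..1023 bytes to their power-of-two size bucket: the bit
 * width of pkt_len minus 5 selects VHOST_65_TO_127_PKT through
 * VHOST_512_TO_1023_PKT (this assumes the size-bucket enumerators are
 * contiguous, as the enum above suggests).
 */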
364 index = (sizeof(pkt_len) * 8)
365 - __builtin_clz(pkt_len) - 5;
366 pstats->xstats[index]++;
369 pstats->xstats[VHOST_UNDERSIZE_PKT]++;
370 else if (pkt_len <= 1522)
371 pstats->xstats[VHOST_1024_TO_1522_PKT]++;
372 else if (pkt_len > 1522)
373 pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
375 vhost_count_multicast_broadcast(vq, bufs[i]);
380 eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
382 struct vhost_queue *r = q;
383 uint16_t i, nb_rx = 0;
384 uint16_t nb_receive = nb_bufs;
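/* allow_queuing/while_queuing form a lightweight gate: update_queuing_status()
 * clears allow_queuing and then waits for while_queuing to drop, so re-checking
 * allow_queuing after raising while_queuing guarantees we never touch a vhost
 * device that is being detached.
 */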
386 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
389 rte_atomic32_set(&r->while_queuing, 1);
391 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
394 /* Dequeue packets from guest TX queue */
397 uint16_t num = (uint16_t)RTE_MIN(nb_receive,
398 VHOST_MAX_PKT_BURST);
400 nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
401 r->mb_pool, &bufs[nb_rx],
405 nb_receive -= nb_pkts;
410 r->stats.pkts += nb_rx;
412 for (i = 0; likely(i < nb_rx); i++) {
413 bufs[i]->port = r->port;
414 bufs[i]->vlan_tci = 0;
416 if (r->internal->vlan_strip)
417 rte_vlan_strip(bufs[i]);
419 r->stats.bytes += bufs[i]->pkt_len;
422 vhost_update_packet_xstats(r, bufs, nb_rx);
425 rte_atomic32_set(&r->while_queuing, 0);
431 eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
433 struct vhost_queue *r = q;
434 uint16_t i, nb_tx = 0;
435 uint16_t nb_send = 0;
437 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
440 rte_atomic32_set(&r->while_queuing, 1);
442 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
445 for (i = 0; i < nb_bufs; i++) {
446 struct rte_mbuf *m = bufs[i];
448 /* Do VLAN tag insertion */
449 if (m->ol_flags & PKT_TX_VLAN_PKT) {
450 int error = rte_vlan_insert(&m);
451 if (unlikely(error)) {
461 /* Enqueue packets to guest RX queue */
464 uint16_t num = (uint16_t)RTE_MIN(nb_send,
465 VHOST_MAX_PKT_BURST);
467 nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
476 r->stats.pkts += nb_tx;
477 r->stats.missed_pkts += nb_bufs - nb_tx;
479 for (i = 0; likely(i < nb_tx); i++)
480 r->stats.bytes += bufs[i]->pkt_len;
482 vhost_update_packet_xstats(r, bufs, nb_tx);
484 /* According to RFC 2863 (page 42, ifHCOutMulticastPkts and
485 * ifHCOutBroadcastPkts), the "multicast" and "broadcast" counters
486 * are incremented even when packets are not transmitted successfully.
488 for (i = nb_tx; i < nb_bufs; i++)
489 vhost_count_multicast_broadcast(r, bufs[i]);
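/* rte_vhost_enqueue_burst() copies packet data into the guest's buffers, so
 * successfully enqueued mbufs can be freed here; mbufs that were not sent
 * remain owned by the caller.
 */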
491 for (i = 0; likely(i < nb_tx); i++)
492 rte_pktmbuf_free(bufs[i]);
494 rte_atomic32_set(&r->while_queuing, 0);
499 static inline struct internal_list *
500 find_internal_resource(char *ifname)
503 struct internal_list *list;
504 struct pmd_internal *internal;
509 pthread_mutex_lock(&internal_list_lock);
511 TAILQ_FOREACH(list, &internal_list, next) {
512 internal = list->eth_dev->data->dev_private;
513 if (!strcmp(internal->iface_name, ifname)) {
519 pthread_mutex_unlock(&internal_list_lock);
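/* Ethdev Rx queue N is backed by guest TX vring (N << 1) + 1; the interrupt
 * enable/disable handlers below toggle guest notification on that vring.
 */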
528 eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
530 struct rte_vhost_vring vring;
531 struct vhost_queue *vq;
534 vq = dev->data->rx_queues[qid];
536 VHOST_LOG(ERR, "rxq%d is not set up yet\n", qid);
540 ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
542 VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
545 VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
546 rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
553 eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
555 struct rte_vhost_vring vring;
556 struct vhost_queue *vq;
559 vq = dev->data->rx_queues[qid];
561 VHOST_LOG(ERR, "rxq%d is not set up yet\n", qid);
565 ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
567 VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
570 VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
571 rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
578 eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
580 struct rte_intr_handle *intr_handle = dev->intr_handle;
583 if (intr_handle->intr_vec)
584 free(intr_handle->intr_vec);
588 dev->intr_handle = NULL;
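/* Build a vdev interrupt handle whose per-queue eventfds are the vhost vring
 * kickfds, so Rx interrupt mode can wait on guest kick notifications.
 */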
592 eth_vhost_install_intr(struct rte_eth_dev *dev)
594 struct rte_vhost_vring vring;
595 struct vhost_queue *vq;
597 int nb_rxq = dev->data->nb_rx_queues;
601 /* uninstall first if we are reconnecting */
602 if (dev->intr_handle)
603 eth_vhost_uninstall_intr(dev);
605 dev->intr_handle = malloc(sizeof(*dev->intr_handle));
606 if (!dev->intr_handle) {
607 VHOST_LOG(ERR, "Failed to allocate intr_handle\n");
610 memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));
612 dev->intr_handle->efd_counter_size = sizeof(uint64_t);
614 dev->intr_handle->intr_vec =
615 malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));
617 if (!dev->intr_handle->intr_vec) {
619 "Failed to allocate memory for interrupt vector\n");
620 free(dev->intr_handle);
624 VHOST_LOG(INFO, "Prepare intr vec\n");
625 for (i = 0; i < nb_rxq; i++) {
626 vq = dev->data->rx_queues[i];
628 VHOST_LOG(INFO, "rxq-%d not set up yet, skip!\n", i);
632 ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
635 "Failed to get rxq-%d's vring, skip!\n", i);
639 if (vring.kickfd < 0) {
641 "rxq-%d's kickfd is invalid, skip!\n", i);
644 dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
645 dev->intr_handle->efds[i] = vring.kickfd;
647 VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
650 dev->intr_handle->nb_efd = count;
651 dev->intr_handle->max_intr = count + 1;
652 dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
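/* Start or stop the datapath: allow queuing only while the port is started
 * and a vhost device is attached, and spin on while_queuing so in-flight
 * Rx/Tx bursts drain before the state change takes effect.
 */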
658 update_queuing_status(struct rte_eth_dev *dev)
660 struct pmd_internal *internal = dev->data->dev_private;
661 struct vhost_queue *vq;
663 int allow_queuing = 1;
665 if (!dev->data->rx_queues || !dev->data->tx_queues)
668 if (rte_atomic32_read(&internal->started) == 0 ||
669 rte_atomic32_read(&internal->dev_attached) == 0)
672 /* Wait until rx/tx_pkt_burst stops accessing vhost device */
673 for (i = 0; i < dev->data->nb_rx_queues; i++) {
674 vq = dev->data->rx_queues[i];
677 rte_atomic32_set(&vq->allow_queuing, allow_queuing);
678 while (rte_atomic32_read(&vq->while_queuing))
682 for (i = 0; i < dev->data->nb_tx_queues; i++) {
683 vq = dev->data->tx_queues[i];
686 rte_atomic32_set(&vq->allow_queuing, allow_queuing);
687 while (rte_atomic32_read(&vq->while_queuing))
693 queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
695 struct vhost_queue *vq;
698 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
699 vq = eth_dev->data->rx_queues[i];
702 vq->vid = internal->vid;
703 vq->internal = internal;
704 vq->port = eth_dev->data->port_id;
706 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
707 vq = eth_dev->data->tx_queues[i];
710 vq->vid = internal->vid;
711 vq->internal = internal;
712 vq->port = eth_dev->data->port_id;
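/* new_device() below is the vhost-user callback invoked when a guest
 * connects: it binds the new vid to the queues, enables the link, and
 * notifies applications through an LSC interrupt event.
 */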
719 struct rte_eth_dev *eth_dev;
720 struct internal_list *list;
721 struct pmd_internal *internal;
722 struct rte_eth_conf *dev_conf;
724 char ifname[PATH_MAX];
725 #ifdef RTE_LIBRTE_VHOST_NUMA
729 rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
730 list = find_internal_resource(ifname);
732 VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
736 eth_dev = list->eth_dev;
737 internal = eth_dev->data->dev_private;
738 dev_conf = &eth_dev->data->dev_conf;
740 #ifdef RTE_LIBRTE_VHOST_NUMA
741 newnode = rte_vhost_get_numa_node(vid);
743 eth_dev->data->numa_node = newnode;
747 if (rte_atomic32_read(&internal->started) == 1) {
748 queue_setup(eth_dev, internal);
750 if (dev_conf->intr_conf.rxq) {
751 if (eth_vhost_install_intr(eth_dev) < 0) {
753 "Failed to install interrupt handler.");
758 VHOST_LOG(INFO, "RX/TX queues do not exist yet\n");
761 for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
762 rte_vhost_enable_guest_notification(vid, i, 0);
764 rte_vhost_get_mtu(vid, &eth_dev->data->mtu);
766 eth_dev->data->dev_link.link_status = ETH_LINK_UP;
768 rte_atomic32_set(&internal->dev_attached, 1);
769 update_queuing_status(eth_dev);
771 VHOST_LOG(INFO, "Vhost device %d created\n", vid);
773 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
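/* destroy_device() is the vhost-user callback invoked when the guest
 * disconnects: it stops queuing, marks the link down, walks the Rx/Tx
 * queues, and resets the tracked vring states before signalling a
 * link-down event.
 */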
779 destroy_device(int vid)
781 struct rte_eth_dev *eth_dev;
782 struct pmd_internal *internal;
783 struct vhost_queue *vq;
784 struct internal_list *list;
785 char ifname[PATH_MAX];
787 struct rte_vhost_vring_state *state;
789 rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
790 list = find_internal_resource(ifname);
792 VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
795 eth_dev = list->eth_dev;
796 internal = eth_dev->data->dev_private;
798 rte_atomic32_set(&internal->dev_attached, 0);
799 update_queuing_status(eth_dev);
801 eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
803 if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
804 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
805 vq = eth_dev->data->rx_queues[i];
810 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
811 vq = eth_dev->data->tx_queues[i];
818 state = vring_states[eth_dev->data->port_id];
819 rte_spinlock_lock(&state->lock);
820 for (i = 0; i <= state->max_vring; i++) {
821 state->cur[i] = false;
822 state->seen[i] = false;
824 state->max_vring = 0;
825 rte_spinlock_unlock(&state->lock);
827 VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
828 eth_vhost_uninstall_intr(eth_dev);
830 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
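/* vring_state_changed() records per-vring enable/disable transitions so
 * applications can retrieve them later via rte_eth_vhost_get_queue_event().
 */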
834 vring_state_changed(int vid, uint16_t vring, int enable)
836 struct rte_vhost_vring_state *state;
837 struct rte_eth_dev *eth_dev;
838 struct internal_list *list;
839 char ifname[PATH_MAX];
841 rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
842 list = find_internal_resource(ifname);
844 VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
848 eth_dev = list->eth_dev;
850 state = vring_states[eth_dev->data->port_id];
851 rte_spinlock_lock(&state->lock);
852 if (state->cur[vring] == enable) {
853 rte_spinlock_unlock(&state->lock);
856 state->cur[vring] = enable;
857 state->max_vring = RTE_MAX(vring, state->max_vring);
858 rte_spinlock_unlock(&state->lock);
860 VHOST_LOG(INFO, "vring%u is %s\n",
861 vring, enable ? "enabled" : "disabled");
863 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
868 static struct vhost_device_ops vhost_ops = {
869 .new_device = new_device,
870 .destroy_device = destroy_device,
871 .vring_state_changed = vring_state_changed,
875 vhost_driver_setup(struct rte_eth_dev *eth_dev)
877 struct pmd_internal *internal = eth_dev->data->dev_private;
878 struct internal_list *list = NULL;
879 struct rte_vhost_vring_state *vring_state = NULL;
880 unsigned int numa_node = eth_dev->device->numa_node;
881 const char *name = eth_dev->device->name;
883 /* Don't try to set up again if it has already been done. */
884 list = find_internal_resource(internal->iface_name);
888 list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
892 vring_state = rte_zmalloc_socket(name, sizeof(*vring_state),
894 if (vring_state == NULL)
897 list->eth_dev = eth_dev;
898 pthread_mutex_lock(&internal_list_lock);
899 TAILQ_INSERT_TAIL(&internal_list, list, next);
900 pthread_mutex_unlock(&internal_list_lock);
902 rte_spinlock_init(&vring_state->lock);
903 vring_states[eth_dev->data->port_id] = vring_state;
905 if (rte_vhost_driver_register(internal->iface_name, internal->flags))
908 if (internal->disable_flags) {
909 if (rte_vhost_driver_disable_features(internal->iface_name,
910 internal->disable_flags))
914 if (rte_vhost_driver_callback_register(internal->iface_name,
916 VHOST_LOG(ERR, "Can't register callbacks\n");
920 if (rte_vhost_driver_start(internal->iface_name) < 0) {
921 VHOST_LOG(ERR, "Failed to start driver for %s\n",
922 internal->iface_name);
929 rte_vhost_driver_unregister(internal->iface_name);
931 vring_states[eth_dev->data->port_id] = NULL;
932 pthread_mutex_lock(&internal_list_lock);
933 TAILQ_REMOVE(&internal_list, list, next);
934 pthread_mutex_unlock(&internal_list_lock);
935 rte_free(vring_state);
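/* Public API: return the next vring whose enable state changed since it was
 * last reported, scanning round-robin from the saved index so no queue is
 * starved.
 */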
943 rte_eth_vhost_get_queue_event(uint16_t port_id,
944 struct rte_eth_vhost_queue_event *event)
946 struct rte_vhost_vring_state *state;
950 if (port_id >= RTE_MAX_ETHPORTS) {
951 VHOST_LOG(ERR, "Invalid port id\n");
955 state = vring_states[port_id];
957 VHOST_LOG(ERR, "Unused port\n");
961 rte_spinlock_lock(&state->lock);
962 for (i = 0; i <= state->max_vring; i++) {
963 idx = state->index++ % (state->max_vring + 1);
965 if (state->cur[idx] != state->seen[idx]) {
966 state->seen[idx] = state->cur[idx];
967 event->queue_id = idx / 2;
969 event->enable = state->cur[idx];
970 rte_spinlock_unlock(&state->lock);
974 rte_spinlock_unlock(&state->lock);
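/* Public API: translate an ethdev port id into the vhost vid stored in its
 * first Rx queue.
 */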
980 rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
982 struct internal_list *list;
983 struct rte_eth_dev *eth_dev;
984 struct vhost_queue *vq;
987 if (!rte_eth_dev_is_valid_port(port_id))
990 pthread_mutex_lock(&internal_list_lock);
992 TAILQ_FOREACH(list, &internal_list, next) {
993 eth_dev = list->eth_dev;
994 if (eth_dev->data->port_id == port_id) {
995 vq = eth_dev->data->rx_queues[0];
1003 pthread_mutex_unlock(&internal_list_lock);
1009 eth_dev_configure(struct rte_eth_dev *dev)
1011 struct pmd_internal *internal = dev->data->dev_private;
1012 const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1014 /* NOTE: the same process has to operate a vhost interface
1015 * from beginning to end (from eth_dev configure to eth_dev close).
1016 * For now this is the user's responsibility.
1018 if (vhost_driver_setup(dev) < 0)
1021 internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1027 eth_dev_start(struct rte_eth_dev *eth_dev)
1029 struct pmd_internal *internal = eth_dev->data->dev_private;
1030 struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
1032 queue_setup(eth_dev, internal);
1034 if (rte_atomic32_read(&internal->dev_attached) == 1) {
1035 if (dev_conf->intr_conf.rxq) {
1036 if (eth_vhost_install_intr(eth_dev) < 0) {
1038 "Failed to install interrupt handler.");
1044 rte_atomic32_set(&internal->started, 1);
1045 update_queuing_status(eth_dev);
1051 eth_dev_stop(struct rte_eth_dev *dev)
1053 struct pmd_internal *internal = dev->data->dev_private;
1055 rte_atomic32_set(&internal->started, 0);
1056 update_queuing_status(dev);
1060 eth_dev_close(struct rte_eth_dev *dev)
1062 struct pmd_internal *internal;
1063 struct internal_list *list;
1066 internal = dev->data->dev_private;
1072 list = find_internal_resource(internal->iface_name);
1074 rte_vhost_driver_unregister(internal->iface_name);
1075 pthread_mutex_lock(&internal_list_lock);
1076 TAILQ_REMOVE(&internal_list, list, next);
1077 pthread_mutex_unlock(&internal_list_lock);
1081 if (dev->data->rx_queues)
1082 for (i = 0; i < dev->data->nb_rx_queues; i++)
1083 rte_free(dev->data->rx_queues[i]);
1085 if (dev->data->tx_queues)
1086 for (i = 0; i < dev->data->nb_tx_queues; i++)
1087 rte_free(dev->data->tx_queues[i]);
1089 rte_free(internal->iface_name);
1092 dev->data->dev_private = NULL;
1094 rte_free(vring_states[dev->data->port_id]);
1095 vring_states[dev->data->port_id] = NULL;
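/* In the queue setup handlers below, an ethdev Rx queue dequeues from the
 * guest TX vring and an ethdev Tx queue enqueues to the guest RX vring,
 * hence the VIRTIO_TXQ and VIRTIO_RXQ offsets.
 */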
1099 eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1100 uint16_t nb_rx_desc __rte_unused,
1101 unsigned int socket_id,
1102 const struct rte_eth_rxconf *rx_conf __rte_unused,
1103 struct rte_mempool *mb_pool)
1105 struct vhost_queue *vq;
1107 vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
1108 RTE_CACHE_LINE_SIZE, socket_id);
1110 VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
1114 vq->mb_pool = mb_pool;
1115 vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
1116 dev->data->rx_queues[rx_queue_id] = vq;
1122 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1123 uint16_t nb_tx_desc __rte_unused,
1124 unsigned int socket_id,
1125 const struct rte_eth_txconf *tx_conf __rte_unused)
1127 struct vhost_queue *vq;
1129 vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
1130 RTE_CACHE_LINE_SIZE, socket_id);
1132 VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
1136 vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
1137 dev->data->tx_queues[tx_queue_id] = vq;
1143 eth_dev_info(struct rte_eth_dev *dev,
1144 struct rte_eth_dev_info *dev_info)
1146 struct pmd_internal *internal;
1148 internal = dev->data->dev_private;
1149 if (internal == NULL) {
1150 VHOST_LOG(ERR, "Invalid device specified\n");
1154 dev_info->max_mac_addrs = 1;
1155 dev_info->max_rx_pktlen = (uint32_t)-1;
1156 dev_info->max_rx_queues = internal->max_queues;
1157 dev_info->max_tx_queues = internal->max_queues;
1158 dev_info->min_rx_bufsize = 0;
1160 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
1161 DEV_TX_OFFLOAD_VLAN_INSERT;
1162 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1168 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1171 unsigned long rx_total = 0, tx_total = 0;
1172 unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
1173 struct vhost_queue *vq;
1175 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
1176 i < dev->data->nb_rx_queues; i++) {
1177 if (dev->data->rx_queues[i] == NULL)
1179 vq = dev->data->rx_queues[i];
1180 stats->q_ipackets[i] = vq->stats.pkts;
1181 rx_total += stats->q_ipackets[i];
1183 stats->q_ibytes[i] = vq->stats.bytes;
1184 rx_total_bytes += stats->q_ibytes[i];
1187 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
1188 i < dev->data->nb_tx_queues; i++) {
1189 if (dev->data->tx_queues[i] == NULL)
1191 vq = dev->data->tx_queues[i];
1192 stats->q_opackets[i] = vq->stats.pkts;
1193 tx_total += stats->q_opackets[i];
1195 stats->q_obytes[i] = vq->stats.bytes;
1196 tx_total_bytes += stats->q_obytes[i];
1199 stats->ipackets = rx_total;
1200 stats->opackets = tx_total;
1201 stats->ibytes = rx_total_bytes;
1202 stats->obytes = tx_total_bytes;
1208 eth_stats_reset(struct rte_eth_dev *dev)
1210 struct vhost_queue *vq;
1213 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1214 if (dev->data->rx_queues[i] == NULL)
1216 vq = dev->data->rx_queues[i];
1218 vq->stats.bytes = 0;
1220 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1221 if (dev->data->tx_queues[i] == NULL)
1223 vq = dev->data->tx_queues[i];
1225 vq->stats.bytes = 0;
1226 vq->stats.missed_pkts = 0;
1233 eth_queue_release(void *q)
1239 eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
1242 * vhost does not hang onto mbufs: eth_vhost_tx() copies packet data
1243 * and frees the mbuf, so there is nothing to clean up.
1249 eth_link_update(struct rte_eth_dev *dev __rte_unused,
1250 int wait_to_complete __rte_unused)
1256 eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1258 struct vhost_queue *vq;
1260 vq = dev->data->rx_queues[rx_queue_id];
1264 return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
1267 static const struct eth_dev_ops ops = {
1268 .dev_start = eth_dev_start,
1269 .dev_stop = eth_dev_stop,
1270 .dev_close = eth_dev_close,
1271 .dev_configure = eth_dev_configure,
1272 .dev_infos_get = eth_dev_info,
1273 .rx_queue_setup = eth_rx_queue_setup,
1274 .tx_queue_setup = eth_tx_queue_setup,
1275 .rx_queue_release = eth_queue_release,
1276 .tx_queue_release = eth_queue_release,
1277 .tx_done_cleanup = eth_tx_done_cleanup,
1278 .rx_queue_count = eth_rx_queue_count,
1279 .link_update = eth_link_update,
1280 .stats_get = eth_stats_get,
1281 .stats_reset = eth_stats_reset,
1282 .xstats_reset = vhost_dev_xstats_reset,
1283 .xstats_get = vhost_dev_xstats_get,
1284 .xstats_get_names = vhost_dev_xstats_get_names,
1285 .rx_queue_intr_enable = eth_rxq_intr_enable,
1286 .rx_queue_intr_disable = eth_rxq_intr_disable,
1290 eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
1291 int16_t queues, const unsigned int numa_node, uint64_t flags,
1292 uint64_t disable_flags)
1294 const char *name = rte_vdev_device_name(dev);
1295 struct rte_eth_dev_data *data;
1296 struct pmd_internal *internal = NULL;
1297 struct rte_eth_dev *eth_dev = NULL;
1298 struct rte_ether_addr *eth_addr = NULL;
1300 VHOST_LOG(INFO, "Creating VHOST-USER backend on NUMA socket %u\n",
1303 /* reserve an ethdev entry */
1304 eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
1305 if (eth_dev == NULL)
1307 data = eth_dev->data;
1309 eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
1310 if (eth_addr == NULL)
1312 data->mac_addrs = eth_addr;
1313 *eth_addr = base_eth_addr;
1314 eth_addr->addr_bytes[5] = eth_dev->data->port_id;
1316 /* Now put it all together:
1317 * - store queue data in internal,
1318 * - point eth_dev_data to internal,
1319 * - and point the eth_dev structure at the new eth_dev_data structure
1321 internal = eth_dev->data->dev_private;
1322 internal->iface_name = rte_malloc_socket(name, strlen(iface_name) + 1,
1324 if (internal->iface_name == NULL)
1326 strcpy(internal->iface_name, iface_name);
1328 data->nb_rx_queues = queues;
1329 data->nb_tx_queues = queues;
1330 internal->max_queues = queues;
1332 internal->flags = flags;
1333 internal->disable_flags = disable_flags;
1334 data->dev_link = pmd_link;
1335 data->dev_flags = RTE_ETH_DEV_INTR_LSC | RTE_ETH_DEV_CLOSE_REMOVE;
1336 data->promiscuous = 1;
1337 data->all_multicast = 1;
1339 eth_dev->dev_ops = &ops;
1341 /* finally assign rx and tx ops */
1342 eth_dev->rx_pkt_burst = eth_vhost_rx;
1343 eth_dev->tx_pkt_burst = eth_vhost_tx;
1345 rte_eth_dev_probing_finish(eth_dev);
1350 rte_free(internal->iface_name);
1351 rte_eth_dev_release_port(eth_dev);
1357 open_iface(const char *key __rte_unused, const char *value, void *extra_args)
1359 const char **iface_name = extra_args;
1364 *iface_name = value;
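/* open_int(): kvargs handler that parses an unsigned integer devarg; a value
 * of USHRT_MAX together with errno == ERANGE signals strtoul() overflow.
 */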
1370 open_int(const char *key __rte_unused, const char *value, void *extra_args)
1372 uint16_t *n = extra_args;
1374 if (value == NULL || extra_args == NULL)
1377 *n = (uint16_t)strtoul(value, NULL, 0);
1378 if (*n == USHRT_MAX && errno == ERANGE)
1385 rte_pmd_vhost_probe(struct rte_vdev_device *dev)
1387 struct rte_kvargs *kvlist = NULL;
1392 uint64_t disable_flags = 0;
1393 int client_mode = 0;
1394 int dequeue_zero_copy = 0;
1395 int iommu_support = 0;
1396 int postcopy_support = 0;
1400 struct rte_eth_dev *eth_dev;
1401 const char *name = rte_vdev_device_name(dev);
1403 VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);
1405 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1406 eth_dev = rte_eth_dev_attach_secondary(name);
1408 VHOST_LOG(ERR, "Failed to probe %s\n", name);
1411 eth_dev->rx_pkt_burst = eth_vhost_rx;
1412 eth_dev->tx_pkt_burst = eth_vhost_tx;
1413 eth_dev->dev_ops = &ops;
1414 if (dev->device.numa_node == SOCKET_ID_ANY)
1415 dev->device.numa_node = rte_socket_id();
1416 eth_dev->device = &dev->device;
1417 rte_eth_dev_probing_finish(eth_dev);
1421 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
1425 if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
1426 ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
1427 &open_iface, &iface_name);
1435 if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
1436 ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
1437 &open_int, &queues);
1438 if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
1444 if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
1445 ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
1446 &open_int, &client_mode);
1451 flags |= RTE_VHOST_USER_CLIENT;
1454 if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
1455 ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
1456 &open_int, &dequeue_zero_copy);
1460 if (dequeue_zero_copy)
1461 flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
1464 if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
1465 ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
1466 &open_int, &iommu_support);
1471 flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
1474 if (rte_kvargs_count(kvlist, ETH_VHOST_POSTCOPY_SUPPORT) == 1) {
1475 ret = rte_kvargs_process(kvlist, ETH_VHOST_POSTCOPY_SUPPORT,
1476 &open_int, &postcopy_support);
1480 if (postcopy_support)
1481 flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
1484 if (rte_kvargs_count(kvlist, ETH_VHOST_VIRTIO_NET_F_HOST_TSO) == 1) {
1485 ret = rte_kvargs_process(kvlist,
1486 ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
1492 disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
1493 disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
1497 if (rte_kvargs_count(kvlist, ETH_VHOST_LINEAR_BUF) == 1) {
1498 ret = rte_kvargs_process(kvlist,
1499 ETH_VHOST_LINEAR_BUF,
1500 &open_int, &linear_buf);
1504 if (linear_buf == 1)
1505 flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT;
1508 if (rte_kvargs_count(kvlist, ETH_VHOST_EXT_BUF) == 1) {
1509 ret = rte_kvargs_process(kvlist,
1511 &open_int, &ext_buf);
1516 flags |= RTE_VHOST_USER_EXTBUF_SUPPORT;
1519 if (dev->device.numa_node == SOCKET_ID_ANY)
1520 dev->device.numa_node = rte_socket_id();
1522 ret = eth_dev_vhost_create(dev, iface_name, queues,
1523 dev->device.numa_node, flags, disable_flags);
1525 VHOST_LOG(ERR, "Failed to create %s\n", name);
1528 rte_kvargs_free(kvlist);
1533 rte_pmd_vhost_remove(struct rte_vdev_device *dev)
1536 struct rte_eth_dev *eth_dev = NULL;
1538 name = rte_vdev_device_name(dev);
1539 VHOST_LOG(INFO, "Uninitializing pmd_vhost for %s\n", name);
1541 /* find an ethdev entry */
1542 eth_dev = rte_eth_dev_allocated(name);
1543 if (eth_dev == NULL)
1546 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1547 return rte_eth_dev_release_port(eth_dev);
1549 eth_dev_close(eth_dev);
1551 rte_eth_dev_release_port(eth_dev);
1556 static struct rte_vdev_driver pmd_vhost_drv = {
1557 .probe = rte_pmd_vhost_probe,
1558 .remove = rte_pmd_vhost_remove,
1561 RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
1562 RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
1563 RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
1567 "dequeue-zero-copy=<0|1> "
1568 "iommu-support=<0|1> "
1569 "postcopy-support=<0|1> "
1571 "linear-buffer=<0|1> "
1572 "ext-buffer=<0|1>");
1574 RTE_INIT(vhost_init_log)
1576 vhost_logtype = rte_log_register("pmd.net.vhost");
1577 if (vhost_logtype >= 0)
1578 rte_log_set_level(vhost_logtype, RTE_LOG_NOTICE);