1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 IGEL Co., Ltd.
3 * Copyright(c) 2016-2018 Intel Corporation
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_memcpy.h>
14 #include <rte_bus_vdev.h>
15 #include <rte_kvargs.h>
16 #include <rte_vhost.h>
17 #include <rte_spinlock.h>
19 #include "rte_eth_vhost.h"
21 RTE_LOG_REGISTER(vhost_logtype, pmd.net.vhost, NOTICE);
23 #define VHOST_LOG(level, ...) \
24 rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)
26 enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
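/*
 * Virtio vrings come in pairs: even indexes are the guest's RX rings (this
 * PMD enqueues into them on its TX path) and odd indexes are the guest's TX
 * rings (this PMD dequeues from them on its RX path). Ethdev queue N is
 * therefore bound to vring N * VIRTIO_QNUM + VIRTIO_RXQ or + VIRTIO_TXQ.
 */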
28 #define ETH_VHOST_IFACE_ARG "iface"
29 #define ETH_VHOST_QUEUES_ARG "queues"
30 #define ETH_VHOST_CLIENT_ARG "client"
31 #define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
32 #define ETH_VHOST_IOMMU_SUPPORT "iommu-support"
33 #define ETH_VHOST_POSTCOPY_SUPPORT "postcopy-support"
34 #define ETH_VHOST_VIRTIO_NET_F_HOST_TSO "tso"
35 #define ETH_VHOST_LINEAR_BUF "linear-buffer"
36 #define ETH_VHOST_EXT_BUF "ext-buffer"
37 #define VHOST_MAX_PKT_BURST 32
39 static const char *valid_arguments[] = {
43 ETH_VHOST_DEQUEUE_ZERO_COPY,
44 ETH_VHOST_IOMMU_SUPPORT,
45 ETH_VHOST_POSTCOPY_SUPPORT,
46 ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
52 static struct rte_ether_addr base_eth_addr = {
63 enum vhost_xstats_pkts {
64 VHOST_UNDERSIZE_PKT = 0,
69 VHOST_512_TO_1023_PKT,
70 VHOST_1024_TO_1522_PKT,
71 VHOST_1523_TO_MAX_PKT,
76 VHOST_ERRORS_FRAGMENTED,
78 VHOST_UNKNOWN_PROTOCOL,
86 uint64_t xstats[VHOST_XSTATS_MAX];
91 rte_atomic32_t allow_queuing;
92 rte_atomic32_t while_queuing;
93 struct pmd_internal *internal;
94 struct rte_mempool *mb_pool;
96 uint16_t virtqueue_id;
98 struct vhost_stats stats;
101 struct pmd_internal {
102 rte_atomic32_t dev_attached;
105 uint64_t disable_flags;
108 rte_atomic32_t started;
112 struct internal_list {
113 TAILQ_ENTRY(internal_list) next;
114 struct rte_eth_dev *eth_dev;
117 TAILQ_HEAD(internal_list_head, internal_list);
118 static struct internal_list_head internal_list =
119 TAILQ_HEAD_INITIALIZER(internal_list);
121 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
123 static struct rte_eth_link pmd_link = {
125 .link_duplex = ETH_LINK_FULL_DUPLEX,
126 .link_status = ETH_LINK_DOWN
129 struct rte_vhost_vring_state {
132 bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
133 bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
135 unsigned int max_vring;
138 static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
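/*
 * Per-port vring state book-keeping: cur[] holds the latest enable/disable
 * state reported by the vhost library, seen[] holds what the application
 * has already consumed through rte_eth_vhost_get_queue_event().
 */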
140 #define VHOST_XSTATS_NAME_SIZE 64
142 struct vhost_xstats_name_off {
143 char name[VHOST_XSTATS_NAME_SIZE];
147 /* [rx]_ is prepended to the name string here */
148 static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
150 offsetof(struct vhost_queue, stats.pkts)},
152 offsetof(struct vhost_queue, stats.bytes)},
154 offsetof(struct vhost_queue, stats.missed_pkts)},
155 {"broadcast_packets",
156 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
157 {"multicast_packets",
158 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
160 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
161 {"undersize_packets",
162 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
164 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
165 {"size_65_to_127_packets",
166 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
167 {"size_128_to_255_packets",
168 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
169 {"size_256_to_511_packets",
170 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
171 {"size_512_to_1023_packets",
172 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
173 {"size_1024_to_1522_packets",
174 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
175 {"size_1523_to_max_packets",
176 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
177 {"errors_with_bad_CRC",
178 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
179 {"fragmented_errors",
180 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
182 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
183 {"unknown_protos_packets",
184 offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
187 /* [tx]_ is prepended to the name string here */
188 static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
190 offsetof(struct vhost_queue, stats.pkts)},
192 offsetof(struct vhost_queue, stats.bytes)},
194 offsetof(struct vhost_queue, stats.missed_pkts)},
195 {"broadcast_packets",
196 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
197 {"multicast_packets",
198 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
200 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
201 {"undersize_packets",
202 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
204 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
205 {"size_65_to_127_packets",
206 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
207 {"size_128_to_255_packets",
208 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
209 {"size_256_to_511_packets",
210 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
211 {"size_512_to_1023_packets",
212 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
213 {"size_1024_to_1522_packets",
214 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
215 {"size_1523_to_max_packets",
216 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
217 {"errors_with_bad_CRC",
218 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
221 #define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
222 sizeof(vhost_rxport_stat_strings[0]))
224 #define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
225 sizeof(vhost_txport_stat_strings[0]))
228 vhost_dev_xstats_reset(struct rte_eth_dev *dev)
230 struct vhost_queue *vq = NULL;
233 for (i = 0; i < dev->data->nb_rx_queues; i++) {
234 vq = dev->data->rx_queues[i];
237 memset(&vq->stats, 0, sizeof(vq->stats));
239 for (i = 0; i < dev->data->nb_tx_queues; i++) {
240 vq = dev->data->tx_queues[i];
243 memset(&vq->stats, 0, sizeof(vq->stats));
250 vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
251 struct rte_eth_xstat_name *xstats_names,
252 unsigned int limit __rte_unused)
256 int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
260 for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
261 snprintf(xstats_names[count].name,
262 sizeof(xstats_names[count].name),
263 "rx_%s", vhost_rxport_stat_strings[t].name);
266 for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
267 snprintf(xstats_names[count].name,
268 sizeof(xstats_names[count].name),
269 "tx_%s", vhost_txport_stat_strings[t].name);
276 vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
281 unsigned int count = 0;
282 struct vhost_queue *vq = NULL;
283 unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
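/*
 * Only broadcast and multicast frames are counted on the fast path; the
 * unicast counters are derived here as total - (broadcast + multicast).
 * On the TX side missed packets are added back in, since their broadcast/
 * multicast counters were updated even though they were never transmitted.
 */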
288 for (i = 0; i < dev->data->nb_rx_queues; i++) {
289 vq = dev->data->rx_queues[i];
292 vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
293 - (vq->stats.xstats[VHOST_BROADCAST_PKT]
294 + vq->stats.xstats[VHOST_MULTICAST_PKT]);
296 for (i = 0; i < dev->data->nb_tx_queues; i++) {
297 vq = dev->data->tx_queues[i];
300 vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
301 + vq->stats.missed_pkts
302 - (vq->stats.xstats[VHOST_BROADCAST_PKT]
303 + vq->stats.xstats[VHOST_MULTICAST_PKT]);
305 for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
306 xstats[count].value = 0;
307 for (i = 0; i < dev->data->nb_rx_queues; i++) {
308 vq = dev->data->rx_queues[i];
311 xstats[count].value +=
312 *(uint64_t *)(((char *)vq)
313 + vhost_rxport_stat_strings[t].offset);
315 xstats[count].id = count;
318 for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
319 xstats[count].value = 0;
320 for (i = 0; i < dev->data->nb_tx_queues; i++) {
321 vq = dev->data->tx_queues[i];
324 xstats[count].value +=
325 *(uint64_t *)(((char *)vq)
326 + vhost_txport_stat_strings[t].offset);
328 xstats[count].id = count;
335 vhost_count_multicast_broadcast(struct vhost_queue *vq,
336 struct rte_mbuf *mbuf)
338 struct rte_ether_addr *ea = NULL;
339 struct vhost_stats *pstats = &vq->stats;
341 ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
342 if (rte_is_multicast_ether_addr(ea)) {
343 if (rte_is_broadcast_ether_addr(ea))
344 pstats->xstats[VHOST_BROADCAST_PKT]++;
346 pstats->xstats[VHOST_MULTICAST_PKT]++;
351 vhost_update_packet_xstats(struct vhost_queue *vq,
352 struct rte_mbuf **bufs,
355 uint32_t pkt_len = 0;
358 struct vhost_stats *pstats = &vq->stats;
360 for (i = 0; i < count ; i++) {
361 pkt_len = bufs[i]->pkt_len;
363 pstats->xstats[VHOST_64_PKT]++;
364 } else if (pkt_len > 64 && pkt_len < 1024) {
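/*
 * sizeof(pkt_len) * 8 - clz(pkt_len) is the position of the most
 * significant set bit plus one, so subtracting 5 maps 65..127 to
 * VHOST_65_TO_127_PKT, 128..255 to VHOST_128_TO_255_PKT, and so on;
 * this relies on the size-bucket counters being consecutive in
 * enum vhost_xstats_pkts.
 */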
365 index = (sizeof(pkt_len) * 8)
366 - __builtin_clz(pkt_len) - 5;
367 pstats->xstats[index]++;
370 pstats->xstats[VHOST_UNDERSIZE_PKT]++;
371 else if (pkt_len <= 1522)
372 pstats->xstats[VHOST_1024_TO_1522_PKT]++;
373 else if (pkt_len > 1522)
374 pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
376 vhost_count_multicast_broadcast(vq, bufs[i]);
381 eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
383 struct vhost_queue *r = q;
384 uint16_t i, nb_rx = 0;
385 uint16_t nb_receive = nb_bufs;
387 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
390 rte_atomic32_set(&r->while_queuing, 1);
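/*
 * Re-check allow_queuing after raising while_queuing:
 * update_queuing_status() clears allow_queuing first and then waits for
 * while_queuing to drop, so this second check closes the race between
 * the two.
 */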
392 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
395 /* Dequeue packets from guest TX queue */
398 uint16_t num = (uint16_t)RTE_MIN(nb_receive,
399 VHOST_MAX_PKT_BURST);
401 nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
402 r->mb_pool, &bufs[nb_rx],
406 nb_receive -= nb_pkts;
411 r->stats.pkts += nb_rx;
413 for (i = 0; likely(i < nb_rx); i++) {
414 bufs[i]->port = r->port;
415 bufs[i]->vlan_tci = 0;
417 if (r->internal->vlan_strip)
418 rte_vlan_strip(bufs[i]);
420 r->stats.bytes += bufs[i]->pkt_len;
423 vhost_update_packet_xstats(r, bufs, nb_rx);
426 rte_atomic32_set(&r->while_queuing, 0);
432 eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
434 struct vhost_queue *r = q;
435 uint16_t i, nb_tx = 0;
436 uint16_t nb_send = 0;
438 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
441 rte_atomic32_set(&r->while_queuing, 1);
443 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
446 for (i = 0; i < nb_bufs; i++) {
447 struct rte_mbuf *m = bufs[i];
449 /* Do VLAN tag insertion */
450 if (m->ol_flags & PKT_TX_VLAN_PKT) {
451 int error = rte_vlan_insert(&m);
452 if (unlikely(error)) {
462 /* Enqueue packets to guest RX queue */
465 uint16_t num = (uint16_t)RTE_MIN(nb_send,
466 VHOST_MAX_PKT_BURST);
468 nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
477 r->stats.pkts += nb_tx;
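/* Packets the guest could not accept are accounted as missed. */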
478 r->stats.missed_pkts += nb_bufs - nb_tx;
480 for (i = 0; likely(i < nb_tx); i++)
481 r->stats.bytes += bufs[i]->pkt_len;
483 vhost_update_packet_xstats(r, bufs, nb_tx);
485 /* According to RFC 2863 (ifHCOutMulticastPkts and ifHCOutBroadcastPkts),
486 * the "multicast" and "broadcast" counters are incremented even for
487 * packets that were not transmitted successfully.
489 for (i = nb_tx; i < nb_bufs; i++)
490 vhost_count_multicast_broadcast(r, bufs[i]);
492 for (i = 0; likely(i < nb_tx); i++)
493 rte_pktmbuf_free(bufs[i]);
495 rte_atomic32_set(&r->while_queuing, 0);
500 static inline struct internal_list *
501 find_internal_resource(char *ifname)
504 struct internal_list *list;
505 struct pmd_internal *internal;
510 pthread_mutex_lock(&internal_list_lock);
512 TAILQ_FOREACH(list, &internal_list, next) {
513 internal = list->eth_dev->data->dev_private;
514 if (!strcmp(internal->iface_name, ifname)) {
520 pthread_mutex_unlock(&internal_list_lock);
529 eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
531 struct rte_vhost_vring vring;
532 struct vhost_queue *vq;
535 vq = dev->data->rx_queues[qid];
537 VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
541 ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
543 VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
546 VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
547 rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
556 eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
558 struct rte_vhost_vring vring;
559 struct vhost_queue *vq;
562 vq = dev->data->rx_queues[qid];
564 VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
568 ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
570 VHOST_LOG(ERR, "Failed to get rxq%d's vring", qid);
573 VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
574 rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
583 eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
585 struct rte_intr_handle *intr_handle = dev->intr_handle;
588 if (intr_handle->intr_vec)
589 free(intr_handle->intr_vec);
593 dev->intr_handle = NULL;
597 eth_vhost_install_intr(struct rte_eth_dev *dev)
599 struct rte_vhost_vring vring;
600 struct vhost_queue *vq;
602 int nb_rxq = dev->data->nb_rx_queues;
606 /* uninstall first if we are reconnecting */
607 if (dev->intr_handle)
608 eth_vhost_uninstall_intr(dev);
610 dev->intr_handle = malloc(sizeof(*dev->intr_handle));
611 if (!dev->intr_handle) {
612 VHOST_LOG(ERR, "Fail to allocate intr_handle\n");
615 memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));
617 dev->intr_handle->efd_counter_size = sizeof(uint64_t);
619 dev->intr_handle->intr_vec =
620 malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));
622 if (!dev->intr_handle->intr_vec) {
624 "Failed to allocate memory for interrupt vector\n");
625 free(dev->intr_handle);
629 VHOST_LOG(INFO, "Prepare intr vec\n");
630 for (i = 0; i < nb_rxq; i++) {
631 vq = dev->data->rx_queues[i];
633 VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i);
637 ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
640 "Failed to get rxq-%d's vring, skip!\n", i);
644 if (vring.kickfd < 0) {
646 "rxq-%d's kickfd is invalid, skip!\n", i);
649 dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
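/*
 * The vring's kickfd doubles as the Rx interrupt eventfd: the guest
 * writes to it when it adds buffers to its TX ring, which is exactly
 * the "packets available" event this queue needs.
 */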
650 dev->intr_handle->efds[i] = vring.kickfd;
652 VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
655 dev->intr_handle->nb_efd = count;
656 dev->intr_handle->max_intr = count + 1;
657 dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
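/*
 * allow_queuing/while_queuing implement a light-weight handshake with the
 * rx/tx burst functions: clearing allow_queuing forbids new bursts, and
 * waiting for while_queuing to drop guarantees no burst is still touching
 * the vhost device before the caller proceeds.
 */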
663 update_queuing_status(struct rte_eth_dev *dev)
665 struct pmd_internal *internal = dev->data->dev_private;
666 struct vhost_queue *vq;
668 int allow_queuing = 1;
670 if (!dev->data->rx_queues || !dev->data->tx_queues)
673 if (rte_atomic32_read(&internal->started) == 0 ||
674 rte_atomic32_read(&internal->dev_attached) == 0)
677 /* Wait until rx/tx_pkt_burst stops accessing vhost device */
678 for (i = 0; i < dev->data->nb_rx_queues; i++) {
679 vq = dev->data->rx_queues[i];
682 rte_atomic32_set(&vq->allow_queuing, allow_queuing);
683 while (rte_atomic32_read(&vq->while_queuing))
687 for (i = 0; i < dev->data->nb_tx_queues; i++) {
688 vq = dev->data->tx_queues[i];
691 rte_atomic32_set(&vq->allow_queuing, allow_queuing);
692 while (rte_atomic32_read(&vq->while_queuing))
698 queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
700 struct vhost_queue *vq;
703 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
704 vq = eth_dev->data->rx_queues[i];
707 vq->vid = internal->vid;
708 vq->internal = internal;
709 vq->port = eth_dev->data->port_id;
711 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
712 vq = eth_dev->data->tx_queues[i];
715 vq->vid = internal->vid;
716 vq->internal = internal;
717 vq->port = eth_dev->data->port_id;
724 struct rte_eth_dev *eth_dev;
725 struct internal_list *list;
726 struct pmd_internal *internal;
727 struct rte_eth_conf *dev_conf;
729 char ifname[PATH_MAX];
730 #ifdef RTE_LIBRTE_VHOST_NUMA
734 rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
735 list = find_internal_resource(ifname);
737 VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
741 eth_dev = list->eth_dev;
742 internal = eth_dev->data->dev_private;
743 dev_conf = &eth_dev->data->dev_conf;
745 #ifdef RTE_LIBRTE_VHOST_NUMA
746 newnode = rte_vhost_get_numa_node(vid);
748 eth_dev->data->numa_node = newnode;
752 if (rte_atomic32_read(&internal->started) == 1) {
753 queue_setup(eth_dev, internal);
755 if (dev_conf->intr_conf.rxq) {
756 if (eth_vhost_install_intr(eth_dev) < 0) {
758 "Failed to install interrupt handler.");
763 VHOST_LOG(INFO, "RX/TX queues not exist yet\n");
766 for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
767 rte_vhost_enable_guest_notification(vid, i, 0);
769 rte_vhost_get_mtu(vid, &eth_dev->data->mtu);
771 eth_dev->data->dev_link.link_status = ETH_LINK_UP;
773 rte_atomic32_set(&internal->dev_attached, 1);
774 update_queuing_status(eth_dev);
776 VHOST_LOG(INFO, "Vhost device %d created\n", vid);
778 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
784 destroy_device(int vid)
786 struct rte_eth_dev *eth_dev;
787 struct pmd_internal *internal;
788 struct vhost_queue *vq;
789 struct internal_list *list;
790 char ifname[PATH_MAX];
792 struct rte_vhost_vring_state *state;
794 rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
795 list = find_internal_resource(ifname);
797 VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
800 eth_dev = list->eth_dev;
801 internal = eth_dev->data->dev_private;
803 rte_atomic32_set(&internal->dev_attached, 0);
804 update_queuing_status(eth_dev);
806 eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
808 if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
809 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
810 vq = eth_dev->data->rx_queues[i];
815 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
816 vq = eth_dev->data->tx_queues[i];
823 state = vring_states[eth_dev->data->port_id];
824 rte_spinlock_lock(&state->lock);
825 for (i = 0; i <= state->max_vring; i++) {
826 state->cur[i] = false;
827 state->seen[i] = false;
829 state->max_vring = 0;
830 rte_spinlock_unlock(&state->lock);
832 VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
833 eth_vhost_uninstall_intr(eth_dev);
835 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
839 vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id)
841 struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
842 struct pmd_internal *internal = eth_dev->data->dev_private;
843 struct rte_vhost_vring vring;
844 struct vhost_queue *vq;
845 int rx_idx = vring_id % 2 ? (vring_id - 1) >> 1 : -1;
849 * The vring kickfd may be changed after the new device notification.
850 * Update it when the vring state is updated.
852 if (rx_idx >= 0 && rx_idx < eth_dev->data->nb_rx_queues &&
853 rte_atomic32_read(&internal->dev_attached) &&
854 rte_atomic32_read(&internal->started) &&
855 dev_conf->intr_conf.rxq) {
856 vq = eth_dev->data->rx_queues[rx_idx];
857 ret = rte_vhost_get_vhost_vring(vid, vring_id, &vring);
860 eth_dev->intr_handle->efds[rx_idx]) {
862 "kickfd for rxq-%d was changed.\n",
864 eth_dev->intr_handle->efds[rx_idx] =
868 rte_vhost_enable_guest_notification(vid, vring_id,
878 vring_state_changed(int vid, uint16_t vring, int enable)
880 struct rte_vhost_vring_state *state;
881 struct rte_eth_dev *eth_dev;
882 struct internal_list *list;
883 char ifname[PATH_MAX];
885 rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
886 list = find_internal_resource(ifname);
888 VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
892 eth_dev = list->eth_dev;
894 state = vring_states[eth_dev->data->port_id];
896 if (enable && vring_conf_update(vid, eth_dev, vring))
897 VHOST_LOG(INFO, "Failed to update vring-%d configuration.\n",
900 rte_spinlock_lock(&state->lock);
901 if (state->cur[vring] == enable) {
902 rte_spinlock_unlock(&state->lock);
905 state->cur[vring] = enable;
906 state->max_vring = RTE_MAX(vring, state->max_vring);
907 rte_spinlock_unlock(&state->lock);
909 VHOST_LOG(INFO, "vring%u is %s\n",
910 vring, enable ? "enabled" : "disabled");
912 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
917 static struct vhost_device_ops vhost_ops = {
918 .new_device = new_device,
919 .destroy_device = destroy_device,
920 .vring_state_changed = vring_state_changed,
924 vhost_driver_setup(struct rte_eth_dev *eth_dev)
926 struct pmd_internal *internal = eth_dev->data->dev_private;
927 struct internal_list *list = NULL;
928 struct rte_vhost_vring_state *vring_state = NULL;
929 unsigned int numa_node = eth_dev->device->numa_node;
930 const char *name = eth_dev->device->name;
932 /* Don't try to setup again if it has already been done. */
933 list = find_internal_resource(internal->iface_name);
937 list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
941 vring_state = rte_zmalloc_socket(name, sizeof(*vring_state),
943 if (vring_state == NULL)
946 list->eth_dev = eth_dev;
947 pthread_mutex_lock(&internal_list_lock);
948 TAILQ_INSERT_TAIL(&internal_list, list, next);
949 pthread_mutex_unlock(&internal_list_lock);
951 rte_spinlock_init(&vring_state->lock);
952 vring_states[eth_dev->data->port_id] = vring_state;
954 if (rte_vhost_driver_register(internal->iface_name, internal->flags))
957 if (internal->disable_flags) {
958 if (rte_vhost_driver_disable_features(internal->iface_name,
959 internal->disable_flags))
963 if (rte_vhost_driver_callback_register(internal->iface_name,
965 VHOST_LOG(ERR, "Can't register callbacks\n");
969 if (rte_vhost_driver_start(internal->iface_name) < 0) {
970 VHOST_LOG(ERR, "Failed to start driver for %s\n",
971 internal->iface_name);
978 rte_vhost_driver_unregister(internal->iface_name);
980 vring_states[eth_dev->data->port_id] = NULL;
981 pthread_mutex_lock(&internal_list_lock);
982 TAILQ_REMOVE(&internal_list, list, next);
983 pthread_mutex_unlock(&internal_list_lock);
984 rte_free(vring_state);
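/*
 * Usage sketch (hypothetical application code): drain all pending queue
 * events from the RTE_ETH_EVENT_QUEUE_STATE callback, e.g.
 *
 *	struct rte_eth_vhost_queue_event ev;
 *	while (rte_eth_vhost_get_queue_event(port_id, &ev) == 0)
 *		printf("%s queue %u %s\n", ev.rx ? "rx" : "tx",
 *		       ev.queue_id, ev.enable ? "enabled" : "disabled");
 */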
992 rte_eth_vhost_get_queue_event(uint16_t port_id,
993 struct rte_eth_vhost_queue_event *event)
995 struct rte_vhost_vring_state *state;
999 if (port_id >= RTE_MAX_ETHPORTS) {
1000 VHOST_LOG(ERR, "Invalid port id\n");
1004 state = vring_states[port_id];
1006 VHOST_LOG(ERR, "Unused port\n");
1010 rte_spinlock_lock(&state->lock);
1011 for (i = 0; i <= state->max_vring; i++) {
1012 idx = state->index++ % (state->max_vring + 1);
1014 if (state->cur[idx] != state->seen[idx]) {
1015 state->seen[idx] = state->cur[idx];
1016 event->queue_id = idx / 2;
1017 event->rx = idx & 1;
1018 event->enable = state->cur[idx];
1019 rte_spinlock_unlock(&state->lock);
1023 rte_spinlock_unlock(&state->lock);
1029 rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
1031 struct internal_list *list;
1032 struct rte_eth_dev *eth_dev;
1033 struct vhost_queue *vq;
1036 if (!rte_eth_dev_is_valid_port(port_id))
1039 pthread_mutex_lock(&internal_list_lock);
1041 TAILQ_FOREACH(list, &internal_list, next) {
1042 eth_dev = list->eth_dev;
1043 if (eth_dev->data->port_id == port_id) {
1044 vq = eth_dev->data->rx_queues[0];
1052 pthread_mutex_unlock(&internal_list_lock);
1058 eth_dev_configure(struct rte_eth_dev *dev)
1060 struct pmd_internal *internal = dev->data->dev_private;
1061 const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1063 /* NOTE: the same process has to operate a vhost interface
1064 * from beginning to end (from eth_dev configure to eth_dev close).
1065 * This is the user's responsibility for now.
1067 if (vhost_driver_setup(dev) < 0)
1070 internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1076 eth_dev_start(struct rte_eth_dev *eth_dev)
1078 struct pmd_internal *internal = eth_dev->data->dev_private;
1079 struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
1081 queue_setup(eth_dev, internal);
1083 if (rte_atomic32_read(&internal->dev_attached) == 1) {
1084 if (dev_conf->intr_conf.rxq) {
1085 if (eth_vhost_install_intr(eth_dev) < 0) {
1087 "Failed to install interrupt handler.");
1093 rte_atomic32_set(&internal->started, 1);
1094 update_queuing_status(eth_dev);
1100 eth_dev_stop(struct rte_eth_dev *dev)
1102 struct pmd_internal *internal = dev->data->dev_private;
1104 rte_atomic32_set(&internal->started, 0);
1105 update_queuing_status(dev);
1109 eth_dev_close(struct rte_eth_dev *dev)
1111 struct pmd_internal *internal;
1112 struct internal_list *list;
1115 internal = dev->data->dev_private;
1121 list = find_internal_resource(internal->iface_name);
1123 rte_vhost_driver_unregister(internal->iface_name);
1124 pthread_mutex_lock(&internal_list_lock);
1125 TAILQ_REMOVE(&internal_list, list, next);
1126 pthread_mutex_unlock(&internal_list_lock);
1130 if (dev->data->rx_queues)
1131 for (i = 0; i < dev->data->nb_rx_queues; i++)
1132 rte_free(dev->data->rx_queues[i]);
1134 if (dev->data->tx_queues)
1135 for (i = 0; i < dev->data->nb_tx_queues; i++)
1136 rte_free(dev->data->tx_queues[i]);
1138 rte_free(internal->iface_name);
1141 dev->data->dev_private = NULL;
1143 rte_free(vring_states[dev->data->port_id]);
1144 vring_states[dev->data->port_id] = NULL;
1148 eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1149 uint16_t nb_rx_desc __rte_unused,
1150 unsigned int socket_id,
1151 const struct rte_eth_rxconf *rx_conf __rte_unused,
1152 struct rte_mempool *mb_pool)
1154 struct vhost_queue *vq;
1156 vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
1157 RTE_CACHE_LINE_SIZE, socket_id);
1159 VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
1163 vq->mb_pool = mb_pool;
1164 vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
1165 dev->data->rx_queues[rx_queue_id] = vq;
1171 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1172 uint16_t nb_tx_desc __rte_unused,
1173 unsigned int socket_id,
1174 const struct rte_eth_txconf *tx_conf __rte_unused)
1176 struct vhost_queue *vq;
1178 vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
1179 RTE_CACHE_LINE_SIZE, socket_id);
1181 VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
1185 vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
1186 dev->data->tx_queues[tx_queue_id] = vq;
1192 eth_dev_info(struct rte_eth_dev *dev,
1193 struct rte_eth_dev_info *dev_info)
1195 struct pmd_internal *internal;
1197 internal = dev->data->dev_private;
1198 if (internal == NULL) {
1199 VHOST_LOG(ERR, "Invalid device specified\n");
1203 dev_info->max_mac_addrs = 1;
1204 dev_info->max_rx_pktlen = (uint32_t)-1;
1205 dev_info->max_rx_queues = internal->max_queues;
1206 dev_info->max_tx_queues = internal->max_queues;
1207 dev_info->min_rx_bufsize = 0;
1209 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
1210 DEV_TX_OFFLOAD_VLAN_INSERT;
1211 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1217 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1220 unsigned long rx_total = 0, tx_total = 0;
1221 unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
1222 struct vhost_queue *vq;
1224 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
1225 i < dev->data->nb_rx_queues; i++) {
1226 if (dev->data->rx_queues[i] == NULL)
1228 vq = dev->data->rx_queues[i];
1229 stats->q_ipackets[i] = vq->stats.pkts;
1230 rx_total += stats->q_ipackets[i];
1232 stats->q_ibytes[i] = vq->stats.bytes;
1233 rx_total_bytes += stats->q_ibytes[i];
1236 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
1237 i < dev->data->nb_tx_queues; i++) {
1238 if (dev->data->tx_queues[i] == NULL)
1240 vq = dev->data->tx_queues[i];
1241 stats->q_opackets[i] = vq->stats.pkts;
1242 tx_total += stats->q_opackets[i];
1244 stats->q_obytes[i] = vq->stats.bytes;
1245 tx_total_bytes += stats->q_obytes[i];
1248 stats->ipackets = rx_total;
1249 stats->opackets = tx_total;
1250 stats->ibytes = rx_total_bytes;
1251 stats->obytes = tx_total_bytes;
1257 eth_stats_reset(struct rte_eth_dev *dev)
1259 struct vhost_queue *vq;
1262 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1263 if (dev->data->rx_queues[i] == NULL)
1265 vq = dev->data->rx_queues[i];
1267 vq->stats.bytes = 0;
1269 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1270 if (dev->data->tx_queues[i] == NULL)
1272 vq = dev->data->tx_queues[i];
1274 vq->stats.bytes = 0;
1275 vq->stats.missed_pkts = 0;
1282 eth_queue_release(void *q)
1288 eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
1291 * vhost does not hang on to mbufs: eth_vhost_tx() copies the packet data
1292 * and frees the mbufs, so there is nothing to clean up.
1298 eth_link_update(struct rte_eth_dev *dev __rte_unused,
1299 int wait_to_complete __rte_unused)
1305 eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1307 struct vhost_queue *vq;
1309 vq = dev->data->rx_queues[rx_queue_id];
1313 return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
1316 static const struct eth_dev_ops ops = {
1317 .dev_start = eth_dev_start,
1318 .dev_stop = eth_dev_stop,
1319 .dev_close = eth_dev_close,
1320 .dev_configure = eth_dev_configure,
1321 .dev_infos_get = eth_dev_info,
1322 .rx_queue_setup = eth_rx_queue_setup,
1323 .tx_queue_setup = eth_tx_queue_setup,
1324 .rx_queue_release = eth_queue_release,
1325 .tx_queue_release = eth_queue_release,
1326 .tx_done_cleanup = eth_tx_done_cleanup,
1327 .rx_queue_count = eth_rx_queue_count,
1328 .link_update = eth_link_update,
1329 .stats_get = eth_stats_get,
1330 .stats_reset = eth_stats_reset,
1331 .xstats_reset = vhost_dev_xstats_reset,
1332 .xstats_get = vhost_dev_xstats_get,
1333 .xstats_get_names = vhost_dev_xstats_get_names,
1334 .rx_queue_intr_enable = eth_rxq_intr_enable,
1335 .rx_queue_intr_disable = eth_rxq_intr_disable,
1339 eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
1340 int16_t queues, const unsigned int numa_node, uint64_t flags,
1341 uint64_t disable_flags)
1343 const char *name = rte_vdev_device_name(dev);
1344 struct rte_eth_dev_data *data;
1345 struct pmd_internal *internal = NULL;
1346 struct rte_eth_dev *eth_dev = NULL;
1347 struct rte_ether_addr *eth_addr = NULL;
1349 VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
1352 /* reserve an ethdev entry */
1353 eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
1354 if (eth_dev == NULL)
1356 data = eth_dev->data;
1358 eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
1359 if (eth_addr == NULL)
1361 data->mac_addrs = eth_addr;
1362 *eth_addr = base_eth_addr;
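/* Make the address unique per port by encoding the port id in the last octet. */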
1363 eth_addr->addr_bytes[5] = eth_dev->data->port_id;
1365 /* now put it all together
1366 * - store queue data in internal,
1367 * - point eth_dev_data to internals
1368 * - and point eth_dev structure to new eth_dev_data structure
1370 internal = eth_dev->data->dev_private;
1371 internal->iface_name = rte_malloc_socket(name, strlen(iface_name) + 1,
1373 if (internal->iface_name == NULL)
1375 strcpy(internal->iface_name, iface_name);
1377 data->nb_rx_queues = queues;
1378 data->nb_tx_queues = queues;
1379 internal->max_queues = queues;
1381 internal->flags = flags;
1382 internal->disable_flags = disable_flags;
1383 data->dev_link = pmd_link;
1384 data->dev_flags = RTE_ETH_DEV_INTR_LSC | RTE_ETH_DEV_CLOSE_REMOVE;
1385 data->promiscuous = 1;
1386 data->all_multicast = 1;
1388 eth_dev->dev_ops = &ops;
1390 /* finally assign rx and tx ops */
1391 eth_dev->rx_pkt_burst = eth_vhost_rx;
1392 eth_dev->tx_pkt_burst = eth_vhost_tx;
1394 rte_eth_dev_probing_finish(eth_dev);
1399 rte_free(internal->iface_name);
1400 rte_eth_dev_release_port(eth_dev);
1406 open_iface(const char *key __rte_unused, const char *value, void *extra_args)
1408 const char **iface_name = extra_args;
1413 *iface_name = value;
1419 open_int(const char *key __rte_unused, const char *value, void *extra_args)
1421 uint16_t *n = extra_args;
1423 if (value == NULL || extra_args == NULL)
1426 *n = (uint16_t)strtoul(value, NULL, 0);
1427 if (*n == USHRT_MAX && errno == ERANGE)
1434 rte_pmd_vhost_probe(struct rte_vdev_device *dev)
1436 struct rte_kvargs *kvlist = NULL;
1441 uint64_t disable_flags = 0;
1442 int client_mode = 0;
1443 int dequeue_zero_copy = 0;
1444 int iommu_support = 0;
1445 int postcopy_support = 0;
1449 struct rte_eth_dev *eth_dev;
1450 const char *name = rte_vdev_device_name(dev);
1452 VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);
1454 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1455 eth_dev = rte_eth_dev_attach_secondary(name);
1457 VHOST_LOG(ERR, "Failed to probe %s\n", name);
1460 eth_dev->rx_pkt_burst = eth_vhost_rx;
1461 eth_dev->tx_pkt_burst = eth_vhost_tx;
1462 eth_dev->dev_ops = &ops;
1463 if (dev->device.numa_node == SOCKET_ID_ANY)
1464 dev->device.numa_node = rte_socket_id();
1465 eth_dev->device = &dev->device;
1466 rte_eth_dev_probing_finish(eth_dev);
1470 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
1474 if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
1475 ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
1476 &open_iface, &iface_name);
1484 if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
1485 ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
1486 &open_int, &queues);
1487 if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
1493 if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
1494 ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
1495 &open_int, &client_mode);
1500 flags |= RTE_VHOST_USER_CLIENT;
1503 if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
1504 ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
1505 &open_int, &dequeue_zero_copy);
1509 if (dequeue_zero_copy)
1510 flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
1513 if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
1514 ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
1515 &open_int, &iommu_support);
1520 flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
1523 if (rte_kvargs_count(kvlist, ETH_VHOST_POSTCOPY_SUPPORT) == 1) {
1524 ret = rte_kvargs_process(kvlist, ETH_VHOST_POSTCOPY_SUPPORT,
1525 &open_int, &postcopy_support);
1529 if (postcopy_support)
1530 flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
1533 if (rte_kvargs_count(kvlist, ETH_VHOST_VIRTIO_NET_F_HOST_TSO) == 1) {
1534 ret = rte_kvargs_process(kvlist,
1535 ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
1541 disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
1542 disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
1546 if (rte_kvargs_count(kvlist, ETH_VHOST_LINEAR_BUF) == 1) {
1547 ret = rte_kvargs_process(kvlist,
1548 ETH_VHOST_LINEAR_BUF,
1549 &open_int, &linear_buf);
1553 if (linear_buf == 1)
1554 flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT;
1557 if (rte_kvargs_count(kvlist, ETH_VHOST_EXT_BUF) == 1) {
1558 ret = rte_kvargs_process(kvlist,
1560 &open_int, &ext_buf);
1565 flags |= RTE_VHOST_USER_EXTBUF_SUPPORT;
1568 if (dev->device.numa_node == SOCKET_ID_ANY)
1569 dev->device.numa_node = rte_socket_id();
1571 ret = eth_dev_vhost_create(dev, iface_name, queues,
1572 dev->device.numa_node, flags, disable_flags);
1574 VHOST_LOG(ERR, "Failed to create %s\n", name);
1577 rte_kvargs_free(kvlist);
1582 rte_pmd_vhost_remove(struct rte_vdev_device *dev)
1585 struct rte_eth_dev *eth_dev = NULL;
1587 name = rte_vdev_device_name(dev);
1588 VHOST_LOG(INFO, "Un-Initializing pmd_vhost for %s\n", name);
1590 /* find an ethdev entry */
1591 eth_dev = rte_eth_dev_allocated(name);
1592 if (eth_dev == NULL)
1595 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1596 return rte_eth_dev_release_port(eth_dev);
1598 eth_dev_close(eth_dev);
1600 rte_eth_dev_release_port(eth_dev);
1605 static struct rte_vdev_driver pmd_vhost_drv = {
1606 .probe = rte_pmd_vhost_probe,
1607 .remove = rte_pmd_vhost_remove,
1610 RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
1611 RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
1612 RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
1616 "dequeue-zero-copy=<0|1> "
1617 "iommu-support=<0|1> "
1618 "postcopy-support=<0|1> "
1620 "linear-buffer=<0|1> "
1621 "ext-buffer=<0|1>");