1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 IGEL Co., Ltd.
3 * Copyright(c) 2016-2018 Intel Corporation
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_memcpy.h>
14 #include <rte_bus_vdev.h>
15 #include <rte_kvargs.h>
16 #include <rte_vhost.h>
17 #include <rte_spinlock.h>
19 #include "rte_eth_vhost.h"
21 static int vhost_logtype;
23 #define VHOST_LOG(level, ...) \
24 rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)
26 enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
28 #define ETH_VHOST_IFACE_ARG "iface"
29 #define ETH_VHOST_QUEUES_ARG "queues"
30 #define ETH_VHOST_CLIENT_ARG "client"
31 #define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
32 #define ETH_VHOST_IOMMU_SUPPORT "iommu-support"
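/*
 * Example vdev argument string using the keys above (the socket path and
 * queue count are hypothetical):
 *   --vdev 'net_vhost0,iface=/tmp/vhost-sock0,queues=1,client=0'
 */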
33 #define VHOST_MAX_PKT_BURST 32
35 static const char *valid_arguments[] = {
39 ETH_VHOST_DEQUEUE_ZERO_COPY,
40 ETH_VHOST_IOMMU_SUPPORT,
44 static struct ether_addr base_eth_addr = {
55 enum vhost_xstats_pkts {
56 VHOST_UNDERSIZE_PKT = 0,
61 VHOST_512_TO_1023_PKT,
62 VHOST_1024_TO_1522_PKT,
63 VHOST_1523_TO_MAX_PKT,
68 VHOST_ERRORS_FRAGMENTED,
70 VHOST_UNKNOWN_PROTOCOL,
78 uint64_t xstats[VHOST_XSTATS_MAX];
83 rte_atomic32_t allow_queuing;
84 rte_atomic32_t while_queuing;
85 struct pmd_internal *internal;
86 struct rte_mempool *mb_pool;
88 uint16_t virtqueue_id;
89 struct vhost_stats stats;
93 rte_atomic32_t dev_attached;
98 rte_atomic32_t started;
102 struct internal_list {
103 TAILQ_ENTRY(internal_list) next;
104 struct rte_eth_dev *eth_dev;
107 TAILQ_HEAD(internal_list_head, internal_list);
108 static struct internal_list_head internal_list =
109 TAILQ_HEAD_INITIALIZER(internal_list);
111 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
113 static struct rte_eth_link pmd_link = {
115 .link_duplex = ETH_LINK_FULL_DUPLEX,
116 .link_status = ETH_LINK_DOWN
119 struct rte_vhost_vring_state {
122 bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
123 bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
125 unsigned int max_vring;
128 static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
130 #define VHOST_XSTATS_NAME_SIZE 64
132 struct vhost_xstats_name_off {
133 char name[VHOST_XSTATS_NAME_SIZE];
137 /* [rx]_ is prepended to the name string here */
138 static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
140 offsetof(struct vhost_queue, stats.pkts)},
142 offsetof(struct vhost_queue, stats.bytes)},
144 offsetof(struct vhost_queue, stats.missed_pkts)},
145 {"broadcast_packets",
146 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
147 {"multicast_packets",
148 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
150 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
151 {"undersize_packets",
152 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
154 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
155 {"size_65_to_127_packets",
156 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
157 {"size_128_to_255_packets",
158 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
159 {"size_256_to_511_packets",
160 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
161 {"size_512_to_1023_packets",
162 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
163 {"size_1024_to_1522_packets",
164 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
165 {"size_1523_to_max_packets",
166 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
167 {"errors_with_bad_CRC",
168 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
169 {"fragmented_errors",
170 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
172 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
173 {"unknown_protos_packets",
174 offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
177 /* [tx]_ is prepended to the name string here */
178 static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
180 offsetof(struct vhost_queue, stats.pkts)},
182 offsetof(struct vhost_queue, stats.bytes)},
184 offsetof(struct vhost_queue, stats.missed_pkts)},
185 {"broadcast_packets",
186 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
187 {"multicast_packets",
188 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
190 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
191 {"undersize_packets",
192 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
194 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
195 {"size_65_to_127_packets",
196 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
197 {"size_128_to_255_packets",
198 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
199 {"size_256_to_511_packets",
200 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
201 {"size_512_to_1023_packets",
202 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
203 {"size_1024_to_1522_packets",
204 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
205 {"size_1523_to_max_packets",
206 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
207 {"errors_with_bad_CRC",
208 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
211 #define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
212 sizeof(vhost_rxport_stat_strings[0]))
214 #define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
215 sizeof(vhost_txport_stat_strings[0]))
218 vhost_dev_xstats_reset(struct rte_eth_dev *dev)
220 struct vhost_queue *vq = NULL;
223 for (i = 0; i < dev->data->nb_rx_queues; i++) {
224 vq = dev->data->rx_queues[i];
227 memset(&vq->stats, 0, sizeof(vq->stats));
229 for (i = 0; i < dev->data->nb_tx_queues; i++) {
230 vq = dev->data->tx_queues[i];
233 memset(&vq->stats, 0, sizeof(vq->stats));
238 vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
239 struct rte_eth_xstat_name *xstats_names,
240 unsigned int limit __rte_unused)
244 int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
248 for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
249 snprintf(xstats_names[count].name,
250 sizeof(xstats_names[count].name),
251 "rx_%s", vhost_rxport_stat_strings[t].name);
254 for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
255 snprintf(xstats_names[count].name,
256 sizeof(xstats_names[count].name),
257 "tx_%s", vhost_txport_stat_strings[t].name);
264 vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
269 unsigned int count = 0;
270 struct vhost_queue *vq = NULL;
271 unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
276 for (i = 0; i < dev->data->nb_rx_queues; i++) {
277 vq = dev->data->rx_queues[i];
280 vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
281 - (vq->stats.xstats[VHOST_BROADCAST_PKT]
282 + vq->stats.xstats[VHOST_MULTICAST_PKT]);
284 for (i = 0; i < dev->data->nb_tx_queues; i++) {
285 vq = dev->data->tx_queues[i];
288 vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
289 + vq->stats.missed_pkts
290 - (vq->stats.xstats[VHOST_BROADCAST_PKT]
291 + vq->stats.xstats[VHOST_MULTICAST_PKT]);
293 for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
294 xstats[count].value = 0;
295 for (i = 0; i < dev->data->nb_rx_queues; i++) {
296 vq = dev->data->rx_queues[i];
299 xstats[count].value +=
300 *(uint64_t *)(((char *)vq)
301 + vhost_rxport_stat_strings[t].offset);
303 xstats[count].id = count;
306 for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
307 xstats[count].value = 0;
308 for (i = 0; i < dev->data->nb_tx_queues; i++) {
309 vq = dev->data->tx_queues[i];
312 xstats[count].value +=
313 *(uint64_t *)(((char *)vq)
314 + vhost_txport_stat_strings[t].offset);
316 xstats[count].id = count;
323 vhost_count_multicast_broadcast(struct vhost_queue *vq,
324 struct rte_mbuf *mbuf)
326 struct ether_addr *ea = NULL;
327 struct vhost_stats *pstats = &vq->stats;
329 ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
330 if (is_multicast_ether_addr(ea)) {
331 if (is_broadcast_ether_addr(ea))
332 pstats->xstats[VHOST_BROADCAST_PKT]++;
334 pstats->xstats[VHOST_MULTICAST_PKT]++;
339 vhost_update_packet_xstats(struct vhost_queue *vq,
340 struct rte_mbuf **bufs,
343 uint32_t pkt_len = 0;
346 struct vhost_stats *pstats = &vq->stats;
348 for (i = 0; i < count ; i++) {
349 pkt_len = bufs[i]->pkt_len;
351 pstats->xstats[VHOST_64_PKT]++;
352 } else if (pkt_len > 64 && pkt_len < 1024) {
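/* Derive the size bucket from the position of the highest set bit of
 * pkt_len: 65-127 bytes -> VHOST_65_TO_127_PKT, 128-255 ->
 * VHOST_128_TO_255_PKT, and so on, relying on those bucket values being
 * consecutive in enum vhost_xstats_pkts.
 */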
353 index = (sizeof(pkt_len) * 8)
354 - __builtin_clz(pkt_len) - 5;
355 pstats->xstats[index]++;
358 pstats->xstats[VHOST_UNDERSIZE_PKT]++;
359 else if (pkt_len <= 1522)
360 pstats->xstats[VHOST_1024_TO_1522_PKT]++;
361 else if (pkt_len > 1522)
362 pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
364 vhost_count_multicast_broadcast(vq, bufs[i]);
369 eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
371 struct vhost_queue *r = q;
372 uint16_t i, nb_rx = 0;
373 uint16_t nb_receive = nb_bufs;
375 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
378 rte_atomic32_set(&r->while_queuing, 1);
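/* Publish that this burst function is inside its critical section, then
 * re-check allow_queuing to close the race with update_queuing_status(),
 * which waits for while_queuing to drop back to 0.
 */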
380 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
383 /* Dequeue packets from guest TX queue */
386 uint16_t num = (uint16_t)RTE_MIN(nb_receive,
387 VHOST_MAX_PKT_BURST);
389 nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
390 r->mb_pool, &bufs[nb_rx],
394 nb_receive -= nb_pkts;
399 r->stats.pkts += nb_rx;
401 for (i = 0; likely(i < nb_rx); i++) {
402 bufs[i]->port = r->port;
403 bufs[i]->vlan_tci = 0;
405 if (r->internal->vlan_strip)
406 rte_vlan_strip(bufs[i]);
408 r->stats.bytes += bufs[i]->pkt_len;
411 vhost_update_packet_xstats(r, bufs, nb_rx);
414 rte_atomic32_set(&r->while_queuing, 0);
420 eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
422 struct vhost_queue *r = q;
423 uint16_t i, nb_tx = 0;
424 uint16_t nb_send = 0;
426 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
429 rte_atomic32_set(&r->while_queuing, 1);
431 if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
434 for (i = 0; i < nb_bufs; i++) {
435 struct rte_mbuf *m = bufs[i];
437 /* Do VLAN tag insertion */
438 if (m->ol_flags & PKT_TX_VLAN_PKT) {
439 int error = rte_vlan_insert(&m);
440 if (unlikely(error)) {
450 /* Enqueue packets to guest RX queue */
453 uint16_t num = (uint16_t)RTE_MIN(nb_send,
454 VHOST_MAX_PKT_BURST);
456 nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
465 r->stats.pkts += nb_tx;
466 r->stats.missed_pkts += nb_bufs - nb_tx;
468 for (i = 0; likely(i < nb_tx); i++)
469 r->stats.bytes += bufs[i]->pkt_len;
471 vhost_update_packet_xstats(r, bufs, nb_tx);
473 /* According to RFC2863 (page 42, ifHCOutMulticastPkts and
474 * ifHCOutBroadcastPkts), the "multicast" and "broadcast" counters
475 * are also increased for packets that were not transmitted successfully.
477 for (i = nb_tx; i < nb_bufs; i++)
478 vhost_count_multicast_broadcast(r, bufs[i]);
480 for (i = 0; likely(i < nb_tx); i++)
481 rte_pktmbuf_free(bufs[i]);
483 rte_atomic32_set(&r->while_queuing, 0);
489 eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
491 struct pmd_internal *internal = dev->data->dev_private;
492 const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
494 internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
499 static inline struct internal_list *
500 find_internal_resource(char *ifname)
503 struct internal_list *list;
504 struct pmd_internal *internal;
509 pthread_mutex_lock(&internal_list_lock);
511 TAILQ_FOREACH(list, &internal_list, next) {
512 internal = list->eth_dev->data->dev_private;
513 if (!strcmp(internal->iface_name, ifname)) {
519 pthread_mutex_unlock(&internal_list_lock);
528 eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
530 struct rte_vhost_vring vring;
531 struct vhost_queue *vq;
534 vq = dev->data->rx_queues[qid];
536 VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
540 ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
542 VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
545 VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
546 rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
553 eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
555 struct rte_vhost_vring vring;
556 struct vhost_queue *vq;
559 vq = dev->data->rx_queues[qid];
561 VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
565 ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
567 VHOST_LOG(ERR, "Failed to get rxq%d's vring", qid);
570 VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
571 rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
578 eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
580 struct rte_intr_handle *intr_handle = dev->intr_handle;
583 if (intr_handle->intr_vec)
584 free(intr_handle->intr_vec);
588 dev->intr_handle = NULL;
592 eth_vhost_install_intr(struct rte_eth_dev *dev)
594 struct rte_vhost_vring vring;
595 struct vhost_queue *vq;
597 int nb_rxq = dev->data->nb_rx_queues;
601 /* uninstall first if we are reconnecting */
602 if (dev->intr_handle)
603 eth_vhost_uninstall_intr(dev);
605 dev->intr_handle = malloc(sizeof(*dev->intr_handle));
606 if (!dev->intr_handle) {
607 VHOST_LOG(ERR, "Fail to allocate intr_handle\n");
610 memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));
612 dev->intr_handle->efd_counter_size = sizeof(uint64_t);
614 dev->intr_handle->intr_vec =
615 malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));
617 if (!dev->intr_handle->intr_vec) {
619 "Failed to allocate memory for interrupt vector\n");
620 free(dev->intr_handle);
624 VHOST_LOG(INFO, "Prepare intr vec\n");
625 for (i = 0; i < nb_rxq; i++) {
626 vq = dev->data->rx_queues[i];
628 VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i);
632 ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
635 "Failed to get rxq-%d's vring, skip!\n", i);
639 if (vring.kickfd < 0) {
641 "rxq-%d's kickfd is invalid, skip!\n", i);
644 dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
645 dev->intr_handle->efds[i] = vring.kickfd;
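/* The vring's kickfd doubles as the Rx interrupt eventfd for this queue. */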
647 VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
650 dev->intr_handle->nb_efd = count;
651 dev->intr_handle->max_intr = count + 1;
652 dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
658 update_queuing_status(struct rte_eth_dev *dev)
660 struct pmd_internal *internal = dev->data->dev_private;
661 struct vhost_queue *vq;
663 int allow_queuing = 1;
665 if (!dev->data->rx_queues || !dev->data->tx_queues)
668 if (rte_atomic32_read(&internal->started) == 0 ||
669 rte_atomic32_read(&internal->dev_attached) == 0)
672 /* Wait until rx/tx_pkt_burst stops accessing vhost device */
673 for (i = 0; i < dev->data->nb_rx_queues; i++) {
674 vq = dev->data->rx_queues[i];
677 rte_atomic32_set(&vq->allow_queuing, allow_queuing);
678 while (rte_atomic32_read(&vq->while_queuing))
682 for (i = 0; i < dev->data->nb_tx_queues; i++) {
683 vq = dev->data->tx_queues[i];
686 rte_atomic32_set(&vq->allow_queuing, allow_queuing);
687 while (rte_atomic32_read(&vq->while_queuing))
693 queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
695 struct vhost_queue *vq;
698 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
699 vq = eth_dev->data->rx_queues[i];
702 vq->vid = internal->vid;
703 vq->internal = internal;
704 vq->port = eth_dev->data->port_id;
706 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
707 vq = eth_dev->data->tx_queues[i];
710 vq->vid = internal->vid;
711 vq->internal = internal;
712 vq->port = eth_dev->data->port_id;
719 struct rte_eth_dev *eth_dev;
720 struct internal_list *list;
721 struct pmd_internal *internal;
722 struct rte_eth_conf *dev_conf;
724 char ifname[PATH_MAX];
725 #ifdef RTE_LIBRTE_VHOST_NUMA
729 rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
730 list = find_internal_resource(ifname);
732 VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
736 eth_dev = list->eth_dev;
737 internal = eth_dev->data->dev_private;
738 dev_conf = ð_dev->data->dev_conf;
740 #ifdef RTE_LIBRTE_VHOST_NUMA
741 newnode = rte_vhost_get_numa_node(vid);
743 eth_dev->data->numa_node = newnode;
747 if (rte_atomic32_read(&internal->started) == 1) {
748 queue_setup(eth_dev, internal);
750 if (dev_conf->intr_conf.rxq) {
751 if (eth_vhost_install_intr(eth_dev) < 0) {
753 "Failed to install interrupt handler.");
758 VHOST_LOG(INFO, "RX/TX queues not exist yet\n");
761 for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
762 rte_vhost_enable_guest_notification(vid, i, 0);
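/* Guest notifications are disabled by default since the PMD polls the
 * rings; eth_rxq_intr_enable() turns them back on per queue when Rx
 * interrupt mode is used.
 */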
764 rte_vhost_get_mtu(vid, ð_dev->data->mtu);
766 eth_dev->data->dev_link.link_status = ETH_LINK_UP;
768 rte_atomic32_set(&internal->dev_attached, 1);
769 update_queuing_status(eth_dev);
771 VHOST_LOG(INFO, "Vhost device %d created\n", vid);
773 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
779 destroy_device(int vid)
781 struct rte_eth_dev *eth_dev;
782 struct pmd_internal *internal;
783 struct vhost_queue *vq;
784 struct internal_list *list;
785 char ifname[PATH_MAX];
787 struct rte_vhost_vring_state *state;
789 rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
790 list = find_internal_resource(ifname);
792 VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
795 eth_dev = list->eth_dev;
796 internal = eth_dev->data->dev_private;
798 rte_atomic32_set(&internal->dev_attached, 0);
799 update_queuing_status(eth_dev);
801 eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
803 if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
804 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
805 vq = eth_dev->data->rx_queues[i];
810 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
811 vq = eth_dev->data->tx_queues[i];
818 state = vring_states[eth_dev->data->port_id];
819 rte_spinlock_lock(&state->lock);
820 for (i = 0; i <= state->max_vring; i++) {
821 state->cur[i] = false;
822 state->seen[i] = false;
824 state->max_vring = 0;
825 rte_spinlock_unlock(&state->lock);
827 VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
828 eth_vhost_uninstall_intr(eth_dev);
830 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
834 vring_state_changed(int vid, uint16_t vring, int enable)
836 struct rte_vhost_vring_state *state;
837 struct rte_eth_dev *eth_dev;
838 struct internal_list *list;
839 char ifname[PATH_MAX];
841 rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
842 list = find_internal_resource(ifname);
844 VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
848 eth_dev = list->eth_dev;
850 state = vring_states[eth_dev->data->port_id];
851 rte_spinlock_lock(&state->lock);
852 state->cur[vring] = enable;
853 state->max_vring = RTE_MAX(vring, state->max_vring);
854 rte_spinlock_unlock(&state->lock);
856 VHOST_LOG(INFO, "vring%u is %s\n",
857 vring, enable ? "enabled" : "disabled");
859 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
864 static struct vhost_device_ops vhost_ops = {
865 .new_device = new_device,
866 .destroy_device = destroy_device,
867 .vring_state_changed = vring_state_changed,
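/*
 * A minimal usage sketch (hypothetical application code, not part of this
 * PMD): after receiving an RTE_ETH_EVENT_QUEUE_STATE callback, an
 * application can drain the pending queue-state changes like this:
 *
 *	struct rte_eth_vhost_queue_event ev;
 *
 *	while (rte_eth_vhost_get_queue_event(port_id, &ev) == 0)
 *		printf("%s queue %u %s\n", ev.rx ? "rx" : "tx",
 *		       ev.queue_id, ev.enable ? "enabled" : "disabled");
 */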
871 rte_eth_vhost_get_queue_event(uint16_t port_id,
872 struct rte_eth_vhost_queue_event *event)
874 struct rte_vhost_vring_state *state;
878 if (port_id >= RTE_MAX_ETHPORTS) {
879 VHOST_LOG(ERR, "Invalid port id\n");
883 state = vring_states[port_id];
885 VHOST_LOG(ERR, "Unused port\n");
889 rte_spinlock_lock(&state->lock);
890 for (i = 0; i <= state->max_vring; i++) {
891 idx = state->index++ % (state->max_vring + 1);
893 if (state->cur[idx] != state->seen[idx]) {
894 state->seen[idx] = state->cur[idx];
895 event->queue_id = idx / 2;
897 event->enable = state->cur[idx];
898 rte_spinlock_unlock(&state->lock);
902 rte_spinlock_unlock(&state->lock);
908 rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
910 struct internal_list *list;
911 struct rte_eth_dev *eth_dev;
912 struct vhost_queue *vq;
915 if (!rte_eth_dev_is_valid_port(port_id))
918 pthread_mutex_lock(&internal_list_lock);
920 TAILQ_FOREACH(list, &internal_list, next) {
921 eth_dev = list->eth_dev;
922 if (eth_dev->data->port_id == port_id) {
923 vq = eth_dev->data->rx_queues[0];
931 pthread_mutex_unlock(&internal_list_lock);
937 eth_dev_start(struct rte_eth_dev *eth_dev)
939 struct pmd_internal *internal = eth_dev->data->dev_private;
940 struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
942 queue_setup(eth_dev, internal);
944 if (rte_atomic32_read(&internal->dev_attached) == 1) {
945 if (dev_conf->intr_conf.rxq) {
946 if (eth_vhost_install_intr(eth_dev) < 0) {
948 "Failed to install interrupt handler.");
954 rte_atomic32_set(&internal->started, 1);
955 update_queuing_status(eth_dev);
961 eth_dev_stop(struct rte_eth_dev *dev)
963 struct pmd_internal *internal = dev->data->dev_private;
965 rte_atomic32_set(&internal->started, 0);
966 update_queuing_status(dev);
970 eth_dev_close(struct rte_eth_dev *dev)
972 struct pmd_internal *internal;
973 struct internal_list *list;
976 internal = dev->data->dev_private;
982 rte_vhost_driver_unregister(internal->iface_name);
984 list = find_internal_resource(internal->iface_name);
988 pthread_mutex_lock(&internal_list_lock);
989 TAILQ_REMOVE(&internal_list, list, next);
990 pthread_mutex_unlock(&internal_list_lock);
993 if (dev->data->rx_queues)
994 for (i = 0; i < dev->data->nb_rx_queues; i++)
995 rte_free(dev->data->rx_queues[i]);
997 if (dev->data->tx_queues)
998 for (i = 0; i < dev->data->nb_tx_queues; i++)
999 rte_free(dev->data->tx_queues[i]);
1001 rte_free(dev->data->mac_addrs);
1002 free(internal->dev_name);
1003 free(internal->iface_name);
1006 dev->data->dev_private = NULL;
1010 eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1011 uint16_t nb_rx_desc __rte_unused,
1012 unsigned int socket_id,
1013 const struct rte_eth_rxconf *rx_conf __rte_unused,
1014 struct rte_mempool *mb_pool)
1016 struct vhost_queue *vq;
1018 vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
1019 RTE_CACHE_LINE_SIZE, socket_id);
1021 VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
1025 vq->mb_pool = mb_pool;
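/* An ethdev Rx queue reads from the guest's TX virtqueue (and an ethdev
 * Tx queue writes to the guest's RX virtqueue), hence the VIRTIO_TXQ
 * offset below.
 */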
1026 vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
1027 dev->data->rx_queues[rx_queue_id] = vq;
1033 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1034 uint16_t nb_tx_desc __rte_unused,
1035 unsigned int socket_id,
1036 const struct rte_eth_txconf *tx_conf __rte_unused)
1038 struct vhost_queue *vq;
1040 vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
1041 RTE_CACHE_LINE_SIZE, socket_id);
1043 VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
1047 vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
1048 dev->data->tx_queues[tx_queue_id] = vq;
1054 eth_dev_info(struct rte_eth_dev *dev,
1055 struct rte_eth_dev_info *dev_info)
1057 struct pmd_internal *internal;
1059 internal = dev->data->dev_private;
1060 if (internal == NULL) {
1061 VHOST_LOG(ERR, "Invalid device specified\n");
1065 dev_info->max_mac_addrs = 1;
1066 dev_info->max_rx_pktlen = (uint32_t)-1;
1067 dev_info->max_rx_queues = internal->max_queues;
1068 dev_info->max_tx_queues = internal->max_queues;
1069 dev_info->min_rx_bufsize = 0;
1071 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
1072 DEV_TX_OFFLOAD_VLAN_INSERT;
1073 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1077 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1080 unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
1081 unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
1082 struct vhost_queue *vq;
1084 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
1085 i < dev->data->nb_rx_queues; i++) {
1086 if (dev->data->rx_queues[i] == NULL)
1088 vq = dev->data->rx_queues[i];
1089 stats->q_ipackets[i] = vq->stats.pkts;
1090 rx_total += stats->q_ipackets[i];
1092 stats->q_ibytes[i] = vq->stats.bytes;
1093 rx_total_bytes += stats->q_ibytes[i];
1096 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
1097 i < dev->data->nb_tx_queues; i++) {
1098 if (dev->data->tx_queues[i] == NULL)
1100 vq = dev->data->tx_queues[i];
1101 stats->q_opackets[i] = vq->stats.pkts;
1102 tx_missed_total += vq->stats.missed_pkts;
1103 tx_total += stats->q_opackets[i];
1105 stats->q_obytes[i] = vq->stats.bytes;
1106 tx_total_bytes += stats->q_obytes[i];
1109 stats->ipackets = rx_total;
1110 stats->opackets = tx_total;
1111 stats->oerrors = tx_missed_total;
1112 stats->ibytes = rx_total_bytes;
1113 stats->obytes = tx_total_bytes;
1119 eth_stats_reset(struct rte_eth_dev *dev)
1121 struct vhost_queue *vq;
1124 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1125 if (dev->data->rx_queues[i] == NULL)
1127 vq = dev->data->rx_queues[i];
1129 vq->stats.bytes = 0;
1131 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1132 if (dev->data->tx_queues[i] == NULL)
1134 vq = dev->data->tx_queues[i];
1136 vq->stats.bytes = 0;
1137 vq->stats.missed_pkts = 0;
1142 eth_queue_release(void *q)
1148 eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
1151 * vHost does not hang onto mbufs: eth_vhost_tx() copies the packet data
1152 * and then frees the mbufs, so there is nothing to clean up here.
1158 eth_link_update(struct rte_eth_dev *dev __rte_unused,
1159 int wait_to_complete __rte_unused)
1165 eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1167 struct vhost_queue *vq;
1169 vq = dev->data->rx_queues[rx_queue_id];
1173 return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
1176 static const struct eth_dev_ops ops = {
1177 .dev_start = eth_dev_start,
1178 .dev_stop = eth_dev_stop,
1179 .dev_close = eth_dev_close,
1180 .dev_configure = eth_dev_configure,
1181 .dev_infos_get = eth_dev_info,
1182 .rx_queue_setup = eth_rx_queue_setup,
1183 .tx_queue_setup = eth_tx_queue_setup,
1184 .rx_queue_release = eth_queue_release,
1185 .tx_queue_release = eth_queue_release,
1186 .tx_done_cleanup = eth_tx_done_cleanup,
1187 .rx_queue_count = eth_rx_queue_count,
1188 .link_update = eth_link_update,
1189 .stats_get = eth_stats_get,
1190 .stats_reset = eth_stats_reset,
1191 .xstats_reset = vhost_dev_xstats_reset,
1192 .xstats_get = vhost_dev_xstats_get,
1193 .xstats_get_names = vhost_dev_xstats_get_names,
1194 .rx_queue_intr_enable = eth_rxq_intr_enable,
1195 .rx_queue_intr_disable = eth_rxq_intr_disable,
1198 static struct rte_vdev_driver pmd_vhost_drv;
1201 eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
1202 int16_t queues, const unsigned int numa_node, uint64_t flags)
1204 const char *name = rte_vdev_device_name(dev);
1205 struct rte_eth_dev_data *data;
1206 struct pmd_internal *internal = NULL;
1207 struct rte_eth_dev *eth_dev = NULL;
1208 struct ether_addr *eth_addr = NULL;
1209 struct rte_vhost_vring_state *vring_state = NULL;
1210 struct internal_list *list = NULL;
1212 VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
1215 list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
1219 /* reserve an ethdev entry */
1220 eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
1221 if (eth_dev == NULL)
1224 eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
1225 if (eth_addr == NULL)
1227 *eth_addr = base_eth_addr;
1228 eth_addr->addr_bytes[5] = eth_dev->data->port_id;
1230 vring_state = rte_zmalloc_socket(name,
1231 sizeof(*vring_state), 0, numa_node);
1232 if (vring_state == NULL)
1235 /* now put it all together:
1236 * - store queue data in internal,
1237 * - point eth_dev_data to internal,
1238 * - and point the eth_dev structure to the new eth_dev_data structure
1240 internal = eth_dev->data->dev_private;
1241 internal->dev_name = strdup(name);
1242 if (internal->dev_name == NULL)
1244 internal->iface_name = strdup(iface_name);
1245 if (internal->iface_name == NULL)
1248 list->eth_dev = eth_dev;
1249 pthread_mutex_lock(&internal_list_lock);
1250 TAILQ_INSERT_TAIL(&internal_list, list, next);
1251 pthread_mutex_unlock(&internal_list_lock);
1253 rte_spinlock_init(&vring_state->lock);
1254 vring_states[eth_dev->data->port_id] = vring_state;
1256 data = eth_dev->data;
1257 data->nb_rx_queues = queues;
1258 data->nb_tx_queues = queues;
1259 internal->max_queues = queues;
1261 data->dev_link = pmd_link;
1262 data->mac_addrs = eth_addr;
1263 data->dev_flags = RTE_ETH_DEV_INTR_LSC;
1265 eth_dev->dev_ops = &ops;
1267 /* finally assign rx and tx ops */
1268 eth_dev->rx_pkt_burst = eth_vhost_rx;
1269 eth_dev->tx_pkt_burst = eth_vhost_tx;
1271 if (rte_vhost_driver_register(iface_name, flags))
1274 if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
1275 VHOST_LOG(ERR, "Can't register callbacks\n");
1279 if (rte_vhost_driver_start(iface_name) < 0) {
1280 VHOST_LOG(ERR, "Failed to start driver for %s\n",
1285 rte_eth_dev_probing_finish(eth_dev);
1286 return data->port_id;
1290 free(internal->iface_name);
1291 free(internal->dev_name);
1293 rte_free(vring_state);
1296 rte_eth_dev_release_port(eth_dev);
1304 open_iface(const char *key __rte_unused, const char *value, void *extra_args)
1306 const char **iface_name = extra_args;
1311 *iface_name = value;
1317 open_int(const char *key __rte_unused, const char *value, void *extra_args)
1319 uint16_t *n = extra_args;
1321 if (value == NULL || extra_args == NULL)
1324 *n = (uint16_t)strtoul(value, NULL, 0);
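/* strtoul() returns ULONG_MAX and sets errno to ERANGE on overflow;
 * truncated to uint16_t that value equals USHRT_MAX, which is what the
 * check below relies on.
 */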
1325 if (*n == USHRT_MAX && errno == ERANGE)
1332 rte_pmd_vhost_probe(struct rte_vdev_device *dev)
1334 struct rte_kvargs *kvlist = NULL;
1339 int client_mode = 0;
1340 int dequeue_zero_copy = 0;
1341 int iommu_support = 0;
1342 struct rte_eth_dev *eth_dev;
1343 const char *name = rte_vdev_device_name(dev);
1345 VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);
1347 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1348 eth_dev = rte_eth_dev_attach_secondary(name);
1350 VHOST_LOG(ERR, "Failed to probe %s\n", name);
1353 /* TODO: request info from primary to set up Rx and Tx */
1354 eth_dev->dev_ops = &ops;
1355 eth_dev->device = &dev->device;
1356 rte_eth_dev_probing_finish(eth_dev);
1360 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
1364 if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
1365 ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
1366 &open_iface, &iface_name);
1374 if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
1375 ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
1376 &open_int, &queues);
1377 if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
1383 if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
1384 ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
1385 &open_int, &client_mode);
1390 flags |= RTE_VHOST_USER_CLIENT;
1393 if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
1394 ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
1395 &open_int, &dequeue_zero_copy);
1399 if (dequeue_zero_copy)
1400 flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
1403 if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
1404 ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
1405 &open_int, &iommu_support);
1410 flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
1413 if (dev->device.numa_node == SOCKET_ID_ANY)
1414 dev->device.numa_node = rte_socket_id();
1416 eth_dev_vhost_create(dev, iface_name, queues, dev->device.numa_node,
1420 rte_kvargs_free(kvlist);
1425 rte_pmd_vhost_remove(struct rte_vdev_device *dev)
1428 struct rte_eth_dev *eth_dev = NULL;
1430 name = rte_vdev_device_name(dev);
1431 VHOST_LOG(INFO, "Un-Initializing pmd_vhost for %s\n", name);
1433 /* find an ethdev entry */
1434 eth_dev = rte_eth_dev_allocated(name);
1435 if (eth_dev == NULL)
1438 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1439 return rte_eth_dev_release_port_secondary(eth_dev);
1441 eth_dev_close(eth_dev);
1443 rte_free(vring_states[eth_dev->data->port_id]);
1444 vring_states[eth_dev->data->port_id] = NULL;
1446 rte_eth_dev_release_port(eth_dev);
1451 static struct rte_vdev_driver pmd_vhost_drv = {
1452 .probe = rte_pmd_vhost_probe,
1453 .remove = rte_pmd_vhost_remove,
1456 RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
1457 RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
1458 RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
1462 RTE_INIT(vhost_init_log)
1464 vhost_logtype = rte_log_register("pmd.net.vhost");
1465 if (vhost_logtype >= 0)
1466 rte_log_set_level(vhost_logtype, RTE_LOG_NOTICE);