/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 IGEL Co., Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"
enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

#define ETH_VHOST_IFACE_ARG		"iface"
#define ETH_VHOST_QUEUES_ARG		"queues"
#define ETH_VHOST_CLIENT_ARG		"client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY	"dequeue-zero-copy"
#define ETH_VHOST_IOMMU_SUPPORT		"iommu-support"
#define VHOST_MAX_PKT_BURST 32

static const char *valid_arguments[] = {
	ETH_VHOST_IFACE_ARG,
	ETH_VHOST_QUEUES_ARG,
	ETH_VHOST_CLIENT_ARG,
	ETH_VHOST_DEQUEUE_ZERO_COPY,
	ETH_VHOST_IOMMU_SUPPORT,
	NULL
};
static struct ether_addr base_eth_addr = {
	.addr_bytes = {
		0x56 /* V */,
		0x48 /* H */,
		0x4F /* O */,
		0x53 /* S */,
		0x54 /* T */,
		0x00
	}
};
enum vhost_xstats_pkts {
	VHOST_UNDERSIZE_PKT = 0,
	VHOST_64_PKT,
	VHOST_65_TO_127_PKT,
	VHOST_128_TO_255_PKT,
	VHOST_256_TO_511_PKT,
	VHOST_512_TO_1023_PKT,
	VHOST_1024_TO_1522_PKT,
	VHOST_1523_TO_MAX_PKT,
	VHOST_BROADCAST_PKT,
	VHOST_MULTICAST_PKT,
	VHOST_UNICAST_PKT,
	VHOST_ERRORS_PKT,
	VHOST_ERRORS_FRAGMENTED,
	VHOST_ERRORS_JABBER,
	VHOST_UNKNOWN_PROTOCOL,
	VHOST_XSTATS_MAX,
};
struct vhost_stats {
	uint64_t pkts;
	uint64_t bytes;
	uint64_t missed_pkts;
	uint64_t xstats[VHOST_XSTATS_MAX];
};

struct vhost_queue {
	int vid;
	rte_atomic32_t allow_queuing;
	rte_atomic32_t while_queuing;
	struct pmd_internal *internal;
	struct rte_mempool *mb_pool;
	uint16_t port;
	uint16_t virtqueue_id;
	struct vhost_stats stats;
};
struct pmd_internal {
	rte_atomic32_t dev_attached;
	char *dev_name;
	char *iface_name;
	uint16_t max_queues;
	int vid;
	rte_atomic32_t started;
};
struct internal_list {
	TAILQ_ENTRY(internal_list) next;
	struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
	TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct rte_eth_link pmd_link = {
		.link_speed = 10000,
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_status = ETH_LINK_DOWN
};
struct rte_vhost_vring_state {
	rte_spinlock_t lock;

	bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
	bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
	unsigned int index;
	unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
	char name[VHOST_XSTATS_NAME_SIZE];
	uint64_t offset;
};
/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
	{"good_packets",
	 offsetof(struct vhost_queue, stats.pkts)},
	{"total_bytes",
	 offsetof(struct vhost_queue, stats.bytes)},
	{"missed_pkts",
	 offsetof(struct vhost_queue, stats.missed_pkts)},
	{"broadcast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
	{"multicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
	{"unicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
	{"undersize_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
	{"size_64_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
	{"size_65_to_127_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
	{"size_128_to_255_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
	{"size_256_to_511_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
	{"size_512_to_1023_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
	{"size_1024_to_1522_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
	{"size_1523_to_max_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
	{"errors_with_bad_CRC",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
	{"fragmented_errors",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
	{"jabber_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
	{"unknown_protos_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};
/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
	{"good_packets",
	 offsetof(struct vhost_queue, stats.pkts)},
	{"total_bytes",
	 offsetof(struct vhost_queue, stats.bytes)},
	{"missed_pkts",
	 offsetof(struct vhost_queue, stats.missed_pkts)},
	{"broadcast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
	{"multicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
	{"unicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
	{"undersize_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
	{"size_64_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
	{"size_65_to_127_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
	{"size_128_to_255_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
	{"size_256_to_511_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
	{"size_512_to_1023_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
	{"size_1024_to_1522_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
	{"size_1523_to_max_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
	{"errors_with_bad_CRC",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};
#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
				sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
				sizeof(vhost_txport_stat_strings[0]))
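
/*
 * Extended statistics callbacks. Per-queue counters accumulated in
 * struct vhost_stats are summed over all rx/tx queues and exposed
 * through the rte_eth_xstats_* API using the name tables above.
 */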
static void
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct vhost_queue *vq = NULL;
	unsigned int i = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (!vq)
			continue;
		memset(&vq->stats, 0, sizeof(vq->stats));
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (!vq)
			continue;
		memset(&vq->stats, 0, sizeof(vq->stats));
	}
}
static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int limit __rte_unused)
{
	unsigned int t = 0;
	int count = 0;
	int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

	if (!xstats_names)
		return nstats;
	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "rx_%s", vhost_rxport_stat_strings[t].name);
		count++;
	}
	for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "tx_%s", vhost_txport_stat_strings[t].name);
		count++;
	}
	return count;
}
static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	unsigned int i;
	unsigned int t;
	unsigned int count = 0;
	struct vhost_queue *vq = NULL;
	unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

	if (n < nxstats)
		return nxstats;

	/* Derive the unicast counters from the totals before dumping. */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (!vq)
			continue;
		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (!vq)
			continue;
		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
				+ vq->stats.missed_pkts
				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
	}
	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
		xstats[count].value = 0;
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			vq = dev->data->rx_queues[i];
			if (!vq)
				continue;
			xstats[count].value +=
				*(uint64_t *)(((char *)vq)
				+ vhost_rxport_stat_strings[t].offset);
		}
		xstats[count].id = count;
		count++;
	}
	for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
		xstats[count].value = 0;
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			vq = dev->data->tx_queues[i];
			if (!vq)
				continue;
			xstats[count].value +=
				*(uint64_t *)(((char *)vq)
				+ vhost_txport_stat_strings[t].offset);
		}
		xstats[count].id = count;
		count++;
	}
	return count;
}
static inline void
vhost_count_multicast_broadcast(struct vhost_queue *vq,
				struct rte_mbuf *mbuf)
{
	struct ether_addr *ea = NULL;
	struct vhost_stats *pstats = &vq->stats;

	ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
	if (is_multicast_ether_addr(ea)) {
		if (is_broadcast_ether_addr(ea))
			pstats->xstats[VHOST_BROADCAST_PKT]++;
		else
			pstats->xstats[VHOST_MULTICAST_PKT]++;
	}
}
static void
vhost_update_packet_xstats(struct vhost_queue *vq,
			   struct rte_mbuf **bufs,
			   uint16_t count)
{
	uint32_t pkt_len = 0;
	uint64_t i = 0;
	uint64_t index;
	struct vhost_stats *pstats = &vq->stats;

	for (i = 0; i < count ; i++) {
		pkt_len = bufs[i]->pkt_len;
		if (pkt_len == 64) {
			pstats->xstats[VHOST_64_PKT]++;
		} else if (pkt_len > 64 && pkt_len < 1024) {
			/* Map [65, 1023] onto the size buckets via the
			 * position of the highest set bit.
			 */
			index = (sizeof(pkt_len) * 8)
				- __builtin_clz(pkt_len) - 5;
			pstats->xstats[index]++;
		} else {
			if (pkt_len < 64)
				pstats->xstats[VHOST_UNDERSIZE_PKT]++;
			else if (pkt_len <= 1522)
				pstats->xstats[VHOST_1024_TO_1522_PKT]++;
			else if (pkt_len > 1522)
				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
		}
		vhost_count_multicast_broadcast(vq, bufs[i]);
	}
}
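
/*
 * Rx burst: packets sent by the guest are dequeued from the guest TX
 * virtqueue in chunks of at most VHOST_MAX_PKT_BURST mbufs until either
 * nb_bufs mbufs have been filled or the virtqueue runs dry.
 */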
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct vhost_queue *r = q;
	uint16_t i, nb_rx = 0;
	uint16_t nb_receive = nb_bufs;

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		return 0;

	rte_atomic32_set(&r->while_queuing, 1);

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		goto out;

	/* Dequeue packets from guest TX queue */
	while (nb_receive) {
		uint16_t nb_pkts;
		uint16_t num = (uint16_t)RTE_MIN(nb_receive,
						 VHOST_MAX_PKT_BURST);

		nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
						  r->mb_pool, &bufs[nb_rx],
						  num);
		nb_rx += nb_pkts;
		nb_receive -= nb_pkts;
		if (nb_pkts < num)
			break;
	}

	r->stats.pkts += nb_rx;

	for (i = 0; likely(i < nb_rx); i++) {
		bufs[i]->port = r->port;
		r->stats.bytes += bufs[i]->pkt_len;
	}

	vhost_update_packet_xstats(r, bufs, nb_rx);

out:
	rte_atomic32_set(&r->while_queuing, 0);

	return nb_rx;
}
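
/*
 * Tx burst: packets are enqueued to the guest RX virtqueue; mbufs the
 * guest accepted are freed here, while anything it could not take is
 * accounted as missed_pkts (reported as oerrors).
 */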
static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct vhost_queue *r = q;
	uint16_t i, nb_tx = 0;
	uint16_t nb_send = nb_bufs;

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		return 0;

	rte_atomic32_set(&r->while_queuing, 1);

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		goto out;

	/* Enqueue packets to guest RX queue */
	while (nb_send) {
		uint16_t nb_pkts;
		uint16_t num = (uint16_t)RTE_MIN(nb_send,
						 VHOST_MAX_PKT_BURST);

		nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
						  &bufs[nb_tx], num);
		nb_tx += nb_pkts;
		nb_send -= nb_pkts;
		if (nb_pkts < num)
			break;
	}

	r->stats.pkts += nb_tx;
	r->stats.missed_pkts += nb_bufs - nb_tx;

	for (i = 0; likely(i < nb_tx); i++)
		r->stats.bytes += bufs[i]->pkt_len;

	vhost_update_packet_xstats(r, bufs, nb_tx);

	/* According to RFC 2863, page 42, ifHCOutMulticastPkts and
	 * ifHCOutBroadcastPkts: the counters "multicast" and "broadcast"
	 * are increased even when packets are not transmitted successfully.
	 */
	for (i = nb_tx; i < nb_bufs; i++)
		vhost_count_multicast_broadcast(r, bufs[i]);

	for (i = 0; likely(i < nb_tx); i++)
		rte_pktmbuf_free(bufs[i]);

out:
	rte_atomic32_set(&r->while_queuing, 0);

	return nb_tx;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static inline struct internal_list *
find_internal_resource(char *ifname)
{
	int found = 0;
	struct internal_list *list;
	struct pmd_internal *internal;

	if (ifname == NULL)
		return NULL;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		internal = list->eth_dev->data->dev_private;
		if (!strcmp(internal->iface_name, ifname)) {
			found = 1;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	if (!found)
		return NULL;

	return list;
}
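
/*
 * Data path vs. control path synchronization: allow_queuing gates new
 * bursts while while_queuing marks a burst in flight. The control path
 * clears allow_queuing and then spins until while_queuing drops to 0,
 * so the vhost device is never torn down under an active rx/tx burst.
 */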
static void
update_queuing_status(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;
	struct vhost_queue *vq;
	unsigned int i;
	int allow_queuing = 1;

	if (rte_atomic32_read(&internal->dev_attached) == 0)
		return;

	if (rte_atomic32_read(&internal->started) == 0)
		allow_queuing = 0;

	/* Wait until rx/tx_pkt_burst stops accessing vhost device */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, allow_queuing);
		while (rte_atomic32_read(&vq->while_queuing))
			rte_pause();
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, allow_queuing);
		while (rte_atomic32_read(&vq->while_queuing))
			rte_pause();
	}
}
static void
queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
{
	struct vhost_queue *vq;
	int i;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		vq = eth_dev->data->rx_queues[i];
		if (!vq)
			continue;
		vq->vid = internal->vid;
		vq->internal = internal;
		vq->port = eth_dev->data->port_id;
	}
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		vq = eth_dev->data->tx_queues[i];
		if (!vq)
			continue;
		vq->vid = internal->vid;
		vq->internal = internal;
		vq->port = eth_dev->data->port_id;
	}
}
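
/*
 * vhost-user "new_device" callback: invoked when a frontend connects to
 * the socket. Binds the vhost device id (vid) to the matching ethdev,
 * wires up the queues and reports link up.
 */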
static int
new_device(int vid)
{
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	struct pmd_internal *internal;
	unsigned int i;
	char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
	int newnode;
#endif

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
		return -1;
	}

	eth_dev = list->eth_dev;
	internal = eth_dev->data->dev_private;

#ifdef RTE_LIBRTE_VHOST_NUMA
	newnode = rte_vhost_get_numa_node(vid);
	if (newnode >= 0)
		eth_dev->data->numa_node = newnode;
#endif

	internal->vid = vid;
	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
		queue_setup(eth_dev, internal);
		rte_atomic32_set(&internal->dev_attached, 1);
	} else {
		RTE_LOG(INFO, PMD, "RX/TX queues have not been set up yet\n");
		rte_atomic32_set(&internal->dev_attached, 0);
	}

	for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
		rte_vhost_enable_guest_notification(vid, i, 0);

	rte_vhost_get_mtu(vid, &eth_dev->data->mtu);

	eth_dev->data->dev_link.link_status = ETH_LINK_UP;

	update_queuing_status(eth_dev);

	RTE_LOG(INFO, PMD, "Vhost device %d created\n", vid);

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

	return 0;
}
static void
destroy_device(int vid)
{
	struct rte_eth_dev *eth_dev;
	struct pmd_internal *internal;
	struct vhost_queue *vq;
	struct internal_list *list;
	char ifname[PATH_MAX];
	unsigned int i;
	struct rte_vhost_vring_state *state;

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
		return;
	}
	eth_dev = list->eth_dev;
	internal = eth_dev->data->dev_private;

	rte_atomic32_set(&internal->started, 0);
	update_queuing_status(eth_dev);
	rte_atomic32_set(&internal->dev_attached, 0);

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		vq = eth_dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		vq->vid = -1;
	}
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		vq = eth_dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		vq->vid = -1;
	}

	state = vring_states[eth_dev->data->port_id];
	rte_spinlock_lock(&state->lock);
	for (i = 0; i <= state->max_vring; i++) {
		state->cur[i] = false;
		state->seen[i] = false;
	}
	state->max_vring = 0;
	rte_spinlock_unlock(&state->lock);

	RTE_LOG(INFO, PMD, "Vhost device %d destroyed\n", vid);

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
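
/*
 * vhost-user callback: a single vring was enabled or disabled by the
 * frontend. The change is recorded in vring_states[] and applications
 * are notified through the RTE_ETH_EVENT_QUEUE_STATE callback.
 */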
static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
	struct rte_vhost_vring_state *state;
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	char ifname[PATH_MAX];

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
		return -1;
	}

	eth_dev = list->eth_dev;
	/* won't be NULL */
	state = vring_states[eth_dev->data->port_id];
	rte_spinlock_lock(&state->lock);
	state->cur[vring] = enable;
	state->max_vring = RTE_MAX(vring, state->max_vring);
	rte_spinlock_unlock(&state->lock);

	RTE_LOG(INFO, PMD, "vring%u is %s\n",
			vring, enable ? "enabled" : "disabled");

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

	return 0;
}
static struct vhost_device_ops vhost_ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,
	.vring_state_changed = vring_state_changed,
};
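
/*
 * Public helper: drain one pending queue-state change recorded by
 * vring_state_changed(). A caller would typically poll it from its
 * RTE_ETH_EVENT_QUEUE_STATE handler until it returns a negative value,
 * for example (illustrative sketch only, error handling omitted):
 *
 *	struct rte_eth_vhost_queue_event ev;
 *
 *	while (rte_eth_vhost_get_queue_event(port_id, &ev) == 0)
 *		printf("queue %u (%s) %s\n", ev.queue_id,
 *		       ev.rx ? "rx" : "tx",
 *		       ev.enable ? "enabled" : "disabled");
 */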
int
rte_eth_vhost_get_queue_event(uint16_t port_id,
		struct rte_eth_vhost_queue_event *event)
{
	struct rte_vhost_vring_state *state;
	unsigned int i;
	int idx;

	if (port_id >= RTE_MAX_ETHPORTS) {
		RTE_LOG(ERR, PMD, "Invalid port id\n");
		return -1;
	}

	state = vring_states[port_id];
	if (!state) {
		RTE_LOG(ERR, PMD, "Unused port\n");
		return -1;
	}

	rte_spinlock_lock(&state->lock);
	for (i = 0; i <= state->max_vring; i++) {
		idx = state->index++ % (state->max_vring + 1);

		if (state->cur[idx] != state->seen[idx]) {
			state->seen[idx] = state->cur[idx];
			event->queue_id = idx / 2;
			/* 0 means RX queue, 1 means TX queue */
			event->rx = idx & 1;
			event->enable = state->cur[idx];
			rte_spinlock_unlock(&state->lock);
			return 0;
		}
	}
	rte_spinlock_unlock(&state->lock);

	return -1;
}
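
/*
 * Public helper: map an ethdev port id back to the vhost device id (vid)
 * of the connected frontend, or -1 if no device is attached.
 */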
int
rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
	struct internal_list *list;
	struct rte_eth_dev *eth_dev;
	struct vhost_queue *vq;
	int vid = -1;

	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		eth_dev = list->eth_dev;
		if (eth_dev->data->port_id == port_id) {
			vq = eth_dev->data->rx_queues[0];
			if (vq)
				vid = vq->vid;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	return vid;
}
static int
eth_dev_start(struct rte_eth_dev *eth_dev)
{
	struct pmd_internal *internal = eth_dev->data->dev_private;

	if (unlikely(rte_atomic32_read(&internal->dev_attached) == 0)) {
		queue_setup(eth_dev, internal);
		rte_atomic32_set(&internal->dev_attached, 1);
	}

	rte_atomic32_set(&internal->started, 1);
	update_queuing_status(eth_dev);

	return 0;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;

	rte_atomic32_set(&internal->started, 0);
	update_queuing_status(dev);
}
static void
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal;
	struct internal_list *list;
	unsigned int i;

	internal = dev->data->dev_private;
	if (!internal)
		return;

	eth_dev_stop(dev);

	rte_vhost_driver_unregister(internal->iface_name);

	list = find_internal_resource(internal->iface_name);
	if (!list)
		return;

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_REMOVE(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);
	rte_free(list);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		rte_free(dev->data->rx_queues[i]);
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		rte_free(dev->data->tx_queues[i]);

	rte_free(dev->data->mac_addrs);
	free(internal->dev_name);
	free(internal->iface_name);
	rte_free(internal);

	dev->data->dev_private = NULL;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct vhost_queue *vq;

	vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (vq == NULL) {
		RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n");
		return -ENOMEM;
	}

	vq->mb_pool = mb_pool;
	vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
	dev->data->rx_queues[rx_queue_id] = vq;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct vhost_queue *vq;

	vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (vq == NULL) {
		RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n");
		return -ENOMEM;
	}

	vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
	dev->data->tx_queues[tx_queue_id] = vq;

	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev,
	     struct rte_eth_dev_info *dev_info)
{
	struct pmd_internal *internal;

	internal = dev->data->dev_private;
	if (internal == NULL) {
		RTE_LOG(ERR, PMD, "Invalid device specified\n");
		return;
	}

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = internal->max_queues;
	dev_info->max_tx_queues = internal->max_queues;
	dev_info->min_rx_bufsize = 0;
}
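
/*
 * Basic statistics: per-queue counters are copied into the q_* arrays
 * (up to RTE_ETHDEV_QUEUE_STAT_CNTRS queues) and summed into the
 * port-wide ipackets/opackets/ibytes/obytes/oerrors totals.
 */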
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
	unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
	struct vhost_queue *vq;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;
		vq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = vq->stats.pkts;
		rx_total += stats->q_ipackets[i];

		stats->q_ibytes[i] = vq->stats.bytes;
		rx_total_bytes += stats->q_ibytes[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;
		vq = dev->data->tx_queues[i];
		stats->q_opackets[i] = vq->stats.pkts;
		tx_missed_total += vq->stats.missed_pkts;
		tx_total += stats->q_opackets[i];

		stats->q_obytes[i] = vq->stats.bytes;
		tx_total_bytes += stats->q_obytes[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;
	stats->oerrors = tx_missed_total;
	stats->ibytes = rx_total_bytes;
	stats->obytes = tx_total_bytes;

	return 0;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	struct vhost_queue *vq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;
		vq = dev->data->rx_queues[i];
		vq->stats.pkts = 0;
		vq->stats.bytes = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;
		vq = dev->data->tx_queues[i];
		vq->stats.pkts = 0;
		vq->stats.bytes = 0;
		vq->stats.missed_pkts = 0;
	}
}
static void
eth_queue_release(void *q)
{
	rte_free(q);
}

static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
	/*
	 * vHost does not hang onto mbufs. eth_vhost_tx() copies packet data
	 * and releases the mbuf, so there is nothing to clean up.
	 */
	return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

static uint32_t
eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct vhost_queue *vq;

	vq = dev->data->rx_queues[rx_queue_id];
	if (vq == NULL)
		return 0;

	return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.tx_done_cleanup = eth_tx_done_cleanup,
	.rx_queue_count = eth_rx_queue_count,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.xstats_reset = vhost_dev_xstats_reset,
	.xstats_get = vhost_dev_xstats_get,
	.xstats_get_names = vhost_dev_xstats_get_names,
};

static struct rte_vdev_driver pmd_vhost_drv;
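
/*
 * Allocate and initialize one vhost ethdev: private data, MAC address,
 * vring state tracking and the vhost-user socket registration. Called
 * from the vdev probe path below.
 */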
static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
	int16_t queues, const unsigned int numa_node, uint64_t flags)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internal *internal = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct ether_addr *eth_addr = NULL;
	struct rte_vhost_vring_state *vring_state = NULL;
	struct internal_list *list = NULL;

	RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
		numa_node);

	/* now do all data allocation - for eth_dev structure and internal
	 * (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
	if (list == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
	if (eth_dev == NULL)
		goto error;

	eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
	if (eth_addr == NULL)
		goto error;
	*eth_addr = base_eth_addr;
	eth_addr->addr_bytes[5] = eth_dev->data->port_id;

	vring_state = rte_zmalloc_socket(name,
			sizeof(*vring_state), 0, numa_node);
	if (vring_state == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internal,
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	internal = eth_dev->data->dev_private;
	internal->dev_name = strdup(name);
	if (internal->dev_name == NULL)
		goto error;
	internal->iface_name = strdup(iface_name);
	if (internal->iface_name == NULL)
		goto error;

	list->eth_dev = eth_dev;
	pthread_mutex_lock(&internal_list_lock);
	TAILQ_INSERT_TAIL(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_spinlock_init(&vring_state->lock);
	vring_states[eth_dev->data->port_id] = vring_state;

	/* We'll replace the 'data' originally allocated by eth_dev. So the
	 * vhost PMD resources won't be shared between multiple processes.
	 */
	rte_memcpy(data, eth_dev->data, sizeof(*data));
	eth_dev->data = data;

	data->nb_rx_queues = queues;
	data->nb_tx_queues = queues;
	internal->max_queues = queues;
	data->dev_link = pmd_link;
	data->mac_addrs = eth_addr;
	data->dev_flags = RTE_ETH_DEV_INTR_LSC;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_vhost_rx;
	eth_dev->tx_pkt_burst = eth_vhost_tx;

	if (rte_vhost_driver_register(iface_name, flags))
		goto error;

	if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
		RTE_LOG(ERR, PMD, "Can't register callbacks\n");
		goto error;
	}

	if (rte_vhost_driver_start(iface_name) < 0) {
		RTE_LOG(ERR, PMD, "Failed to start driver for %s\n",
			iface_name);
		goto error;
	}

	return data->port_id;

error:
	if (internal) {
		free(internal->iface_name);
		free(internal->dev_name);
	}
	rte_free(vring_state);
	rte_free(eth_addr);
	if (eth_dev)
		rte_eth_dev_release_port(eth_dev);
	rte_free(internal);
	rte_free(list);
	rte_free(data);

	return -1;
}
static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
	const char **iface_name = extra_args;

	if (value == NULL)
		return -1;

	*iface_name = value;

	return 0;
}

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
	uint16_t *n = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	*n = (uint16_t)strtoul(value, NULL, 0);
	if (*n == USHRT_MAX && errno == ERANGE)
		return -1;

	return 0;
}
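
/*
 * vdev probe entry point, driven by the devargs parsed with the
 * valid_arguments list above, e.g. (illustrative command line only):
 *
 *	--vdev 'net_vhost0,iface=/tmp/sock0,queues=1,client=0'
 */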
static int
rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	char *iface_name;
	uint16_t queues;
	uint64_t flags = 0;
	int client_mode = 0;
	int dequeue_zero_copy = 0;
	int iommu_support = 0;

	RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n",
		rte_vdev_device_name(dev));

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL)
		return -1;

	if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
					 &open_iface, &iface_name);
		if (ret < 0)
			goto out_free;
	} else {
		ret = -1;
		goto out_free;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
					 &open_int, &queues);
		if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
			goto out_free;
	} else
		queues = 1;

	if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
					 &open_int, &client_mode);
		if (ret < 0)
			goto out_free;

		if (client_mode)
			flags |= RTE_VHOST_USER_CLIENT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
					 &open_int, &dequeue_zero_copy);
		if (ret < 0)
			goto out_free;

		if (dequeue_zero_copy)
			flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
					 &open_int, &iommu_support);
		if (ret < 0)
			goto out_free;

		if (iommu_support)
			flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	eth_dev_vhost_create(dev, iface_name, queues, dev->device.numa_node,
		flags);

out_free:
	rte_kvargs_free(kvlist);
	return ret;
}
static int
rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
	const char *name;
	struct rte_eth_dev *eth_dev = NULL;

	name = rte_vdev_device_name(dev);
	RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_dev_close(eth_dev);

	rte_free(vring_states[eth_dev->data->port_id]);
	vring_states[eth_dev->data->port_id] = NULL;

	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_vhost_drv = {
	.probe = rte_pmd_vhost_probe,
	.remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
	"iface=<ifc> "
	"queues=<int>");