/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 IGEL Co., Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"

enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

#define ETH_VHOST_IFACE_ARG "iface"
#define ETH_VHOST_QUEUES_ARG "queues"
#define ETH_VHOST_CLIENT_ARG "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
#define VHOST_MAX_PKT_BURST 32

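/* Rx/Tx bursts larger than this are split into chunks of at most 32
 * packets before being handed to the vhost library (see eth_vhost_rx()
 * and eth_vhost_tx() below).
 */
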
static const char *valid_arguments[] = {
	ETH_VHOST_IFACE_ARG,
	ETH_VHOST_QUEUES_ARG,
	ETH_VHOST_CLIENT_ARG,
	ETH_VHOST_DEQUEUE_ZERO_COPY,
	NULL
};

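/*
 * Illustrative EAL vdev strings built from the arguments above (the
 * socket paths are examples only):
 *
 *   --vdev 'net_vhost0,iface=/tmp/sock0,queues=1'
 *   --vdev 'net_vhost0,iface=/tmp/sock0,queues=2,client=1,dequeue-zero-copy=1'
 */
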
static struct ether_addr base_eth_addr = {
	.addr_bytes = {
		0x56 /* V */,
		0x48 /* H */,
		0x4F /* O */,
		0x53 /* S */,
		0x54 /* T */,
		0x00
	}
};

enum vhost_xstats_pkts {
	VHOST_UNDERSIZE_PKT = 0,
	VHOST_64_PKT,
	VHOST_65_TO_127_PKT,
	VHOST_128_TO_255_PKT,
	VHOST_256_TO_511_PKT,
	VHOST_512_TO_1023_PKT,
	VHOST_1024_TO_1522_PKT,
	VHOST_1523_TO_MAX_PKT,
	VHOST_BROADCAST_PKT,
	VHOST_MULTICAST_PKT,
	VHOST_UNICAST_PKT,
	VHOST_ERRORS_PKT,
	VHOST_ERRORS_FRAGMENTED,
	VHOST_ERRORS_JABBER,
	VHOST_UNKNOWN_PROTOCOL,
	VHOST_XSTATS_MAX,
};

struct vhost_stats {
	uint64_t pkts;
	uint64_t bytes;
	uint64_t missed_pkts;
	uint64_t xstats[VHOST_XSTATS_MAX];
};

struct vhost_queue {
	int vid;
	rte_atomic32_t allow_queuing;
	rte_atomic32_t while_queuing;
	struct pmd_internal *internal;
	struct rte_mempool *mb_pool;
	uint8_t port;
	uint16_t virtqueue_id;
	struct vhost_stats stats;
};

struct pmd_internal {
	rte_atomic32_t dev_attached;
	char *dev_name;
	char *iface_name;
	uint16_t max_queues;
	rte_atomic32_t started;
};

struct internal_list {
	TAILQ_ENTRY(internal_list) next;
	struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
	TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct rte_eth_link pmd_link = {
		.link_speed = 10000,
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_status = ETH_LINK_DOWN
};

struct rte_vhost_vring_state {
	rte_spinlock_t lock;

	bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
	bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
	unsigned int index;
	unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
	char name[VHOST_XSTATS_NAME_SIZE];
	uint64_t offset;
};

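/*
 * Each entry pairs a display name with the byte offset of its counter
 * inside struct vhost_queue; vhost_dev_xstats_get() then reads any
 * counter generically as *(uint64_t *)((char *)vq + offset).
 */
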
/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
	{"good_packets",
	 offsetof(struct vhost_queue, stats.pkts)},
	{"total_bytes",
	 offsetof(struct vhost_queue, stats.bytes)},
	{"missed_pkts",
	 offsetof(struct vhost_queue, stats.missed_pkts)},
	{"broadcast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
	{"multicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
	{"unicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
	{"undersize_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
	{"size_64_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
	{"size_65_to_127_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
	{"size_128_to_255_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
	{"size_256_to_511_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
	{"size_512_to_1023_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
	{"size_1024_to_1522_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
	{"size_1523_to_max_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
	{"errors_with_bad_CRC",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
	{"fragmented_errors",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
	{"jabber_errors",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
	{"unknown_protos_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};

/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
	{"good_packets",
	 offsetof(struct vhost_queue, stats.pkts)},
	{"total_bytes",
	 offsetof(struct vhost_queue, stats.bytes)},
	{"missed_pkts",
	 offsetof(struct vhost_queue, stats.missed_pkts)},
	{"broadcast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
	{"multicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
	{"unicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
	{"undersize_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
	{"size_64_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
	{"size_65_to_127_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
	{"size_128_to_255_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
	{"size_256_to_511_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
	{"size_512_to_1023_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
	{"size_1024_to_1522_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
	{"size_1523_to_max_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
	{"errors_with_bad_CRC",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};

#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
				sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
				sizeof(vhost_txport_stat_strings[0]))

static void
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct vhost_queue *vq = NULL;
	unsigned int i = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (!vq)
			continue;
		memset(&vq->stats, 0, sizeof(vq->stats));
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (!vq)
			continue;
		memset(&vq->stats, 0, sizeof(vq->stats));
	}
}

static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int limit __rte_unused)
{
	unsigned int t = 0;
	int count = 0;
	int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

	if (!xstats_names)
		return nstats;
	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "rx_%s", vhost_rxport_stat_strings[t].name);
		count++;
	}
	for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "tx_%s", vhost_txport_stat_strings[t].name);
		count++;
	}
	return count;
}

static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	unsigned int i;
	unsigned int t;
	unsigned int count = 0;
	struct vhost_queue *vq = NULL;
	unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

	if (n < nxstats)
		return nxstats;

	/* Unicast counts are derived: total packets minus broadcast and
	 * multicast (plus, on tx, the packets that were dropped).
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (!vq)
			continue;
		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (!vq)
			continue;
		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
				+ vq->stats.missed_pkts
				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
	}
	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
		xstats[count].value = 0;
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			vq = dev->data->rx_queues[i];
			if (!vq)
				continue;
			xstats[count].value +=
				*(uint64_t *)(((char *)vq)
				+ vhost_rxport_stat_strings[t].offset);
		}
		xstats[count].id = count;
		count++;
	}
	for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
		xstats[count].value = 0;
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			vq = dev->data->tx_queues[i];
			if (!vq)
				continue;
			xstats[count].value +=
				*(uint64_t *)(((char *)vq)
				+ vhost_txport_stat_strings[t].offset);
		}
		xstats[count].id = count;
		count++;
	}
	return count;
}

static inline void
vhost_count_multicast_broadcast(struct vhost_queue *vq,
				struct rte_mbuf *mbuf)
{
	struct ether_addr *ea = NULL;
	struct vhost_stats *pstats = &vq->stats;

	ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
	if (is_multicast_ether_addr(ea)) {
		if (is_broadcast_ether_addr(ea))
			pstats->xstats[VHOST_BROADCAST_PKT]++;
		else
			pstats->xstats[VHOST_MULTICAST_PKT]++;
	}
}

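/*
 * Size-bucket trick used below: for 64 < pkt_len < 1024,
 * (sizeof(pkt_len) * 8) - __builtin_clz(pkt_len) equals
 * floor(log2(pkt_len)) + 1, so subtracting 5 maps
 *   65..127   -> 2 (VHOST_65_TO_127_PKT)
 *   128..255  -> 3 (VHOST_128_TO_255_PKT)
 *   256..511  -> 4 (VHOST_256_TO_511_PKT)
 *   512..1023 -> 5 (VHOST_512_TO_1023_PKT)
 * This relies on these bucket values staying contiguous in
 * enum vhost_xstats_pkts.
 */
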
static void
vhost_update_packet_xstats(struct vhost_queue *vq,
			   struct rte_mbuf **bufs,
			   uint16_t count)
{
	uint32_t pkt_len = 0;
	uint64_t i = 0;
	uint64_t index;
	struct vhost_stats *pstats = &vq->stats;

	for (i = 0; i < count; i++) {
		pkt_len = bufs[i]->pkt_len;
		if (pkt_len == 64) {
			pstats->xstats[VHOST_64_PKT]++;
		} else if (pkt_len > 64 && pkt_len < 1024) {
			index = (sizeof(pkt_len) * 8)
				- __builtin_clz(pkt_len) - 5;
			pstats->xstats[index]++;
		} else {
			if (pkt_len < 64)
				pstats->xstats[VHOST_UNDERSIZE_PKT]++;
			else if (pkt_len <= 1522)
				pstats->xstats[VHOST_1024_TO_1522_PKT]++;
			else if (pkt_len > 1522)
				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
		}
		vhost_count_multicast_broadcast(vq, bufs[i]);
	}
}

static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct vhost_queue *r = q;
	uint16_t i, nb_rx = 0;
	uint16_t nb_receive = nb_bufs;

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		return 0;

	rte_atomic32_set(&r->while_queuing, 1);

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		goto out;

	/* Dequeue packets from guest TX queue */
	while (nb_receive) {
		uint16_t nb_pkts;
		uint16_t num = (uint16_t)RTE_MIN(nb_receive,
						 VHOST_MAX_PKT_BURST);

		nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
						  r->mb_pool, &bufs[nb_rx],
						  num);

		nb_rx += nb_pkts;
		nb_receive -= nb_pkts;
		if (nb_pkts < num)
			break;
	}

	r->stats.pkts += nb_rx;

	for (i = 0; likely(i < nb_rx); i++) {
		bufs[i]->port = r->port;
		r->stats.bytes += bufs[i]->pkt_len;
	}

	vhost_update_packet_xstats(r, bufs, nb_rx);

out:
	rte_atomic32_set(&r->while_queuing, 0);

	return nb_rx;
}

static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct vhost_queue *r = q;
	uint16_t i, nb_tx = 0;
	uint16_t nb_send = nb_bufs;

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		return 0;

	rte_atomic32_set(&r->while_queuing, 1);

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		goto out;

	/* Enqueue packets to guest RX queue */
	while (nb_send) {
		uint16_t nb_pkts;
		uint16_t num = (uint16_t)RTE_MIN(nb_send,
						 VHOST_MAX_PKT_BURST);

		nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
						  &bufs[nb_tx], num);

		nb_tx += nb_pkts;
		nb_send -= nb_pkts;
		if (nb_pkts < num)
			break;
	}

	r->stats.pkts += nb_tx;
	r->stats.missed_pkts += nb_bufs - nb_tx;

	for (i = 0; likely(i < nb_tx); i++)
		r->stats.bytes += bufs[i]->pkt_len;

	vhost_update_packet_xstats(r, bufs, nb_tx);

	/* According to RFC 2863 (ifHCOutMulticastPkts and
	 * ifHCOutBroadcastPkts), the "multicast" and "broadcast" counters
	 * include packets that were not transmitted successfully, so count
	 * the dropped tail of the burst as well.
	 */
	for (i = nb_tx; i < nb_bufs; i++)
		vhost_count_multicast_broadcast(r, bufs[i]);

	for (i = 0; likely(i < nb_tx); i++)
		rte_pktmbuf_free(bufs[i]);

out:
	rte_atomic32_set(&r->while_queuing, 0);

	return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
	int found = 0;
	struct internal_list *list;
	struct pmd_internal *internal;

	if (ifname == NULL)
		return NULL;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		internal = list->eth_dev->data->dev_private;
		if (!strcmp(internal->iface_name, ifname)) {
			found = 1;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	if (!found)
		return NULL;

	return list;
}

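/*
 * allow_queuing/while_queuing form a lightweight handshake between the
 * control path and the burst functions: the control path clears
 * allow_queuing and then spins until while_queuing drops to zero, after
 * which no burst function can still be touching the vhost device. The
 * burst functions re-check allow_queuing after raising while_queuing to
 * close the race window.
 */
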
static void
update_queuing_status(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;
	struct vhost_queue *vq;
	unsigned int i;
	int allow_queuing = 1;

	if (rte_atomic32_read(&internal->started) == 0 ||
	    rte_atomic32_read(&internal->dev_attached) == 0)
		allow_queuing = 0;

	/* Wait until rx/tx_pkt_burst stops accessing vhost device */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, allow_queuing);
		while (rte_atomic32_read(&vq->while_queuing))
			rte_pause();
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, allow_queuing);
		while (rte_atomic32_read(&vq->while_queuing))
			rte_pause();
	}
}

static int
new_device(int vid)
{
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	struct pmd_internal *internal;
	struct vhost_queue *vq;
	unsigned int i;
	char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
	int newnode;
#endif

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
		return -1;
	}

	eth_dev = list->eth_dev;
	internal = eth_dev->data->dev_private;

#ifdef RTE_LIBRTE_VHOST_NUMA
	newnode = rte_vhost_get_numa_node(vid);
	if (newnode >= 0)
		eth_dev->data->numa_node = newnode;
#endif

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		vq = eth_dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		vq->vid = vid;
		vq->internal = internal;
		vq->port = eth_dev->data->port_id;
	}
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		vq = eth_dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		vq->vid = vid;
		vq->internal = internal;
		vq->port = eth_dev->data->port_id;
	}

	for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
		rte_vhost_enable_guest_notification(vid, i, 0);

	rte_vhost_get_mtu(vid, &eth_dev->data->mtu);

	eth_dev->data->dev_link.link_status = ETH_LINK_UP;

	rte_atomic32_set(&internal->dev_attached, 1);
	update_queuing_status(eth_dev);

	RTE_LOG(INFO, PMD, "New connection established\n");

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

	return 0;
}

static void
destroy_device(int vid)
{
	struct rte_eth_dev *eth_dev;
	struct pmd_internal *internal;
	struct vhost_queue *vq;
	struct internal_list *list;
	char ifname[PATH_MAX];
	unsigned int i;
	struct rte_vhost_vring_state *state;

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
		return;
	}
	eth_dev = list->eth_dev;
	internal = eth_dev->data->dev_private;

	rte_atomic32_set(&internal->dev_attached, 0);
	update_queuing_status(eth_dev);

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		vq = eth_dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		vq->vid = -1;
	}
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		vq = eth_dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		vq->vid = -1;
	}

	state = vring_states[eth_dev->data->port_id];
	rte_spinlock_lock(&state->lock);
	for (i = 0; i <= state->max_vring; i++) {
		state->cur[i] = false;
		state->seen[i] = false;
	}
	state->max_vring = 0;
	rte_spinlock_unlock(&state->lock);

	RTE_LOG(INFO, PMD, "Connection closed\n");

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
	struct rte_vhost_vring_state *state;
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	char ifname[PATH_MAX];

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
		return -1;
	}

	eth_dev = list->eth_dev;
	/* won't be NULL */
	state = vring_states[eth_dev->data->port_id];
	rte_spinlock_lock(&state->lock);
	state->cur[vring] = enable;
	state->max_vring = RTE_MAX(vring, state->max_vring);
	rte_spinlock_unlock(&state->lock);

	RTE_LOG(INFO, PMD, "vring%u is %s\n",
		vring, enable ? "enabled" : "disabled");

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

	return 0;
}

static struct vhost_device_ops vhost_ops = {
	.new_device          = new_device,
	.destroy_device      = destroy_device,
	.vring_state_changed = vring_state_changed,
};

int
rte_eth_vhost_get_queue_event(uint8_t port_id,
		struct rte_eth_vhost_queue_event *event)
{
	struct rte_vhost_vring_state *state;
	unsigned int i;
	int idx;

	if (port_id >= RTE_MAX_ETHPORTS) {
		RTE_LOG(ERR, PMD, "Invalid port id\n");
		return -1;
	}

	state = vring_states[port_id];
	if (!state) {
		RTE_LOG(ERR, PMD, "Unused port\n");
		return -1;
	}

	rte_spinlock_lock(&state->lock);
	for (i = 0; i <= state->max_vring; i++) {
		idx = state->index++ % (state->max_vring + 1);

		if (state->cur[idx] != state->seen[idx]) {
			state->seen[idx] = state->cur[idx];
			event->queue_id = idx / 2;
			event->rx = idx & 1;
			event->enable = state->cur[idx];
			rte_spinlock_unlock(&state->lock);
			return 0;
		}
	}
	rte_spinlock_unlock(&state->lock);

	return -1;
}

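/*
 * A minimal caller sketch for the API above (event field names as
 * declared in rte_eth_vhost.h):
 *
 *	struct rte_eth_vhost_queue_event event;
 *
 *	while (rte_eth_vhost_get_queue_event(port_id, &event) == 0)
 *		handle(event.queue_id, event.rx, event.enable);
 *
 * where handle() is an application-supplied hook.
 */
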
int
rte_eth_vhost_get_vid_from_port_id(uint8_t port_id)
{
	struct internal_list *list;
	struct rte_eth_dev *eth_dev;
	struct vhost_queue *vq;
	int vid = -1;

	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		eth_dev = list->eth_dev;
		if (eth_dev->data->port_id == port_id) {
			vq = eth_dev->data->rx_queues[0];
			if (vq)
				vid = vq->vid;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	return vid;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;

	rte_atomic32_set(&internal->started, 1);
	update_queuing_status(dev);

	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;

	rte_atomic32_set(&internal->started, 0);
	update_queuing_status(dev);
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal;
	struct internal_list *list;
	unsigned int i;

	internal = dev->data->dev_private;
	if (!internal)
		return;

	eth_dev_stop(dev);

	rte_vhost_driver_unregister(internal->iface_name);

	list = find_internal_resource(internal->iface_name);
	if (!list)
		return;

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_REMOVE(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);
	rte_free(list);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		rte_free(dev->data->rx_queues[i]);
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		rte_free(dev->data->tx_queues[i]);

	rte_free(dev->data->mac_addrs);
	free(internal->dev_name);
	free(internal->iface_name);
	rte_free(internal);

	dev->data->dev_private = NULL;
}

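/*
 * Virtqueue numbering: queue pair N maps to guest vrings 2N (the guest's
 * RX ring, VIRTIO_RXQ) and 2N + 1 (the guest's TX ring, VIRTIO_TXQ).
 * The host RX queue set up below therefore dequeues from the guest TX
 * ring, and the host TX queue enqueues to the guest RX ring.
 */
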
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct vhost_queue *vq;

	vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (vq == NULL) {
		RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n");
		return -ENOMEM;
	}

	vq->mb_pool = mb_pool;
	vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
	dev->data->rx_queues[rx_queue_id] = vq;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct vhost_queue *vq;

	vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (vq == NULL) {
		RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n");
		return -ENOMEM;
	}

	vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
	dev->data->tx_queues[tx_queue_id] = vq;

	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
	     struct rte_eth_dev_info *dev_info)
{
	struct pmd_internal *internal;

	internal = dev->data->dev_private;
	if (internal == NULL) {
		RTE_LOG(ERR, PMD, "Invalid device specified\n");
		return;
	}

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = internal->max_queues;
	dev_info->max_tx_queues = internal->max_queues;
	dev_info->min_rx_bufsize = 0;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
	unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
	struct vhost_queue *vq;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;
		vq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = vq->stats.pkts;
		rx_total += stats->q_ipackets[i];

		stats->q_ibytes[i] = vq->stats.bytes;
		rx_total_bytes += stats->q_ibytes[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;
		vq = dev->data->tx_queues[i];
		stats->q_opackets[i] = vq->stats.pkts;
		tx_missed_total += vq->stats.missed_pkts;
		tx_total += stats->q_opackets[i];

		stats->q_obytes[i] = vq->stats.bytes;
		tx_total_bytes += stats->q_obytes[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;
	stats->oerrors = tx_missed_total;
	stats->ibytes = rx_total_bytes;
	stats->obytes = tx_total_bytes;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	struct vhost_queue *vq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;
		vq = dev->data->rx_queues[i];
		vq->stats.pkts = 0;
		vq->stats.bytes = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;
		vq = dev->data->tx_queues[i];
		vq->stats.pkts = 0;
		vq->stats.bytes = 0;
		vq->stats.missed_pkts = 0;
	}
}

static void
eth_queue_release(void *q)
{
	rte_free(q);
}

static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
	/*
	 * vHost does not hold on to mbufs: eth_vhost_tx() copies the packet
	 * data into the guest ring and frees the mbufs itself, so there is
	 * nothing to clean up here.
	 */
	return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

static uint32_t
eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct vhost_queue *vq;

	vq = dev->data->rx_queues[rx_queue_id];
	if (vq == NULL)
		return 0;

	return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.tx_done_cleanup = eth_tx_done_cleanup,
	.rx_queue_count = eth_rx_queue_count,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.xstats_reset = vhost_dev_xstats_reset,
	.xstats_get = vhost_dev_xstats_get,
	.xstats_get_names = vhost_dev_xstats_get_names,
};

static struct rte_vdev_driver pmd_vhost_drv;

static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
	int16_t queues, const unsigned int numa_node, uint64_t flags)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internal *internal = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct ether_addr *eth_addr = NULL;
	struct rte_vhost_vring_state *vring_state = NULL;
	struct internal_list *list = NULL;

	RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
		numa_node);

	/* now do all data allocation - for eth_dev structure and internal
	 * (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
	if (list == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
	if (eth_dev == NULL)
		goto error;

	eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
	if (eth_addr == NULL)
		goto error;
	*eth_addr = base_eth_addr;
	eth_addr->addr_bytes[5] = eth_dev->data->port_id;

	vring_state = rte_zmalloc_socket(name,
			sizeof(*vring_state), 0, numa_node);
	if (vring_state == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internal,
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	internal = eth_dev->data->dev_private;
	internal->dev_name = strdup(name);
	if (internal->dev_name == NULL)
		goto error;
	internal->iface_name = strdup(iface_name);
	if (internal->iface_name == NULL)
		goto error;

	list->eth_dev = eth_dev;
	pthread_mutex_lock(&internal_list_lock);
	TAILQ_INSERT_TAIL(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_spinlock_init(&vring_state->lock);
	vring_states[eth_dev->data->port_id] = vring_state;

	/* Replace the 'data' that was originally allocated for eth_dev with
	 * our own copy, so vhost PMD resources are not shared between
	 * multiple processes.
	 */
	rte_memcpy(data, eth_dev->data, sizeof(*data));
	eth_dev->data = data;

	data->nb_rx_queues = queues;
	data->nb_tx_queues = queues;
	internal->max_queues = queues;
	data->dev_link = pmd_link;
	data->mac_addrs = eth_addr;
	data->dev_flags =
		RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_vhost_rx;
	eth_dev->tx_pkt_burst = eth_vhost_tx;

	if (rte_vhost_driver_register(iface_name, flags))
		goto error;

	if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
		RTE_LOG(ERR, PMD, "Can't register callbacks\n");
		goto error;
	}

	if (rte_vhost_driver_start(iface_name) < 0) {
		RTE_LOG(ERR, PMD, "Failed to start driver for %s\n",
			iface_name);
		goto error;
	}

	return data->port_id;

error:
	if (internal) {
		free(internal->iface_name);
		free(internal->dev_name);
	}
	rte_free(vring_state);
	rte_free(eth_addr);
	if (eth_dev)
		rte_eth_dev_release_port(eth_dev);
	rte_free(internal);
	rte_free(list);
	rte_free(data);

	return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
	const char **iface_name = extra_args;

	if (value == NULL)
		return -1;

	*iface_name = value;

	return 0;
}

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
	uint16_t *n = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	*n = (uint16_t)strtoul(value, NULL, 0);
	if (*n == USHRT_MAX && errno == ERANGE)
		return -1;

	return 0;
}

static int
rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	char *iface_name;
	uint16_t queues;
	uint64_t flags = 0;
	int client_mode = 0;
	int dequeue_zero_copy = 0;

	RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n",
		rte_vdev_device_name(dev));

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL)
		return -1;

	if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
					 &open_iface, &iface_name);
		if (ret < 0)
			goto out_free;
	} else {
		ret = -1;
		goto out_free;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
					 &open_int, &queues);
		if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
			goto out_free;
	} else
		queues = 1;

	if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
					 &open_int, &client_mode);
		if (ret < 0)
			goto out_free;

		if (client_mode)
			flags |= RTE_VHOST_USER_CLIENT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
					 &open_int, &dequeue_zero_copy);
		if (ret < 0)
			goto out_free;

		if (dequeue_zero_copy)
			flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	eth_dev_vhost_create(dev, iface_name, queues, dev->device.numa_node,
			     flags);

out_free:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
	const char *name;
	struct rte_eth_dev *eth_dev = NULL;

	name = rte_vdev_device_name(dev);
	RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_dev_close(eth_dev);

	rte_free(vring_states[eth_dev->data->port_id]);
	vring_states[eth_dev->data->port_id] = NULL;

	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_vhost_drv = {
	.probe = rte_pmd_vhost_probe,
	.remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
	"iface=<ifc> "
	"queues=<int>");