/*-
 *   BSD LICENSE
 *
 * Copyright (c) 2016 IGEL Co., Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of IGEL Co.,Ltd. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_virtio_net.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"

#define ETH_VHOST_IFACE_ARG		"iface"
#define ETH_VHOST_QUEUES_ARG		"queues"
#define ETH_VHOST_CLIENT_ARG		"client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY	"dequeue-zero-copy"
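
/*
 * These devargs are consumed at probe time, e.g.
 *   --vdev 'net_vhost0,iface=/tmp/sock0,queues=2,client=1'
 * "iface" is the vhost-user socket path, "queues" the number of queue
 * pairs, "client" selects vhost-user client mode, and
 * "dequeue-zero-copy" enables zero copy on the dequeue path.
 */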
static const char *valid_arguments[] = {
	ETH_VHOST_IFACE_ARG,
	ETH_VHOST_QUEUES_ARG,
	ETH_VHOST_CLIENT_ARG,
	ETH_VHOST_DEQUEUE_ZERO_COPY,
	NULL
};

static struct ether_addr base_eth_addr = {
	.addr_bytes = {
		0x56 /* V */,
		0x48 /* H */,
		0x4F /* O */,
		0x53 /* S */,
		0x54 /* T */,
		0x00
	}
};

enum vhost_xstats_pkts {
	VHOST_UNDERSIZE_PKT = 0,
	VHOST_64_PKT,
	VHOST_65_TO_127_PKT,
	VHOST_128_TO_255_PKT,
	VHOST_256_TO_511_PKT,
	VHOST_512_TO_1023_PKT,
	VHOST_1024_TO_1522_PKT,
	VHOST_1523_TO_MAX_PKT,
	VHOST_BROADCAST_PKT,
	VHOST_MULTICAST_PKT,
	VHOST_UNICAST_PKT,
	VHOST_ERRORS_PKT,
	VHOST_ERRORS_FRAGMENTED,
	VHOST_ERRORS_JABBER,
	VHOST_UNKNOWN_PROTOCOL,
	VHOST_XSTATS_MAX,
};

struct vhost_stats {
	uint64_t pkts;
	uint64_t bytes;
	uint64_t missed_pkts;
	uint64_t xstats[VHOST_XSTATS_MAX];
};

struct vhost_queue {
	int vid;
	rte_atomic32_t allow_queuing;
	rte_atomic32_t while_queuing;
	struct pmd_internal *internal;
	struct rte_mempool *mb_pool;
	uint8_t port;
	uint16_t virtqueue_id;
	struct vhost_stats stats;
};

struct pmd_internal {
	rte_atomic32_t dev_attached;
	char *dev_name;
	char *iface_name;
	uint16_t max_queues;
	rte_atomic32_t started;
};

struct internal_list {
	TAILQ_ENTRY(internal_list) next;
	struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
	TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static rte_atomic16_t nb_started_ports;
static pthread_t session_th;

static struct rte_eth_link pmd_link = {
		.link_speed = 10000,
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_status = ETH_LINK_DOWN
};

struct rte_vhost_vring_state {
	rte_spinlock_t lock;

	bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
	bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
	unsigned int index;
	unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
	char name[VHOST_XSTATS_NAME_SIZE];
	uint64_t offset;
};

/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
	{"good_packets",
	 offsetof(struct vhost_queue, stats.pkts)},
	{"total_bytes",
	 offsetof(struct vhost_queue, stats.bytes)},
	{"missed_pkts",
	 offsetof(struct vhost_queue, stats.missed_pkts)},
	{"broadcast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
	{"multicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
	{"unicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
	{"undersize_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
	{"size_64_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
	{"size_65_to_127_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
	{"size_128_to_255_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
	{"size_256_to_511_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
	{"size_512_to_1023_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
	{"size_1024_to_1522_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
	{"size_1523_to_max_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
	{"errors_with_bad_CRC",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
	{"fragmented_errors",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
	{"jabber_errors",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
	{"unknown_protos_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};

/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
	{"good_packets",
	 offsetof(struct vhost_queue, stats.pkts)},
	{"total_bytes",
	 offsetof(struct vhost_queue, stats.bytes)},
	{"missed_pkts",
	 offsetof(struct vhost_queue, stats.missed_pkts)},
	{"broadcast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
	{"multicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
	{"unicast_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
	{"undersize_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
	{"size_64_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
	{"size_65_to_127_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
	{"size_128_to_255_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
	{"size_256_to_511_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
	{"size_512_to_1023_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
	{"size_1024_to_1522_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
	{"size_1523_to_max_packets",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
	{"errors_with_bad_CRC",
	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};

#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
				sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
				sizeof(vhost_txport_stat_strings[0]))

static void
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct vhost_queue *vq = NULL;
	unsigned int i = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (!vq)
			continue;
		memset(&vq->stats, 0, sizeof(vq->stats));
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (!vq)
			continue;
		memset(&vq->stats, 0, sizeof(vq->stats));
	}
}

static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int limit __rte_unused)
{
	unsigned int t = 0;
	int count = 0;
	int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

	if (!xstats_names)
		return nstats;
	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "rx_%s", vhost_rxport_stat_strings[t].name);
		count++;
	}
	for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "tx_%s", vhost_txport_stat_strings[t].name);
		count++;
	}
	return count;
}

static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	unsigned int i;
	unsigned int t;
	unsigned int count = 0;
	struct vhost_queue *vq = NULL;
	unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

	if (n < nxstats)
		return nxstats;

	/* Unicast counters are derived, not counted per packet. */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (!vq)
			continue;
		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (!vq)
			continue;
		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
				+ vq->stats.missed_pkts
				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
	}
	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
		xstats[count].value = 0;
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			vq = dev->data->rx_queues[i];
			if (!vq)
				continue;
			xstats[count].value +=
				*(uint64_t *)(((char *)vq)
				+ vhost_rxport_stat_strings[t].offset);
		}
		xstats[count].id = count;
		count++;
	}
	for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
		xstats[count].value = 0;
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			vq = dev->data->tx_queues[i];
			if (!vq)
				continue;
			xstats[count].value +=
				*(uint64_t *)(((char *)vq)
				+ vhost_txport_stat_strings[t].offset);
		}
		xstats[count].id = count;
		count++;
	}
	return count;
}

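/*
 * An illustrative sketch (not part of this driver): an application reads
 * the counters above through the generic ethdev xstats calls,
 * rte_eth_xstats_get_names() and rte_eth_xstats_get(). The function and
 * its guard macro below are hypothetical.
 */
#ifdef VHOST_XSTATS_EXAMPLE
static void
example_dump_vhost_xstats(uint8_t port_id)
{
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *values;
	int n, i;

	/* A NULL array asks only for the number of counters. */
	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;

	names = rte_malloc(NULL, sizeof(*names) * n, 0);
	values = rte_malloc(NULL, sizeof(*values) * n, 0);
	if (names != NULL && values != NULL &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, values, n) == n) {
		for (i = 0; i < n; i++)
			RTE_LOG(INFO, PMD, "%s: %lu\n", names[i].name,
				(unsigned long)values[i].value);
	}

	rte_free(names);
	rte_free(values);
}
#endif
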
static void
vhost_count_multicast_broadcast(struct vhost_queue *vq,
				struct rte_mbuf *mbuf)
{
	struct ether_addr *ea = NULL;
	struct vhost_stats *pstats = &vq->stats;

	ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
	if (is_multicast_ether_addr(ea)) {
		if (is_broadcast_ether_addr(ea))
			pstats->xstats[VHOST_BROADCAST_PKT]++;
		else
			pstats->xstats[VHOST_MULTICAST_PKT]++;
	}
}

static void
vhost_update_packet_xstats(struct vhost_queue *vq,
			   struct rte_mbuf **bufs,
			   uint16_t count)
{
	uint32_t pkt_len = 0;
	uint64_t i = 0;
	uint64_t index;
	struct vhost_stats *pstats = &vq->stats;

	for (i = 0; i < count; i++) {
		pkt_len = bufs[i]->pkt_len;
		if (pkt_len == 64) {
			pstats->xstats[VHOST_64_PKT]++;
		} else if (pkt_len > 64 && pkt_len < 1024) {
			index = (sizeof(pkt_len) * 8)
				- __builtin_clz(pkt_len) - 5;
			pstats->xstats[index]++;
		} else {
			if (pkt_len < 64)
				pstats->xstats[VHOST_UNDERSIZE_PKT]++;
			else if (pkt_len <= 1522)
				pstats->xstats[VHOST_1024_TO_1522_PKT]++;
			else if (pkt_len > 1522)
				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
		}
		vhost_count_multicast_broadcast(vq, bufs[i]);
	}
}

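/*
 * Note on the size-bucket index above: for 64 < pkt_len < 1024,
 *   index = 32 - __builtin_clz(pkt_len) - 5 = floor(log2(pkt_len)) - 4,
 * so [65, 127] maps to VHOST_65_TO_127_PKT (2), [128, 255] to
 * VHOST_128_TO_255_PKT (3), and so on, because those enum values are
 * laid out consecutively in power-of-two order.
 */
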
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct vhost_queue *r = q;
	uint16_t i, nb_rx = 0;

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		return 0;

	rte_atomic32_set(&r->while_queuing, 1);

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		goto out;

	/* Dequeue packets from guest TX queue */
	nb_rx = rte_vhost_dequeue_burst(r->vid,
			r->virtqueue_id, r->mb_pool, bufs, nb_bufs);

	r->stats.pkts += nb_rx;

	for (i = 0; likely(i < nb_rx); i++) {
		bufs[i]->port = r->port;
		r->stats.bytes += bufs[i]->pkt_len;
	}

	vhost_update_packet_xstats(r, bufs, nb_rx);

out:
	rte_atomic32_set(&r->while_queuing, 0);

	return nb_rx;
}

static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct vhost_queue *r = q;
	uint16_t i, nb_tx = 0;

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		return 0;

	rte_atomic32_set(&r->while_queuing, 1);

	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
		goto out;

	/* Enqueue packets to guest RX queue */
	nb_tx = rte_vhost_enqueue_burst(r->vid,
			r->virtqueue_id, bufs, nb_bufs);

	r->stats.pkts += nb_tx;
	r->stats.missed_pkts += nb_bufs - nb_tx;

	for (i = 0; likely(i < nb_tx); i++)
		r->stats.bytes += bufs[i]->pkt_len;

	vhost_update_packet_xstats(r, bufs, nb_tx);

	/* According to RFC 2863, sections ifHCOutMulticastPkts and
	 * ifHCOutBroadcastPkts (page 42), the "multicast" and "broadcast"
	 * counters are increased even when packets are not transmitted
	 * successfully.
	 */
	for (i = nb_tx; i < nb_bufs; i++)
		vhost_count_multicast_broadcast(r, bufs[i]);

	/* Only transmitted mbufs are freed here; per tx_burst semantics,
	 * mbufs the vhost library did not accept stay owned by the caller.
	 */
	for (i = 0; likely(i < nb_tx); i++)
		rte_pktmbuf_free(bufs[i]);

out:
	rte_atomic32_set(&r->while_queuing, 0);

	return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
	int found = 0;
	struct internal_list *list;
	struct pmd_internal *internal;

	if (ifname == NULL)
		return NULL;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		internal = list->eth_dev->data->dev_private;
		if (!strcmp(internal->iface_name, ifname)) {
			found = 1;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	if (!found)
		return NULL;

	return list;
}

static void
update_queuing_status(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;
	struct vhost_queue *vq;
	unsigned int i;
	int allow_queuing = 1;

	if (rte_atomic32_read(&internal->started) == 0 ||
	    rte_atomic32_read(&internal->dev_attached) == 0)
		allow_queuing = 0;

	/* Wait until rx/tx_pkt_burst stops accessing vhost device */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, allow_queuing);
		while (rte_atomic32_read(&vq->while_queuing))
			rte_pause();
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		rte_atomic32_set(&vq->allow_queuing, allow_queuing);
		while (rte_atomic32_read(&vq->while_queuing))
			rte_pause();
	}
}

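/*
 * The allow_queuing/while_queuing pair forms a simple lock-free handshake:
 * the burst functions re-check allow_queuing after raising while_queuing,
 * so once update_queuing_status() clears allow_queuing and observes
 * while_queuing == 0 for a queue, no burst call can still be inside the
 * vhost library for that queue.
 */
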
static int
new_device(int vid)
{
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	struct pmd_internal *internal;
	struct vhost_queue *vq;
	unsigned int i;
	char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
	int newnode;
#endif

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
		return -1;
	}

	eth_dev = list->eth_dev;
	internal = eth_dev->data->dev_private;

#ifdef RTE_LIBRTE_VHOST_NUMA
	newnode = rte_vhost_get_numa_node(vid);
	if (newnode >= 0)
		eth_dev->data->numa_node = newnode;
#endif

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		vq = eth_dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		vq->vid = vid;
		vq->internal = internal;
		vq->port = eth_dev->data->port_id;
	}
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		vq = eth_dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		vq->vid = vid;
		vq->internal = internal;
		vq->port = eth_dev->data->port_id;
	}

	/* The PMD polls the rings, so guest notifications are not needed. */
	for (i = 0; i < rte_vhost_get_queue_num(vid) * VIRTIO_QNUM; i++)
		rte_vhost_enable_guest_notification(vid, i, 0);

	eth_dev->data->dev_link.link_status = ETH_LINK_UP;

	rte_atomic32_set(&internal->dev_attached, 1);
	update_queuing_status(eth_dev);

	RTE_LOG(INFO, PMD, "New connection established\n");

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

	return 0;
}

static void
destroy_device(int vid)
{
	struct rte_eth_dev *eth_dev;
	struct pmd_internal *internal;
	struct vhost_queue *vq;
	struct internal_list *list;
	char ifname[PATH_MAX];
	unsigned int i;
	struct rte_vhost_vring_state *state;

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
		return;
	}
	eth_dev = list->eth_dev;
	internal = eth_dev->data->dev_private;

	rte_atomic32_set(&internal->dev_attached, 0);
	update_queuing_status(eth_dev);

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		vq = eth_dev->data->rx_queues[i];
		if (vq == NULL)
			continue;
		vq->vid = -1;
	}
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		vq = eth_dev->data->tx_queues[i];
		if (vq == NULL)
			continue;
		vq->vid = -1;
	}

	state = vring_states[eth_dev->data->port_id];
	rte_spinlock_lock(&state->lock);
	for (i = 0; i <= state->max_vring; i++) {
		state->cur[i] = false;
		state->seen[i] = false;
	}
	state->max_vring = 0;
	rte_spinlock_unlock(&state->lock);

	RTE_LOG(INFO, PMD, "Connection closed\n");

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
	struct rte_vhost_vring_state *state;
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	char ifname[PATH_MAX];

	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
	list = find_internal_resource(ifname);
	if (list == NULL) {
		RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
		return -1;
	}

	eth_dev = list->eth_dev;
	/* won't be NULL */
	state = vring_states[eth_dev->data->port_id];
	rte_spinlock_lock(&state->lock);
	state->cur[vring] = enable;
	state->max_vring = RTE_MAX(vring, state->max_vring);
	rte_spinlock_unlock(&state->lock);

	RTE_LOG(INFO, PMD, "vring%u is %s\n",
			vring, enable ? "enabled" : "disabled");

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

	return 0;
}

int
rte_eth_vhost_get_queue_event(uint8_t port_id,
		struct rte_eth_vhost_queue_event *event)
{
	struct rte_vhost_vring_state *state;
	unsigned int i;
	int idx;

	if (port_id >= RTE_MAX_ETHPORTS) {
		RTE_LOG(ERR, PMD, "Invalid port id\n");
		return -1;
	}

	state = vring_states[port_id];
	if (!state) {
		RTE_LOG(ERR, PMD, "Unused port\n");
		return -1;
	}

	rte_spinlock_lock(&state->lock);
	for (i = 0; i <= state->max_vring; i++) {
		idx = state->index++ % (state->max_vring + 1);

		if (state->cur[idx] != state->seen[idx]) {
			state->seen[idx] = state->cur[idx];
			event->queue_id = idx / 2;
			/* 0 means RX queue */
			event->rx = idx & 1;
			event->enable = state->cur[idx];
			rte_spinlock_unlock(&state->lock);
			return 0;
		}
	}
	rte_spinlock_unlock(&state->lock);

	return -1;
}

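/*
 * An illustrative sketch (not part of this driver): an application would
 * typically drain these events from its RTE_ETH_EVENT_QUEUE_STATE
 * callback. The function and its guard macro below are hypothetical.
 */
#ifdef VHOST_QUEUE_EVENT_EXAMPLE
static void
example_drain_queue_events(uint8_t port_id)
{
	struct rte_eth_vhost_queue_event event;

	/* Returns 0 while events are pending, -1 once none are left. */
	while (rte_eth_vhost_get_queue_event(port_id, &event) == 0)
		RTE_LOG(INFO, PMD, "port %u %s queue %u %s\n",
			(unsigned int)port_id, event.rx ? "rx" : "tx",
			(unsigned int)event.queue_id,
			event.enable ? "enabled" : "disabled");
}
#endif
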
int
rte_eth_vhost_get_vid_from_port_id(uint8_t port_id)
{
	struct internal_list *list;
	struct rte_eth_dev *eth_dev;
	struct vhost_queue *vq;
	int vid = -1;

	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		eth_dev = list->eth_dev;
		if (eth_dev->data->port_id == port_id) {
			vq = eth_dev->data->rx_queues[0];
			if (vq)
				vid = vq->vid;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	return vid;
}

static void *
vhost_driver_session(void *param __rte_unused)
{
	static struct virtio_net_device_ops vhost_ops;

	/* set vhost arguments */
	vhost_ops.new_device = new_device;
	vhost_ops.destroy_device = destroy_device;
	vhost_ops.vring_state_changed = vring_state_changed;
	if (rte_vhost_driver_callback_register(&vhost_ops) < 0)
		RTE_LOG(ERR, PMD, "Can't register callbacks\n");

	/* start event handling */
	rte_vhost_driver_session_start();

	return NULL;
}

static int
vhost_driver_session_start(void)
{
	int ret;

	ret = pthread_create(&session_th,
			NULL, vhost_driver_session, NULL);
	if (ret)
		RTE_LOG(ERR, PMD, "Can't create a thread\n");

	return ret;
}

static void
vhost_driver_session_stop(void)
{
	int ret;

	ret = pthread_cancel(session_th);
	if (ret)
		RTE_LOG(ERR, PMD, "Can't cancel the thread\n");

	ret = pthread_join(session_th, NULL);
	if (ret)
		RTE_LOG(ERR, PMD, "Can't join the thread\n");
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;

	rte_atomic32_set(&internal->started, 1);
	update_queuing_status(dev);

	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;

	rte_atomic32_set(&internal->started, 0);
	update_queuing_status(dev);
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct vhost_queue *vq;

	vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (vq == NULL) {
		RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n");
		return -ENOMEM;
	}

	/* The PMD rx queue reads from the guest's TX virtqueue. */
	vq->mb_pool = mb_pool;
	vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
	dev->data->rx_queues[rx_queue_id] = vq;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct vhost_queue *vq;

	vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (vq == NULL) {
		RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n");
		return -ENOMEM;
	}

	/* The PMD tx queue writes to the guest's RX virtqueue. */
	vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
	dev->data->tx_queues[tx_queue_id] = vq;

	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
	     struct rte_eth_dev_info *dev_info)
{
	struct pmd_internal *internal;

	internal = dev->data->dev_private;
	if (internal == NULL) {
		RTE_LOG(ERR, PMD, "Invalid device specified\n");
		return;
	}

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = internal->max_queues;
	dev_info->max_tx_queues = internal->max_queues;
	dev_info->min_rx_bufsize = 0;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
	unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
	struct vhost_queue *vq;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;
		vq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = vq->stats.pkts;
		rx_total += stats->q_ipackets[i];

		stats->q_ibytes[i] = vq->stats.bytes;
		rx_total_bytes += stats->q_ibytes[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;
		vq = dev->data->tx_queues[i];
		stats->q_opackets[i] = vq->stats.pkts;
		tx_missed_total += vq->stats.missed_pkts;
		tx_total += stats->q_opackets[i];

		stats->q_obytes[i] = vq->stats.bytes;
		tx_total_bytes += stats->q_obytes[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;
	stats->oerrors = tx_missed_total;
	stats->ibytes = rx_total_bytes;
	stats->obytes = tx_total_bytes;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	struct vhost_queue *vq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;
		vq = dev->data->rx_queues[i];
		vq->stats.pkts = 0;
		vq->stats.bytes = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;
		vq = dev->data->tx_queues[i];
		vq->stats.pkts = 0;
		vq->stats.bytes = 0;
		vq->stats.missed_pkts = 0;
	}
}

static void
eth_queue_release(void *q)
{
	rte_free(q);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

/**
 * Disable features in feature_mask. Returns 0 on success.
 */
int
rte_eth_vhost_feature_disable(uint64_t feature_mask)
{
	return rte_vhost_feature_disable(feature_mask);
}

/**
 * Enable features in feature_mask. Returns 0 on success.
 */
int
rte_eth_vhost_feature_enable(uint64_t feature_mask)
{
	return rte_vhost_feature_enable(feature_mask);
}

/* Returns currently supported vhost features */
uint64_t
rte_eth_vhost_feature_get(void)
{
	return rte_vhost_feature_get();
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.xstats_reset = vhost_dev_xstats_reset,
	.xstats_get = vhost_dev_xstats_get,
	.xstats_get_names = vhost_dev_xstats_get_names,
};

static int
eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
		     const unsigned int numa_node, uint64_t flags)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internal *internal = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct ether_addr *eth_addr = NULL;
	struct rte_vhost_vring_state *vring_state = NULL;
	struct internal_list *list = NULL;

	RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
		numa_node);

	/* now do all data allocation - for eth_dev structure
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	internal = rte_zmalloc_socket(name, sizeof(*internal), 0, numa_node);
	if (internal == NULL)
		goto error;

	list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
	if (list == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto error;

	eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
	if (eth_addr == NULL)
		goto error;
	*eth_addr = base_eth_addr;
	eth_addr->addr_bytes[5] = eth_dev->data->port_id;

	vring_state = rte_zmalloc_socket(name,
			sizeof(*vring_state), 0, numa_node);
	if (vring_state == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internal,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	internal->dev_name = strdup(name);
	if (internal->dev_name == NULL)
		goto error;
	internal->iface_name = strdup(iface_name);
	if (internal->iface_name == NULL)
		goto error;

	list->eth_dev = eth_dev;
	pthread_mutex_lock(&internal_list_lock);
	TAILQ_INSERT_TAIL(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_spinlock_init(&vring_state->lock);
	vring_states[eth_dev->data->port_id] = vring_state;

	data->dev_private = internal;
	data->port_id = eth_dev->data->port_id;
	memmove(data->name, eth_dev->data->name, sizeof(data->name));
	data->nb_rx_queues = queues;
	data->nb_tx_queues = queues;
	internal->max_queues = queues;
	data->dev_link = pmd_link;
	data->mac_addrs = eth_addr;

	/* We'll replace the 'data' originally allocated by eth_dev. So the
	 * vhost PMD resources won't be shared between multiple processes.
	 */
	eth_dev->data = data;
	eth_dev->dev_ops = &ops;
	eth_dev->driver = NULL;
	data->dev_flags =
		RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = internal->dev_name;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_vhost_rx;
	eth_dev->tx_pkt_burst = eth_vhost_tx;

	if (rte_vhost_driver_register(iface_name, flags))
		goto error;

	/* We need only one message handling thread */
	if (rte_atomic16_add_return(&nb_started_ports, 1) == 1) {
		if (vhost_driver_session_start())
			goto error;
	}

	return data->port_id;

error:
	if (internal)
		free(internal->dev_name);
	rte_free(vring_state);
	rte_free(eth_addr);
	if (eth_dev)
		rte_eth_dev_release_port(eth_dev);
	rte_free(internal);
	rte_free(list);
	rte_free(data);

	return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
	const char **iface_name = extra_args;

	if (value == NULL)
		return -1;

	*iface_name = value;

	return 0;
}

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
	uint16_t *n = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	*n = (uint16_t)strtoul(value, NULL, 0);
	if (*n == USHRT_MAX && errno == ERANGE)
		return -1;

	return 0;
}

static int
rte_pmd_vhost_probe(const char *name, const char *params)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	char *iface_name;
	uint16_t queues;
	uint64_t flags = 0;
	int client_mode = 0;
	int dequeue_zero_copy = 0;

	RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n", name);

	kvlist = rte_kvargs_parse(params, valid_arguments);
	if (kvlist == NULL)
		return -1;

	if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
					 &open_iface, &iface_name);
		if (ret < 0)
			goto out_free;
	} else {
		ret = -1;
		goto out_free;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
					 &open_int, &queues);
		if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
			goto out_free;
	} else
		queues = 1;

	if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
					 &open_int, &client_mode);
		if (ret < 0)
			goto out_free;

		if (client_mode)
			flags |= RTE_VHOST_USER_CLIENT;
	}

	if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
					 &open_int, &dequeue_zero_copy);
		if (ret < 0)
			goto out_free;

		if (dequeue_zero_copy)
			flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
	}

	eth_dev_vhost_create(name, iface_name, queues, rte_socket_id(), flags);

out_free:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_vhost_remove(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internal *internal;
	struct internal_list *list;
	unsigned int i;

	RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -ENODEV;

	internal = eth_dev->data->dev_private;
	if (internal == NULL)
		return -ENODEV;

	list = find_internal_resource(internal->iface_name);
	if (list == NULL)
		return -ENODEV;

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_REMOVE(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);
	rte_free(list);

	eth_dev_stop(eth_dev);

	rte_vhost_driver_unregister(internal->iface_name);

	if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)
		vhost_driver_session_stop();

	rte_free(vring_states[eth_dev->data->port_id]);
	vring_states[eth_dev->data->port_id] = NULL;

	free(internal->dev_name);
	free(internal->iface_name);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
		rte_free(eth_dev->data->rx_queues[i]);
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		rte_free(eth_dev->data->tx_queues[i]);

	rte_free(eth_dev->data->mac_addrs);
	rte_free(eth_dev->data);
	rte_free(internal);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_vhost_drv = {
	.probe = rte_pmd_vhost_probe,
	.remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
	"iface=<ifc> "
	"queues=<int>");
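
/*
 * Example invocation (illustrative; exact EAL flags depend on the setup):
 *   testpmd -c 0x3 -n 4 --vdev 'net_vhost0,iface=/tmp/sock0,queues=1' -- -i
 * The "eth_vhost" alias above keeps devargs written for older releases
 * working.
 */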