4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 #include <rte_ethdev.h>
41 #include <rte_ethdev_pci.h>
42 #include <rte_memcpy.h>
43 #include <rte_string_fns.h>
44 #include <rte_memzone.h>
45 #include <rte_malloc.h>
46 #include <rte_atomic.h>
47 #include <rte_branch_prediction.h>
49 #include <rte_bus_pci.h>
50 #include <rte_ether.h>
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_cpuflags.h>
55 #include <rte_memory.h>
59 #include "virtio_ethdev.h"
60 #include "virtio_pci.h"
61 #include "virtio_logs.h"
62 #include "virtqueue.h"
63 #include "virtio_rxtx.h"
/*
 * Forward declarations for the eth_dev_ops callbacks and internal helpers
 * implemented later in this file.
 * NOTE(review): this extract is missing original source lines (gaps in the
 * embedded numbering), so some prototypes appear truncated; compare against
 * the upstream file before editing.
 */
65 static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
66 static int virtio_dev_configure(struct rte_eth_dev *dev);
67 static int virtio_dev_start(struct rte_eth_dev *dev);
68 static void virtio_dev_stop(struct rte_eth_dev *dev);
69 static void virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
70 static void virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
71 static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
72 static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
73 static void virtio_dev_info_get(struct rte_eth_dev *dev,
74 struct rte_eth_dev_info *dev_info);
75 static int virtio_dev_link_update(struct rte_eth_dev *dev,
76 int wait_to_complete);
77 static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
79 static void virtio_set_hwaddr(struct virtio_hw *hw);
80 static void virtio_get_hwaddr(struct virtio_hw *hw);
82 static int virtio_dev_stats_get(struct rte_eth_dev *dev,
83 struct rte_eth_stats *stats);
84 static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
85 struct rte_eth_xstat *xstats, unsigned n);
86 static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
87 struct rte_eth_xstat_name *xstats_names,
89 static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
90 static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
91 static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
92 uint16_t vlan_id, int on);
93 static int virtio_mac_addr_add(struct rte_eth_dev *dev,
94 struct ether_addr *mac_addr,
95 uint32_t index, uint32_t vmdq);
96 static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
97 static void virtio_mac_addr_set(struct rte_eth_dev *dev,
98 struct ether_addr *mac_addr);
100 static int virtio_dev_queue_stats_mapping_set(
101 struct rte_eth_dev *eth_dev,
107 * The set of PCI devices this driver supports
/* PCI vendor/device ID pairs matched at probe time; both the legacy
 * (transitional) and modern virtio-net device IDs are claimed.
 * The zeroed entry is the array terminator. */
109 static const struct rte_pci_id pci_id_virtio_map[] = {
110 { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
111 { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
112 { .vendor_id = 0, /* sentinel */ },
/* Maps an xstat display name to the byte offset of the corresponding
 * counter inside the per-queue stats structure.
 * NOTE(review): the offset member of this struct is not visible in this
 * extract (missing lines) but is implied by the initializers below. */
115 struct rte_virtio_xstats_name_off {
116 char name[RTE_ETH_XSTATS_NAME_SIZE];
120 /* [rt]x_qX_ is prepended to the name string here */
/* Per-RX-queue extended statistics: offsets into struct virtnet_rx.
 * size_bins[0] holds undersize packets; bins 1..7 cover increasing
 * packet-length ranges up to 1519+. */
121 static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
122 {"good_packets", offsetof(struct virtnet_rx, stats.packets)},
123 {"good_bytes", offsetof(struct virtnet_rx, stats.bytes)},
124 {"errors", offsetof(struct virtnet_rx, stats.errors)},
125 {"multicast_packets", offsetof(struct virtnet_rx, stats.multicast)},
126 {"broadcast_packets", offsetof(struct virtnet_rx, stats.broadcast)},
127 {"undersize_packets", offsetof(struct virtnet_rx, stats.size_bins[0])},
128 {"size_64_packets", offsetof(struct virtnet_rx, stats.size_bins[1])},
129 {"size_65_127_packets", offsetof(struct virtnet_rx, stats.size_bins[2])},
130 {"size_128_255_packets", offsetof(struct virtnet_rx, stats.size_bins[3])},
131 {"size_256_511_packets", offsetof(struct virtnet_rx, stats.size_bins[4])},
132 {"size_512_1023_packets", offsetof(struct virtnet_rx, stats.size_bins[5])},
133 {"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
134 {"size_1519_max_packets", offsetof(struct virtnet_rx, stats.size_bins[7])},
137 /* [rt]x_qX_ is prepended to the name string here */
/* Per-TX-queue extended statistics: same layout as the RX table but
 * offsets resolve into struct virtnet_tx. */
138 static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
139 {"good_packets", offsetof(struct virtnet_tx, stats.packets)},
140 {"good_bytes", offsetof(struct virtnet_tx, stats.bytes)},
141 {"errors", offsetof(struct virtnet_tx, stats.errors)},
142 {"multicast_packets", offsetof(struct virtnet_tx, stats.multicast)},
143 {"broadcast_packets", offsetof(struct virtnet_tx, stats.broadcast)},
144 {"undersize_packets", offsetof(struct virtnet_tx, stats.size_bins[0])},
145 {"size_64_packets", offsetof(struct virtnet_tx, stats.size_bins[1])},
146 {"size_65_127_packets", offsetof(struct virtnet_tx, stats.size_bins[2])},
147 {"size_128_255_packets", offsetof(struct virtnet_tx, stats.size_bins[3])},
148 {"size_256_511_packets", offsetof(struct virtnet_tx, stats.size_bins[4])},
149 {"size_512_1023_packets", offsetof(struct virtnet_tx, stats.size_bins[5])},
150 {"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
151 {"size_1519_max_packets", offsetof(struct virtnet_tx, stats.size_bins[7])},
/* Number of xstats entries per RX/TX queue (array-element counts of the
 * tables above). */
154 #define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
155 sizeof(rte_virtio_rxq_stat_strings[0]))
156 #define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
157 sizeof(rte_virtio_txq_stat_strings[0]))
159 struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
/*
 * Send a control command on the control virtqueue and busy-wait for the
 * device's acknowledgement.
 *
 * The descriptor chain layout (enforced by qemu, see comment below) is:
 *   desc[head]      -> command header (device-readable)
 *   next pkt_num    -> one descriptor per data item, dlen[k] bytes each
 *   final desc      -> status byte (device-writable, VRING_DESC_F_WRITE)
 * Returns the status byte written back by the device; status starts as ~0
 * so a device that never writes is detected as failure.
 * NOTE(review): several lines (error returns, free-count bookkeeping,
 * loop delays) are missing from this extract.
 */
162 virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
163 int *dlen, int pkt_num)
167 virtio_net_ctrl_ack status = ~0;
168 struct virtio_pmd_ctrl *result;
169 struct virtqueue *vq;
171 ctrl->status = status;
/* Control queue is optional; bail out if it was never negotiated/set up. */
173 if (!cvq || !cvq->vq) {
174 PMD_INIT_LOG(ERR, "Control queue is not supported.");
178 head = vq->vq_desc_head_idx;
180 PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
181 "vq->hw->cvq = %p vq = %p",
182 vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
/* Need pkt_num data descriptors plus one for the header and one for
 * the status byte. */
184 if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1))
/* Stage the whole command (header + data + status) in the hdr memzone
 * so the device can DMA it. */
187 memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
188 sizeof(struct virtio_pmd_ctrl));
191 * Format is enforced in qemu code:
192 * One TX packet for header;
193 * At least one TX packet per argument;
194 * One RX packet for ACK.
196 vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
197 vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
198 vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
200 i = vq->vq_ring.desc[head].next;
/* One descriptor per data argument; sum accumulates the byte offset of
 * each argument inside the staged buffer. */
202 for (k = 0; k < pkt_num; k++) {
203 vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
204 vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
205 + sizeof(struct virtio_net_ctrl_hdr)
206 + sizeof(ctrl->status) + sizeof(uint8_t)*sum;
207 vq->vq_ring.desc[i].len = dlen[k];
210 i = vq->vq_ring.desc[i].next;
/* Final, device-writable descriptor receives the ack/status byte. */
213 vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
214 vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
215 + sizeof(struct virtio_net_ctrl_hdr);
216 vq->vq_ring.desc[i].len = sizeof(ctrl->status);
219 vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;
221 vq_update_avail_ring(vq, head);
222 vq_update_avail_idx(vq);
224 PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
226 virtqueue_notify(vq);
/* Busy-wait until the device places the chain on the used ring. */
229 while (VIRTQUEUE_NUSED(vq) == 0) {
/* Reclaim completed descriptor chains back onto the free list. */
234 while (VIRTQUEUE_NUSED(vq)) {
235 uint32_t idx, desc_idx, used_idx;
236 struct vring_used_elem *uep;
238 used_idx = (uint32_t)(vq->vq_used_cons_idx
239 & (vq->vq_nentries - 1));
240 uep = &vq->vq_ring.used->ring[used_idx];
241 idx = (uint32_t) uep->id;
/* Walk to the tail of the chain, counting freed descriptors. */
244 while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
245 desc_idx = vq->vq_ring.desc[desc_idx].next;
249 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
250 vq->vq_desc_head_idx = idx;
252 vq->vq_used_cons_idx++;
256 PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
257 vq->vq_free_cnt, vq->vq_desc_head_idx);
/* The device wrote the status back into the staged buffer. */
259 result = cvq->virtio_net_hdr_mz->addr;
261 return result->status;
/*
 * Tell the device how many RX/TX queue pairs to use, via the
 * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET control command.
 * Logs an error if the command fails (multi-queue was already configured
 * by that point, hence "too late now").
 */
265 virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
267 struct virtio_hw *hw = dev->data->dev_private;
268 struct virtio_pmd_ctrl ctrl;
272 ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
273 ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
274 memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
276 dlen[0] = sizeof(uint16_t);
278 ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
280 PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
281 "failed, this is too late now...");
/* ethdev queue_release callback: intentionally a no-op — queues are freed
 * en masse in virtio_free_queues() at close time. */
289 virtio_dev_queue_release(void *queue __rte_unused)
/* Classify a virtqueue index: even indexes are RX, odd are TX, and the
 * index right after the last RX/TX pair is the control queue. */
295 virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
297 if (vtpci_queue_idx == hw->max_queue_pairs * 2)
299 else if (vtpci_queue_idx % 2 == 0)
/* Total number of virtqueues: two per queue pair, plus one control queue
 * when VIRTIO_NET_F_CTRL_VQ was negotiated. */
306 virtio_get_nr_vq(struct virtio_hw *hw)
308 uint16_t nr_vq = hw->max_queue_pairs * 2;
310 if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
/*
 * (Re)initialize a virtqueue's vring and software bookkeeping.
 * Zeroes the ring memory, lays out desc/avail/used via vring_init(),
 * resets all producer/consumer indexes, chains every descriptor onto the
 * free list, and masks device->guest interrupts.
 */
317 virtio_init_vring(struct virtqueue *vq)
319 int size = vq->vq_nentries;
320 struct vring *vr = &vq->vq_ring;
321 uint8_t *ring_mem = vq->vq_ring_virt_mem;
323 PMD_INIT_FUNC_TRACE();
326 * Reinitialise since virtio port might have been stopped and restarted
328 memset(ring_mem, 0, vq->vq_ring_size);
329 vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
330 vq->vq_used_cons_idx = 0;
331 vq->vq_desc_head_idx = 0;
332 vq->vq_avail_idx = 0;
/* Tail of the free-descriptor chain = last entry; all entries free. */
333 vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
334 vq->vq_free_cnt = vq->vq_nentries;
335 memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
337 vring_desc_init(vr->desc, size);
340 * Disable device(host) interrupting guest
342 virtqueue_disable_intr(vq);
/*
 * Allocate and set up one virtqueue (RX, TX or control) identified by its
 * PCI queue index:
 *  - query the queue size from the device and validate it (power of two),
 *  - allocate the virtqueue struct + per-descriptor extra array,
 *  - reserve a DMA-able memzone for the vring and initialize it,
 *  - reserve a header memzone (TX: one virtio_net_hdr + indirect ring per
 *    slot; CQ: one page for command/data/status),
 *  - for RX, allocate the mbuf software ring,
 *  - for virtio_user, switch all addresses to virtual,
 *  - finally hand the queue to the transport via setup_queue().
 * NOTE(review): error-handling paths and several assignments are missing
 * from this extract; the trailing memzone frees belong to the failure
 * cleanup path.
 */
346 virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
348 char vq_name[VIRTQUEUE_MAX_NAME_SZ];
349 char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
350 const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
351 unsigned int vq_size, size;
352 struct virtio_hw *hw = dev->data->dev_private;
353 struct virtnet_rx *rxvq = NULL;
354 struct virtnet_tx *txvq = NULL;
355 struct virtnet_ctl *cvq = NULL;
356 struct virtqueue *vq;
357 size_t sz_hdr_mz = 0;
358 void *sw_ring = NULL;
359 int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
362 PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
365 * Read the virtqueue size from the Queue Size field
366 * Always power of 2 and if 0 virtqueue does not exist
368 vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
369 PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
371 PMD_INIT_LOG(ERR, "virtqueue does not exist");
375 if (!rte_is_power_of_2(vq_size)) {
376 PMD_INIT_LOG(ERR, "virtqueue size is not powerof 2");
380 snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
381 dev->data->port_id, vtpci_queue_idx);
/* virtqueue struct is followed by one vq_desc_extra per descriptor. */
383 size = RTE_ALIGN_CEIL(sizeof(*vq) +
384 vq_size * sizeof(struct vq_desc_extra),
385 RTE_CACHE_LINE_SIZE);
386 if (queue_type == VTNET_TQ) {
388 * For each xmit packet, allocate a virtio_net_hdr
389 * and indirect ring elements
391 sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
392 } else if (queue_type == VTNET_CQ) {
393 /* Allocate a page for control vq command, data and status */
394 sz_hdr_mz = PAGE_SIZE;
397 vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
400 PMD_INIT_LOG(ERR, "can not allocate vq");
403 hw->vqs[vtpci_queue_idx] = vq;
406 vq->vq_queue_index = vtpci_queue_idx;
407 vq->vq_nentries = vq_size;
410 * Reserve a memzone for vring elements
412 size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
413 vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
414 PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
415 size, vq->vq_ring_size);
417 mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
419 0, VIRTIO_PCI_VRING_ALIGN);
/* EEXIST means the zone survived a previous run (e.g. restart);
 * reuse it instead of failing. */
421 if (rte_errno == EEXIST)
422 mz = rte_memzone_lookup(vq_name);
429 memset(mz->addr, 0, mz->len);
431 vq->vq_ring_mem = mz->phys_addr;
432 vq->vq_ring_virt_mem = mz->addr;
433 PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
434 (uint64_t)mz->phys_addr);
435 PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
436 (uint64_t)(uintptr_t)mz->addr);
437 virtio_init_vring(vq);
441 snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
442 dev->data->port_id, vtpci_queue_idx);
443 hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
445 RTE_CACHE_LINE_SIZE);
446 if (hdr_mz == NULL) {
447 if (rte_errno == EEXIST)
448 hdr_mz = rte_memzone_lookup(vq_hdr_name);
449 if (hdr_mz == NULL) {
456 if (queue_type == VTNET_RQ) {
/* Extra RX_MAX_BURST slots let the vector RX path over-read safely. */
457 size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
458 sizeof(vq->sw_ring[0]);
460 sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
461 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
463 PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
468 vq->sw_ring = sw_ring;
471 rxvq->port_id = dev->data->port_id;
473 } else if (queue_type == VTNET_TQ) {
476 txvq->port_id = dev->data->port_id;
478 txvq->virtio_net_hdr_mz = hdr_mz;
479 txvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
480 } else if (queue_type == VTNET_CQ) {
484 cvq->virtio_net_hdr_mz = hdr_mz;
485 cvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
486 memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
491 /* For virtio_user case (that is when hw->dev is NULL), we use
492 * virtual address. And we need properly set _offset_, please see
493 * VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
495 if (!hw->virtio_user_dev)
496 vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
498 vq->vq_ring_mem = (uintptr_t)mz->addr;
499 vq->offset = offsetof(struct rte_mbuf, buf_addr);
500 if (queue_type == VTNET_TQ)
501 txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
502 else if (queue_type == VTNET_CQ)
503 cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
/* Pre-build each TX slot's indirect descriptor table: the first
 * indirect descriptor always points at that slot's virtio_net_hdr. */
506 if (queue_type == VTNET_TQ) {
507 struct virtio_tx_region *txr;
511 memset(txr, 0, vq_size * sizeof(*txr));
512 for (i = 0; i < vq_size; i++) {
513 struct vring_desc *start_dp = txr[i].tx_indir;
515 vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));
517 /* first indirect descriptor is always the tx header */
518 start_dp->addr = txvq->virtio_net_hdr_mem
520 + offsetof(struct virtio_tx_region, tx_hdr);
522 start_dp->len = hw->vtnet_hdr_size;
523 start_dp->flags = VRING_DESC_F_NEXT;
527 if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
528 PMD_INIT_LOG(ERR, "setup_queue failed");
/* Failure cleanup: release the memzones reserved above. */
536 rte_memzone_free(hdr_mz);
537 rte_memzone_free(mz);
/*
 * Release every virtqueue and its associated resources: the RX software
 * ring, the vring memzones, and (TX/CQ) the header memzones.
 * Counterpart of virtio_alloc_queues()/virtio_init_queue().
 */
544 virtio_free_queues(struct virtio_hw *hw)
546 uint16_t nr_vq = virtio_get_nr_vq(hw);
547 struct virtqueue *vq;
554 for (i = 0; i < nr_vq; i++) {
559 queue_type = virtio_get_queue_type(hw, i);
560 if (queue_type == VTNET_RQ) {
561 rte_free(vq->sw_ring);
562 rte_memzone_free(vq->rxq.mz);
563 } else if (queue_type == VTNET_TQ) {
564 rte_memzone_free(vq->txq.mz);
565 rte_memzone_free(vq->txq.virtio_net_hdr_mz);
/* Remaining case: control queue memzones. */
567 rte_memzone_free(vq->cq.mz);
568 rte_memzone_free(vq->cq.virtio_net_hdr_mz);
/*
 * Allocate the hw->vqs pointer array and initialize every virtqueue.
 * On any per-queue failure, tears down everything already built via
 * virtio_free_queues().
 */
580 virtio_alloc_queues(struct rte_eth_dev *dev)
582 struct virtio_hw *hw = dev->data->dev_private;
583 uint16_t nr_vq = virtio_get_nr_vq(hw);
587 hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
589 PMD_INIT_LOG(ERR, "failed to allocate vqs");
593 for (i = 0; i < nr_vq; i++) {
594 ret = virtio_init_queue(dev, i);
596 virtio_free_queues(hw);
/* Forward declaration; defined near the interrupt helpers below. */
604 static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
/*
 * ethdev close callback: quiesce interrupts (config IRQ and per-queue
 * vectors), disable/tear down the interrupt eventfds, then free all
 * queued mbufs and the virtqueues themselves.
 */
607 virtio_dev_close(struct rte_eth_dev *dev)
609 struct virtio_hw *hw = dev->data->dev_private;
610 struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
612 PMD_INIT_LOG(DEBUG, "virtio_dev_close");
/* Detach the link-status (config-change) interrupt vector first. */
615 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
616 VTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
618 virtio_queues_unbind_intr(dev);
620 if (intr_conf->lsc || intr_conf->rxq) {
621 rte_intr_disable(dev->intr_handle);
622 rte_intr_efd_disable(dev->intr_handle);
623 rte_free(dev->intr_handle->intr_vec);
624 dev->intr_handle->intr_vec = NULL;
628 virtio_dev_free_mbufs(dev);
629 virtio_free_queues(hw);
/*
 * The next four callbacks toggle promiscuous and all-multicast RX modes
 * via VIRTIO_NET_CTRL_RX control commands. All four follow the same
 * pattern: require the VIRTIO_NET_F_CTRL_RX feature, build the command,
 * send it on the control queue, and log on failure. The on/off payload
 * assignments are not visible in this extract (missing lines).
 */
633 virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
635 struct virtio_hw *hw = dev->data->dev_private;
636 struct virtio_pmd_ctrl ctrl;
640 if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
641 PMD_INIT_LOG(INFO, "host does not support rx control");
645 ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
646 ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
650 ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
652 PMD_INIT_LOG(ERR, "Failed to enable promisc");
/* Disable promiscuous mode (same command class, payload 0). */
656 virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
658 struct virtio_hw *hw = dev->data->dev_private;
659 struct virtio_pmd_ctrl ctrl;
663 if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
664 PMD_INIT_LOG(INFO, "host does not support rx control");
668 ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
669 ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
673 ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
675 PMD_INIT_LOG(ERR, "Failed to disable promisc");
/* Enable receipt of all multicast frames. */
679 virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
681 struct virtio_hw *hw = dev->data->dev_private;
682 struct virtio_pmd_ctrl ctrl;
686 if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
687 PMD_INIT_LOG(INFO, "host does not support rx control");
691 ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
692 ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
696 ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
698 PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
/* Disable all-multicast reception. */
702 virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
704 struct virtio_hw *hw = dev->data->dev_private;
705 struct virtio_pmd_ctrl ctrl;
709 if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
710 PMD_INIT_LOG(INFO, "host does not support rx control");
714 ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
715 ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
719 ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
721 PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
724 #define VLAN_TAG_LEN 4 /* 802.3ac tag (not DMA'd) */
/*
 * ethdev mtu_set callback: validate the requested MTU against the
 * Ethernet minimum and the device's maximum frame size (capped at
 * VIRTIO_MAX_RX_PKTLEN). The frame overhead accounts for the Ethernet
 * header, a VLAN tag, and (per the truncated line 729/730) presumably
 * the virtio net header — TODO confirm against upstream.
 */
726 virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
728 struct virtio_hw *hw = dev->data->dev_private;
729 uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
731 uint32_t frame_size = mtu + ether_hdr_len;
732 uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
734 max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
736 if (mtu < ETHER_MIN_MTU || frame_size > max_frame_size) {
737 PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
738 ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
/* ethdev rx_queue_intr_enable callback: unmask the RX interrupt for one
 * queue by re-enabling its virtqueue interrupt. */
745 virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
747 struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
748 struct virtqueue *vq = rxvq->vq;
750 virtqueue_enable_intr(vq);
/* ethdev rx_queue_intr_disable callback: mask the RX interrupt for one
 * queue. */
755 virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
757 struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
758 struct virtqueue *vq = rxvq->vq;
760 virtqueue_disable_intr(vq);
765 * dev_ops for virtio, bare necessities for basic operation
/* ethdev operations table wired into rte_eth_dev at init time; every
 * entry maps a generic ethdev callback to its virtio implementation. */
767 static const struct eth_dev_ops virtio_eth_dev_ops = {
768 .dev_configure = virtio_dev_configure,
769 .dev_start = virtio_dev_start,
770 .dev_stop = virtio_dev_stop,
771 .dev_close = virtio_dev_close,
772 .promiscuous_enable = virtio_dev_promiscuous_enable,
773 .promiscuous_disable = virtio_dev_promiscuous_disable,
774 .allmulticast_enable = virtio_dev_allmulticast_enable,
775 .allmulticast_disable = virtio_dev_allmulticast_disable,
776 .mtu_set = virtio_mtu_set,
777 .dev_infos_get = virtio_dev_info_get,
778 .stats_get = virtio_dev_stats_get,
779 .xstats_get = virtio_dev_xstats_get,
780 .xstats_get_names = virtio_dev_xstats_get_names,
781 .stats_reset = virtio_dev_stats_reset,
/* xstats share the same reset routine as basic stats. */
782 .xstats_reset = virtio_dev_stats_reset,
783 .link_update = virtio_dev_link_update,
784 .vlan_offload_set = virtio_dev_vlan_offload_set,
785 .rx_queue_setup = virtio_dev_rx_queue_setup,
786 .rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
787 .rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
788 .rx_queue_release = virtio_dev_queue_release,
789 .rx_descriptor_done = virtio_dev_rx_queue_done,
790 .tx_queue_setup = virtio_dev_tx_queue_setup,
791 .tx_queue_release = virtio_dev_queue_release,
792 /* collect stats per queue */
793 .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
794 .vlan_filter_set = virtio_vlan_filter_set,
795 .mac_addr_add = virtio_mac_addr_add,
796 .mac_addr_remove = virtio_mac_addr_remove,
797 .mac_addr_set = virtio_mac_addr_set,
/*
 * Atomically copy the current link status out of dev->data->dev_link
 * into *link using a 64-bit compare-and-set (the struct fits in 8 bytes).
 * Returns non-zero on CAS failure per the pattern below (the return
 * statements are not visible in this extract).
 */
801 virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
802 struct rte_eth_link *link)
804 struct rte_eth_link *dst = link;
805 struct rte_eth_link *src = &(dev->data->dev_link);
807 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
808 *(uint64_t *)src) == 0)
815 * Atomically writes the link status information into global
816 * structure rte_eth_dev.
819 * - Pointer to the structure rte_eth_dev to read from.
820 * - Pointer to the buffer to be saved with the link status.
823 * - On success, zero.
824 * - On failure, negative value.
827 virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
828 struct rte_eth_link *link)
830 struct rte_eth_link *dst = &(dev->data->dev_link);
831 struct rte_eth_link *src = link;
833 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
834 *(uint64_t *)src) == 0)
/*
 * Aggregate the per-queue software counters into the generic
 * rte_eth_stats structure: sums across all TX then RX queues, and also
 * fills the per-queue q_* arrays for the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues.
 */
841 virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
845 for (i = 0; i < dev->data->nb_tx_queues; i++) {
846 const struct virtnet_tx *txvq = dev->data->tx_queues[i];
850 stats->opackets += txvq->stats.packets;
851 stats->obytes += txvq->stats.bytes;
852 stats->oerrors += txvq->stats.errors;
854 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
855 stats->q_opackets[i] = txvq->stats.packets;
856 stats->q_obytes[i] = txvq->stats.bytes;
860 for (i = 0; i < dev->data->nb_rx_queues; i++) {
861 const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
865 stats->ipackets += rxvq->stats.packets;
866 stats->ibytes += rxvq->stats.bytes;
867 stats->ierrors += rxvq->stats.errors;
869 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
870 stats->q_ipackets[i] = rxvq->stats.packets;
871 stats->q_ibytes[i] = rxvq->stats.bytes;
/* Mbuf allocation failures are tracked at the device level. */
875 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
/*
 * ethdev xstats_get_names callback: emit one name per table entry per
 * queue ("rx_qN_..." then "tx_qN_...") into xstats_names, and return the
 * total count (return path not visible in this extract). When
 * xstats_names is NULL only the count is reported.
 */
878 static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
879 struct rte_eth_xstat_name *xstats_names,
880 __rte_unused unsigned limit)
886 unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
887 dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
889 if (xstats_names != NULL) {
890 /* Note: limit checked in rte_eth_xstats_names() */
892 for (i = 0; i < dev->data->nb_rx_queues; i++) {
893 struct virtqueue *rxvq = dev->data->rx_queues[i];
896 for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
897 snprintf(xstats_names[count].name,
898 sizeof(xstats_names[count].name),
900 rte_virtio_rxq_stat_strings[t].name);
905 for (i = 0; i < dev->data->nb_tx_queues; i++) {
906 struct virtqueue *txvq = dev->data->tx_queues[i];
909 for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
910 snprintf(xstats_names[count].name,
911 sizeof(xstats_names[count].name),
913 rte_virtio_txq_stat_strings[t].name);
/*
 * ethdev xstats_get callback: read each counter out of the per-queue
 * stats structures by byte offset (from the name/offset tables) and fill
 * the xstats array, RX queues first, then TX.
 */
923 virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
929 unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
930 dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
935 for (i = 0; i < dev->data->nb_rx_queues; i++) {
936 struct virtnet_rx *rxvq = dev->data->rx_queues[i];
943 for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
/* Counter value lives at a fixed byte offset inside virtnet_rx. */
944 xstats[count].value = *(uint64_t *)(((char *)rxvq) +
945 rte_virtio_rxq_stat_strings[t].offset);
946 xstats[count].id = count;
951 for (i = 0; i < dev->data->nb_tx_queues; i++) {
952 struct virtnet_tx *txvq = dev->data->tx_queues[i];
959 for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
960 xstats[count].value = *(uint64_t *)(((char *)txvq) +
961 rte_virtio_txq_stat_strings[t].offset);
962 xstats[count].id = count;
/* ethdev stats_get callback: thin wrapper over virtio_update_stats(). */
971 virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
973 virtio_update_stats(dev, stats);
/*
 * ethdev stats_reset (and xstats_reset) callback: zero every software
 * counter on every TX and RX queue, including all 8 size-histogram bins.
 */
979 virtio_dev_stats_reset(struct rte_eth_dev *dev)
983 for (i = 0; i < dev->data->nb_tx_queues; i++) {
984 struct virtnet_tx *txvq = dev->data->tx_queues[i];
988 txvq->stats.packets = 0;
989 txvq->stats.bytes = 0;
990 txvq->stats.errors = 0;
991 txvq->stats.multicast = 0;
992 txvq->stats.broadcast = 0;
993 memset(txvq->stats.size_bins, 0,
994 sizeof(txvq->stats.size_bins[0]) * 8);
997 for (i = 0; i < dev->data->nb_rx_queues; i++) {
998 struct virtnet_rx *rxvq = dev->data->rx_queues[i];
1002 rxvq->stats.packets = 0;
1003 rxvq->stats.bytes = 0;
1004 rxvq->stats.errors = 0;
1005 rxvq->stats.multicast = 0;
1006 rxvq->stats.broadcast = 0;
1007 memset(rxvq->stats.size_bins, 0,
1008 sizeof(rxvq->stats.size_bins[0]) * 8);
/* Push the cached MAC (hw->mac_addr) into the device config space. */
1013 virtio_set_hwaddr(struct virtio_hw *hw)
1015 vtpci_write_dev_config(hw,
1016 offsetof(struct virtio_net_config, mac),
1017 &hw->mac_addr, ETHER_ADDR_LEN);
/* Fetch the device MAC if VIRTIO_NET_F_MAC was negotiated; otherwise
 * generate a random locally-administered address and write it back. */
1021 virtio_get_hwaddr(struct virtio_hw *hw)
1023 if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
1024 vtpci_read_dev_config(hw,
1025 offsetof(struct virtio_net_config, mac),
1026 &hw->mac_addr, ETHER_ADDR_LEN);
1028 eth_random_addr(&hw->mac_addr[0]);
1029 virtio_set_hwaddr(hw);
/*
 * Program the device's unicast and multicast MAC filter tables with one
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command carrying two variable-length
 * payloads (entry count + packed addresses each). Requires the
 * VIRTIO_NET_F_CTRL_MAC_ADDR feature.
 */
1034 virtio_mac_table_set(struct virtio_hw *hw,
1035 const struct virtio_net_ctrl_mac *uc,
1036 const struct virtio_net_ctrl_mac *mc)
1038 struct virtio_pmd_ctrl ctrl;
1041 if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
1042 PMD_DRV_LOG(INFO, "host does not support mac table");
1046 ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
1047 ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
/* Payload 0: unicast table (count followed by packed addresses). */
1049 len[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries);
1050 memcpy(ctrl.data, uc, len[0]);
/* Payload 1: multicast table, appended right after the unicast one. */
1052 len[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries);
1053 memcpy(ctrl.data + len[0], mc, len[1]);
1055 err = virtio_send_command(hw->cvq, &ctrl, len, 2);
1057 PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
/*
 * ethdev mac_addr_add callback: rebuild the full unicast and multicast
 * filter tables from dev->data->mac_addrs, substituting the new address
 * at the given index, then push both tables to the device.
 * Tables are built in stack (alloca) buffers sized for the maximum
 * VIRTIO_MAX_MAC_ADDRS entries.
 */
1062 virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1063 uint32_t index, uint32_t vmdq __rte_unused)
1065 struct virtio_hw *hw = dev->data->dev_private;
1066 const struct ether_addr *addrs = dev->data->mac_addrs;
1068 struct virtio_net_ctrl_mac *uc, *mc;
1070 if (index >= VIRTIO_MAX_MAC_ADDRS) {
1071 PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
1075 uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
1077 mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
/* Route each address to the unicast or multicast table by its type. */
1080 for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
1081 const struct ether_addr *addr
1082 = (i == index) ? mac_addr : addrs + i;
1083 struct virtio_net_ctrl_mac *tbl
1084 = is_multicast_ether_addr(addr) ? mc : uc;
1086 memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
1089 return virtio_mac_table_set(hw, uc, mc);
/*
 * ethdev mac_addr_remove callback: rebuild both filter tables from the
 * address array, skipping the removed index and any all-zero entries,
 * then reprogram the device.
 */
1093 virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
1095 struct virtio_hw *hw = dev->data->dev_private;
1096 struct ether_addr *addrs = dev->data->mac_addrs;
1097 struct virtio_net_ctrl_mac *uc, *mc;
1100 if (index >= VIRTIO_MAX_MAC_ADDRS) {
1101 PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
1105 uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
1107 mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
1110 for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
1111 struct virtio_net_ctrl_mac *tbl;
/* Skip the entry being removed and unused (zero) slots. */
1113 if (i == index || is_zero_ether_addr(addrs + i))
1116 tbl = is_multicast_ether_addr(addrs + i) ? mc : uc;
1117 memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);
1120 virtio_mac_table_set(hw, uc, mc);
/*
 * ethdev mac_addr_set callback: change the default MAC. Prefers the
 * atomic VIRTIO_NET_CTRL_MAC_ADDR_SET control command when available;
 * otherwise falls back to a byte-by-byte config-space write (only valid
 * when VIRTIO_NET_F_MAC is negotiated).
 */
1124 virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
1126 struct virtio_hw *hw = dev->data->dev_private;
1128 memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);
1130 /* Use atomic update if available */
1131 if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
1132 struct virtio_pmd_ctrl ctrl;
1133 int len = ETHER_ADDR_LEN;
1135 ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
1136 ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
1138 memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
/* NOTE(review): return value of virtio_send_command is ignored here;
 * upstream later added error reporting — verify desired behavior. */
1139 virtio_send_command(hw->cvq, &ctrl, &len, 1);
1140 } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
1141 virtio_set_hwaddr(hw);
/*
 * ethdev vlan_filter_set callback: add or remove a VLAN id from the
 * device filter via VIRTIO_NET_CTRL_VLAN_ADD/DEL. Requires the
 * VIRTIO_NET_F_CTRL_VLAN feature; returns the device status from the
 * control command.
 */
1145 virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1147 struct virtio_hw *hw = dev->data->dev_private;
1148 struct virtio_pmd_ctrl ctrl;
1151 if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
1154 ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
1155 ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
1156 memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
1157 len = sizeof(vlan_id);
1159 return virtio_send_command(hw->cvq, &ctrl, &len, 1);
/*
 * Negotiate the feature set with the device:
 *  - read the device (host) feature bits,
 *  - drop VIRTIO_NET_F_MTU from our request if the device advertises an
 *    MTU below the Ethernet minimum,
 *  - write back the intersection via vtpci_negotiate_features(),
 *  - for modern (VERSION_1) devices, set FEATURES_OK and verify the
 *    device accepted it.
 * Stores the originally requested features in hw->req_guest_features.
 */
1163 virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
1165 uint64_t host_features;
1167 /* Prepare guest_features: feature that driver wants to support */
1168 PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
1171 /* Read device(host) feature bits */
1172 host_features = VTPCI_OPS(hw)->get_features(hw);
1173 PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
1176 /* If supported, ensure MTU value is valid before acknowledging it. */
1177 if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
1178 struct virtio_net_config config;
1180 vtpci_read_dev_config(hw,
1181 offsetof(struct virtio_net_config, mtu),
1182 &config.mtu, sizeof(config.mtu));
1184 if (config.mtu < ETHER_MIN_MTU)
1185 req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
1189 * Negotiate features: Subset of device feature bits are written back
1190 * guest feature bits.
1192 hw->guest_features = req_features;
1193 hw->guest_features = vtpci_negotiate_features(hw, host_features);
1194 PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
1195 hw->guest_features);
/* Modern-device handshake: FEATURES_OK must stick or negotiation
 * failed (virtio 1.0 spec section 3.1). */
1198 if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
1200 "VIRTIO_F_VERSION_1 features is not enabled.");
1203 vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
1204 if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
1206 "failed to set FEATURES_OK status!");
1211 hw->req_guest_features = req_features;
1217 * Process Virtio Config changed interrupt and call the callback
1218 * if link state changed.
/*
 * Interrupt service routine registered on the device's interrupt handle.
 * Reading the ISR register both returns and clears the interrupt cause;
 * on a config-change interrupt, refresh the link state and fire the
 * LSC callback to applications if it changed.
 */
1221 virtio_interrupt_handler(void *param)
1223 struct rte_eth_dev *dev = param;
1224 struct virtio_hw *hw = dev->data->dev_private;
1227 /* Read interrupt status which clears interrupt */
1228 isr = vtpci_isr(hw);
1229 PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
/* Re-arm the interrupt before processing so no edge is lost. */
1231 if (rte_intr_enable(dev->intr_handle) < 0)
1232 PMD_DRV_LOG(ERR, "interrupt enable failed");
1234 if (isr & VIRTIO_PCI_ISR_CONFIG) {
1235 if (virtio_dev_link_update(dev, 0) == 0)
1236 _rte_eth_dev_callback_process(dev,
1237 RTE_ETH_EVENT_INTR_LSC,
1243 /* set rx and tx handlers according to what is supported */
/*
 * Select the RX/TX burst functions for this port:
 *  RX: vectorized "simple" path > mergeable-buffer path (when
 *      VIRTIO_NET_F_MRG_RXBUF negotiated) > standard path.
 *  TX: "simple" path when enabled, else standard path.
 */
1245 set_rxtx_funcs(struct rte_eth_dev *eth_dev)
1247 struct virtio_hw *hw = eth_dev->data->dev_private;
1249 if (hw->use_simple_rx) {
1250 PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
1251 eth_dev->data->port_id);
1252 eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
1253 } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1255 "virtio: using mergeable buffer Rx path on port %u",
1256 eth_dev->data->port_id);
1257 eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
1259 PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
1260 eth_dev->data->port_id);
1261 eth_dev->rx_pkt_burst = &virtio_recv_pkts;
1264 if (hw->use_simple_tx) {
1265 PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
1266 eth_dev->data->port_id);
1267 eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
1269 PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
1270 eth_dev->data->port_id);
1271 eth_dev->tx_pkt_burst = virtio_xmit_pkts;
1275 /* Only support 1:1 queue/interrupt mapping so far.
1276 * TODO: support n:1 queue/interrupt mapping when there are limited number of
1277 * interrupt vectors (<N+1).
/* Binds each Rx queue i to MSI-X vector i + 1; vector 0 is used for the
 * config/link-state interrupt (see set_config_irq(hw, 0) in
 * virtio_dev_configure).  Returns an error when the device rejects a
 * vector (VIRTIO_MSI_NO_VECTOR).
 */
1280 virtio_queues_bind_intr(struct rte_eth_dev *dev)
1283 struct virtio_hw *hw = dev->data->dev_private;
1285 PMD_INIT_LOG(INFO, "queue/interrupt binding");
1286 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
1287 dev->intr_handle->intr_vec[i] = i + 1;
/* NOTE(review): Rx queue i is at hw->vqs[i * 2]; the unbind path uses
 * i * VTNET_CQ, which must therefore equal 2 — keep the two in sync.
 */
1288 if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
1289 VIRTIO_MSI_NO_VECTOR) {
1290 PMD_DRV_LOG(ERR, "failed to set queue vector");
/* Detaches every Rx queue from its MSI-X vector by programming
 * VIRTIO_MSI_NO_VECTOR.  Mirror of virtio_queues_bind_intr(); note the
 * bind path indexes hw->vqs with i * 2 while this uses i * VTNET_CQ.
 */
1299 virtio_queues_unbind_intr(struct rte_eth_dev *dev)
1302 struct virtio_hw *hw = dev->data->dev_private;
1304 PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
1305 for (i = 0; i < dev->data->nb_rx_queues; ++i)
1306 VTPCI_OPS(hw)->set_queue_irq(hw,
1307 hw->vqs[i * VTNET_CQ],
1308 VIRTIO_MSI_NO_VECTOR);
/* Sets up Rx-interrupt mode: verifies multi-vector capability, creates
 * one eventfd per Rx queue, allocates the queue->vector map, re-registers
 * the LSC callback (so max_intr is recomputed), enables MSI-X, then binds
 * each Rx queue to its vector via virtio_queues_bind_intr().
 */
1312 virtio_configure_intr(struct rte_eth_dev *dev)
1314 struct virtio_hw *hw = dev->data->dev_private;
1316 if (!rte_intr_cap_multiple(dev->intr_handle)) {
1317 PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
1321 if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) {
1322 PMD_INIT_LOG(ERR, "Fail to create eventfd");
/* Lazily allocate the per-queue vector map, one int per queue pair. */
1326 if (!dev->intr_handle->intr_vec) {
1327 dev->intr_handle->intr_vec =
1328 rte_zmalloc("intr_vec",
1329 hw->max_queue_pairs * sizeof(int), 0);
1330 if (!dev->intr_handle->intr_vec) {
1331 PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
1332 hw->max_queue_pairs);
1337 /* Re-register callback to update max_intr */
1338 rte_intr_callback_unregister(dev->intr_handle,
1339 virtio_interrupt_handler,
1341 rte_intr_callback_register(dev->intr_handle,
1342 virtio_interrupt_handler,
1345 /* DO NOT try to remove this! This function will enable msix, or QEMU
1346 * will encounter SIGSEGV when DRIVER_OK is sent.
1347 * And for legacy devices, this should be done before queue/vec binding
1348 * to change the config size from 20 to 24, or VIRTIO_MSI_QUEUE_VECTOR
1349 * (22) will be ignored.
1351 if (rte_intr_enable(dev->intr_handle) < 0) {
1352 PMD_DRV_LOG(ERR, "interrupt enable failed");
1356 if (virtio_queues_bind_intr(dev) < 0) {
1357 PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
1364 /* reset device and renegotiate features if needed */
/* Full device (re)initialization: status handshake (ACK, DRIVER),
 * feature negotiation against req_features, LSC flag setup, header-size
 * selection, MAC/config-space readout, queue allocation, and optional
 * Rx-interrupt configuration.  Called from eth_virtio_dev_init() and
 * again from virtio_dev_configure() when the requested features change.
 */
1366 virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
1368 struct virtio_hw *hw = eth_dev->data->dev_private;
1369 struct virtio_net_config *config;
1370 struct virtio_net_config local_config;
1371 struct rte_pci_device *pci_dev = NULL;
1374 /* Reset the device although not necessary at startup */
1377 /* Tell the host we've noticed this device. */
1378 vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
1380 /* Tell the host we've known how to drive the device. */
1381 vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
1382 if (virtio_negotiate_features(hw, req_features) < 0)
/* virtio_user devices have no PCI identity to copy. */
1385 if (!hw->virtio_user_dev) {
1386 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1387 rte_eth_copy_pci_info(eth_dev, pci_dev);
1390 /* If host does not support both status and MSI-X then disable LSC */
1391 if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->use_msix)
1392 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
1394 eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
1396 /* Setting up rx_header size for the device */
1397 if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
1398 vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
1399 hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1401 hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
1403 /* Copy the permanent MAC address to: virtio_hw */
1404 virtio_get_hwaddr(hw);
1405 ether_addr_copy((struct ether_addr *) hw->mac_addr,
/* NOTE(review): "ð_dev" below is an HTML-entity decode artifact of
 * "&eth_dev" — restore "&eth_dev->data->mac_addrs[0]" in the real file.
 */
1406 ð_dev->data->mac_addrs[0]);
1408 "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
1409 hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
1410 hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
/* With a control queue we can read the richer config-space layout. */
1412 if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
1413 config = &local_config;
1415 vtpci_read_dev_config(hw,
1416 offsetof(struct virtio_net_config, mac),
1417 &config->mac, sizeof(config->mac));
1419 if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
1420 vtpci_read_dev_config(hw,
1421 offsetof(struct virtio_net_config, status),
1422 &config->status, sizeof(config->status));
1425 "VIRTIO_NET_F_STATUS is not supported");
1429 if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
1430 vtpci_read_dev_config(hw,
1431 offsetof(struct virtio_net_config, max_virtqueue_pairs),
1432 &config->max_virtqueue_pairs,
1433 sizeof(config->max_virtqueue_pairs));
1436 "VIRTIO_NET_F_MQ is not supported");
1437 config->max_virtqueue_pairs = 1;
1440 hw->max_queue_pairs = config->max_virtqueue_pairs;
1442 if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
1443 vtpci_read_dev_config(hw,
1444 offsetof(struct virtio_net_config, mtu),
1446 sizeof(config->mtu));
1449 * MTU value has already been checked at negotiation
1450 * time, but check again in case it has changed since
1451 * then, which should not happen.
1453 if (config->mtu < ETHER_MIN_MTU) {
1454 PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
1459 hw->max_mtu = config->mtu;
1460 /* Set initial MTU to maximum one supported by vhost */
1461 eth_dev->data->mtu = config->mtu;
/* No MTU feature: derive the max from the largest Rx packet length. */
1464 hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
1465 VLAN_TAG_LEN - hw->vtnet_hdr_size;
1468 PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
1469 config->max_virtqueue_pairs);
1470 PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
1472 "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
1473 config->mac[0], config->mac[1],
1474 config->mac[2], config->mac[3],
1475 config->mac[4], config->mac[5]);
1477 PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
1478 hw->max_queue_pairs = 1;
1481 ret = virtio_alloc_queues(eth_dev);
1485 if (eth_dev->data->dev_conf.intr_conf.rxq) {
1486 if (virtio_configure_intr(eth_dev) < 0) {
1487 PMD_INIT_LOG(ERR, "failed to configure interrupt");
1492 vtpci_reinit_complete(hw);
/* pci_dev stays NULL for virtio_user; guard presumed on an elided line. */
1495 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1496 eth_dev->data->port_id, pci_dev->id.vendor_id,
1497 pci_dev->id.device_id);
1503 * Remap the PCI device again (IO port map for legacy device and
1504 * memory map for modern device), so that the secondary process
1505 * could have the PCI initiated correctly.
1508 virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
1512 * We don't have to re-parse the PCI config space, since
1513 * rte_pci_map_device() makes sure the mapped address
1514 * in secondary process would equal to the one mapped in
1515 * the primary process: error will be returned if that
1516 * requirement is not met.
1518 * That said, we could simply reuse all cap pointers
1519 * (such as dev_cfg, common_cfg, etc.) parsed from the
1520 * primary process, which is stored in shared memory.
/* NOTE(review): the modern/legacy branch selector is on an elided line —
 * presumably keyed on hw->modern; confirm against the full source.
 */
1522 if (rte_pci_map_device(pci_dev)) {
1523 PMD_INIT_LOG(DEBUG, "failed to map pci device!");
1527 if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
/* Installs the vtpci ops table for this device: virtio_user backend
 * (when compiled in), otherwise modern or legacy PCI ops.  The
 * modern-vs-legacy selector line is elided in this view — presumably
 * hw->modern; confirm against the full source.
 */
1535 virtio_set_vtpci_ops(struct virtio_hw *hw)
1537 #ifdef RTE_VIRTIO_USER
1538 if (hw->virtio_user_dev)
1539 VTPCI_OPS(hw) = &virtio_user_ops;
1543 VTPCI_OPS(hw) = &modern_ops;
1545 VTPCI_OPS(hw) = &legacy_ops;
1549 * This function is based on probe() function in virtio_pci.c
1550 * It returns 0 on success.
/* Ethdev init hook.  Secondary processes only remap PCI resources and
 * re-install function pointers (the shared device state was already set
 * up by the primary).  The primary allocates MAC storage, initializes
 * the PCI transport (skipped for virtio_user), negotiates default
 * features, and registers the LSC interrupt callback when supported.
 */
1553 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
1555 struct virtio_hw *hw = eth_dev->data->dev_private;
/* Rx path writes the virtio-net header into the mbuf headroom. */
1558 RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
1560 eth_dev->dev_ops = &virtio_eth_dev_ops;
1562 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1563 if (!hw->virtio_user_dev) {
1564 ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
1569 virtio_set_vtpci_ops(hw);
1570 set_rxtx_funcs(eth_dev);
1575 /* Allocate memory for storing MAC addresses */
1576 eth_dev->data->mac_addrs = rte_zmalloc("virtio", VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
1577 if (eth_dev->data->mac_addrs == NULL) {
1579 "Failed to allocate %d bytes needed to store MAC addresses",
1580 VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
1584 hw->port_id = eth_dev->data->port_id;
1585 /* For virtio_user case the hw->virtio_user_dev is populated by
1586 * virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
1588 if (!hw->virtio_user_dev) {
1589 ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
1594 /* reset device and negotiate default features */
1595 ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
1599 /* Setup interrupt callback */
1600 if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1601 rte_intr_callback_register(eth_dev->intr_handle,
1602 virtio_interrupt_handler, eth_dev);
/* Ethdev teardown, mirror of eth_virtio_dev_init(): stop and close the
 * device, clear function pointers, free MAC storage, unregister the LSC
 * callback, and unmap PCI resources.  Secondary processes bail out early
 * since the primary owns the shared state.
 */
1608 eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
1610 PMD_INIT_FUNC_TRACE();
1612 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
1615 virtio_dev_stop(eth_dev);
1616 virtio_dev_close(eth_dev);
1618 eth_dev->dev_ops = NULL;
1619 eth_dev->tx_pkt_burst = NULL;
1620 eth_dev->rx_pkt_burst = NULL;
1622 rte_free(eth_dev->data->mac_addrs);
1623 eth_dev->data->mac_addrs = NULL;
1625 /* reset interrupt callback */
1626 if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1627 rte_intr_callback_unregister(eth_dev->intr_handle,
1628 virtio_interrupt_handler,
1630 if (eth_dev->device)
1631 rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
1633 PMD_INIT_LOG(DEBUG, "dev_uninit completed");
/* PCI probe hook: delegate ethdev allocation (sized for virtio_hw
 * private data) and initialization to the generic helper.
 */
1638 static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1639 struct rte_pci_device *pci_dev)
1641 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
1642 eth_virtio_dev_init);
/* PCI remove hook: tear down the ethdev via the generic helper. */
1645 static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
1647 return rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
/* PCI driver descriptor registered by rte_virtio_pmd_init(). */
1650 static struct rte_pci_driver rte_virtio_pmd = {
1652 .name = "net_virtio",
1654 .id_table = pci_id_virtio_map,
1656 .probe = eth_virtio_pci_probe,
1657 .remove = eth_virtio_pci_remove,
1660 RTE_INIT(rte_virtio_pmd_init);
/* Driver constructor: legacy virtio uses x86 I/O ports, so IOPL access
 * is required before the PCI driver can be registered.
 */
1662 rte_virtio_pmd_init(void)
1664 if (rte_eal_iopl_init() != 0) {
1665 PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
1669 rte_pci_register(&rte_virtio_pmd);
1673 * Configure virtio device
1674 * It returns 0 on success.
1677 virtio_dev_configure(struct rte_eth_dev *dev)
1679 const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1680 struct virtio_hw *hw = dev->data->dev_private;
1681 uint64_t req_features;
1684 PMD_INIT_LOG(DEBUG, "configure");
1685 req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
1687 if (dev->data->dev_conf.intr_conf.rxq) {
1688 ret = virtio_init_device(dev, hw->req_guest_features);
1693 /* The name hw_ip_checksum is a bit confusing since it can be
1694 * set by the application to request L3 and/or L4 checksums. In
1695 * case of virtio, only L4 checksum is supported.
1697 if (rxmode->hw_ip_checksum)
1698 req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
1700 if (rxmode->enable_lro)
1702 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
1703 (1ULL << VIRTIO_NET_F_GUEST_TSO6);
1705 /* if request features changed, reinit the device */
1706 if (req_features != hw->req_guest_features) {
1707 ret = virtio_init_device(dev, req_features);
1712 if (rxmode->hw_ip_checksum &&
1713 !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
1715 "rx checksum not available on this host");
1719 if (rxmode->enable_lro &&
1720 (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
1721 !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4))) {
1723 "Large Receive Offload not available on this host");
1727 /* start control queue */
1728 if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
1729 virtio_dev_cq_start(dev);
1731 hw->vlan_strip = rxmode->hw_vlan_strip;
1733 if (rxmode->hw_vlan_filter
1734 && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
1736 "vlan filtering not available on this host");
1740 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1741 /* Enable vector (0) for Link State Intrerrupt */
1742 if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
1743 VIRTIO_MSI_NO_VECTOR) {
1744 PMD_DRV_LOG(ERR, "failed to set config vector");
1748 hw->use_simple_rx = 1;
1749 hw->use_simple_tx = 1;
1751 #if defined RTE_ARCH_ARM64 || defined CONFIG_RTE_ARCH_ARM
1752 if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
1753 hw->use_simple_rx = 0;
1754 hw->use_simple_tx = 0;
1757 if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1758 hw->use_simple_rx = 0;
1759 hw->use_simple_tx = 0;
1762 if (rxmode->hw_ip_checksum)
1763 hw->use_simple_rx = 0;
/* Start the device: finish per-queue setup, validate/enable interrupt
 * mode, program the multi-queue pair count, then flush and notify every
 * virtqueue so the backend (e.g. vhost/tap) wakes up.  Finally install
 * the burst functions and record the initial link state.
 */
1770 virtio_dev_start(struct rte_eth_dev *dev)
1772 uint16_t nb_queues, i;
1773 struct virtnet_rx *rxvq;
1774 struct virtnet_tx *txvq __rte_unused;
1775 struct virtio_hw *hw = dev->data->dev_private;
1778 /* Finish the initialization of the queues */
1779 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1780 ret = virtio_dev_rx_queue_setup_finish(dev, i);
1784 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1785 ret = virtio_dev_tx_queue_setup_finish(dev, i);
1790 /* check if lsc interrupt feature is enabled */
1791 if (dev->data->dev_conf.intr_conf.lsc) {
1792 if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1793 PMD_DRV_LOG(ERR, "link status not supported by host");
1798 /* Enable uio/vfio intr/eventfd mapping: althrough we already did that
1799 * in device configure, but it could be unmapped when device is
1802 if (dev->data->dev_conf.intr_conf.lsc ||
1803 dev->data->dev_conf.intr_conf.rxq) {
/* Disable first so re-enabling rebuilds the eventfd mapping. */
1804 rte_intr_disable(dev->intr_handle);
1806 if (rte_intr_enable(dev->intr_handle) < 0) {
1807 PMD_DRV_LOG(ERR, "interrupt enable failed");
1812 /*Notify the backend
1813 *Otherwise the tap backend might already stop its queue due to fullness.
1814 *vhost backend will have no chance to be waked up
1816 nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1817 if (hw->max_queue_pairs > 1) {
1818 if (virtio_set_multiple_queues(dev, nb_queues) != 0)
1822 PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
1824 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1825 rxvq = dev->data->rx_queues[i];
1826 /* Flush the old packets */
1827 virtqueue_flush(rxvq->vq);
1828 virtqueue_notify(rxvq->vq);
1831 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1832 txvq = dev->data->tx_queues[i];
1833 virtqueue_notify(txvq->vq);
1836 PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
1838 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1839 rxvq = dev->data->rx_queues[i];
1840 VIRTQUEUE_DUMP(rxvq->vq);
1843 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1844 txvq = dev->data->tx_queues[i];
1845 VIRTQUEUE_DUMP(txvq->vq);
1848 set_rxtx_funcs(dev);
1851 /* Initialize Link state */
1852 virtio_dev_link_update(dev, 0);
/* Free every mbuf still attached (used or unused) to all Rx and Tx
 * virtqueues; called on device close/teardown paths.
 * ("detatch" is a historical typo in the helper's declared name.)
 */
1857 static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
1859 struct rte_mbuf *buf;
1860 int i, mbuf_num = 0;
1862 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1863 struct virtnet_rx *rxvq = dev->data->rx_queues[i];
1866 "Before freeing rxq[%d] used and unused buf", i);
1867 VIRTQUEUE_DUMP(rxvq->vq);
1869 PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", i, rxvq);
1870 while ((buf = virtqueue_detatch_unused(rxvq->vq)) != NULL) {
1871 rte_pktmbuf_free(buf);
1875 PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
1877 "After freeing rxq[%d] used and unused buf", i);
1878 VIRTQUEUE_DUMP(rxvq->vq);
1881 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1882 struct virtnet_tx *txvq = dev->data->tx_queues[i];
1885 "Before freeing txq[%d] used and unused bufs",
1887 VIRTQUEUE_DUMP(txvq->vq);
1890 while ((buf = virtqueue_detatch_unused(txvq->vq)) != NULL) {
1891 rte_pktmbuf_free(buf);
1895 PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
1897 "After freeing txq[%d] used and unused buf", i);
1898 VIRTQUEUE_DUMP(txvq->vq);
1903 * Stop device: disable interrupt and mark link down
1906 virtio_dev_stop(struct rte_eth_dev *dev)
1908 struct virtio_hw *hw = dev->data->dev_private;
1909 struct rte_eth_link link;
1910 struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
1912 PMD_INIT_LOG(DEBUG, "stop");
1914 if (intr_conf->lsc || intr_conf->rxq)
1915 rte_intr_disable(dev->intr_handle);
/* NOTE(review): hw appears unused in this view — presumably an elided
 * line clears hw->started (link_update checks it); confirm.
 */
1918 memset(&link, 0, sizeof(link));
1919 virtio_dev_atomic_write_link_status(dev, &link);
/* Refresh link state: down while the port is stopped, otherwise read
 * VIRTIO_NET_S_LINK_UP from config space when the STATUS feature was
 * negotiated, else assume up.  Speed/duplex are fixed (10G full).
 * Returns 0 when the status changed, -1 when unchanged.
 */
1923 virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
1925 struct rte_eth_link link, old;
1927 struct virtio_hw *hw = dev->data->dev_private;
1928 memset(&link, 0, sizeof(link));
1929 virtio_dev_atomic_read_link_status(dev, &link);
/* NOTE(review): "old" must be snapshotted from "link" here (the
 * assignment sits on an elided line) before link is rewritten below.
 */
1931 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1932 link.link_speed = SPEED_10G;
1934 if (hw->started == 0) {
1935 link.link_status = ETH_LINK_DOWN;
1936 } else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
1937 PMD_INIT_LOG(DEBUG, "Get link status from hw");
1938 vtpci_read_dev_config(hw,
1939 offsetof(struct virtio_net_config, status),
1940 &status, sizeof(status));
1941 if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
1942 link.link_status = ETH_LINK_DOWN;
1943 PMD_INIT_LOG(DEBUG, "Port %d is down",
1944 dev->data->port_id);
1946 link.link_status = ETH_LINK_UP;
1947 PMD_INIT_LOG(DEBUG, "Port %d is up",
1948 dev->data->port_id);
1951 link.link_status = ETH_LINK_UP;
1953 virtio_dev_atomic_write_link_status(dev, &link);
1955 return (old.link_status == link.link_status) ? -1 : 0;
/* VLAN offload control: reject VLAN filtering when the host lacks
 * CTRL_VLAN; VLAN stripping is handled in software, so just record the
 * requested setting in hw->vlan_strip.
 */
1959 virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1961 const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1962 struct virtio_hw *hw = dev->data->dev_private;
1964 if (mask & ETH_VLAN_FILTER_MASK) {
1965 if (rxmode->hw_vlan_filter &&
1966 !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
1969 "vlan filtering not available on this host");
1975 if (mask & ETH_VLAN_STRIP_MASK)
1976 hw->vlan_strip = rxmode->hw_vlan_strip;
/* Report device capabilities.  Rx offload capabilities are derived from
 * the host's advertised features (get_features), while Tx capabilities
 * come from the already-negotiated hw->guest_features.
 */
1982 virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1984 uint64_t tso_mask, host_features;
1985 struct virtio_hw *hw = dev->data->dev_private;
1987 dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
1989 dev_info->pci_dev = dev->device ? RTE_ETH_DEV_TO_PCI(dev) : NULL;
1990 dev_info->max_rx_queues =
1991 RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
1992 dev_info->max_tx_queues =
1993 RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
1994 dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
1995 dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
1996 dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
1997 dev_info->default_txconf = (struct rte_eth_txconf) {
1998 .txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
/* Rx capabilities: report what the host offers, not what is negotiated. */
2001 host_features = VTPCI_OPS(hw)->get_features(hw);
2002 dev_info->rx_offload_capa = 0;
2003 if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
2004 dev_info->rx_offload_capa |=
2005 DEV_RX_OFFLOAD_TCP_CKSUM |
2006 DEV_RX_OFFLOAD_UDP_CKSUM;
/* LRO requires both guest TSO4 and TSO6 receive support. */
2008 tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
2009 (1ULL << VIRTIO_NET_F_GUEST_TSO6);
2010 if ((host_features & tso_mask) == tso_mask)
2011 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
/* Tx capabilities: based on negotiated features. */
2013 dev_info->tx_offload_capa = 0;
2014 if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
2015 dev_info->tx_offload_capa |=
2016 DEV_TX_OFFLOAD_UDP_CKSUM |
2017 DEV_TX_OFFLOAD_TCP_CKSUM;
2019 tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
2020 (1ULL << VIRTIO_NET_F_HOST_TSO6);
2021 if ((hw->guest_features & tso_mask) == tso_mask)
2022 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
2026 * It enables testpmd to collect per queue stats.
/* No-op stub; all parameters are unused. */
2029 virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
2030 __rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
2031 __rte_unused uint8_t is_rx)
/* PMD registration: export the driver name, its PCI ID table, and the
 * kernel modules (uio/vfio) it can bind through.
 */
2036 RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
2037 RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
2038 RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");