1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
11 #include <rte_ethdev_driver.h>
12 #include <rte_ethdev_pci.h>
13 #include <rte_memcpy.h>
14 #include <rte_string_fns.h>
15 #include <rte_memzone.h>
16 #include <rte_malloc.h>
17 #include <rte_branch_prediction.h>
19 #include <rte_bus_pci.h>
20 #include <rte_ether.h>
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_cpuflags.h>
27 #include <rte_memory.h>
30 #include <rte_cycles.h>
31 #include <rte_kvargs.h>
33 #include "virtio_ethdev.h"
34 #include "virtio_pci.h"
35 #include "virtio_logs.h"
36 #include "virtqueue.h"
37 #include "virtio_rxtx.h"
39 static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
40 static int virtio_dev_configure(struct rte_eth_dev *dev);
41 static int virtio_dev_start(struct rte_eth_dev *dev);
42 static void virtio_dev_stop(struct rte_eth_dev *dev);
43 static void virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
44 static void virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
45 static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
46 static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
47 static void virtio_dev_info_get(struct rte_eth_dev *dev,
48 struct rte_eth_dev_info *dev_info);
49 static int virtio_dev_link_update(struct rte_eth_dev *dev,
50 int wait_to_complete);
51 static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
53 static void virtio_set_hwaddr(struct virtio_hw *hw);
54 static void virtio_get_hwaddr(struct virtio_hw *hw);
56 static int virtio_dev_stats_get(struct rte_eth_dev *dev,
57 struct rte_eth_stats *stats);
58 static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
59 struct rte_eth_xstat *xstats, unsigned n);
60 static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
61 struct rte_eth_xstat_name *xstats_names,
63 static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
64 static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
65 static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
66 uint16_t vlan_id, int on);
67 static int virtio_mac_addr_add(struct rte_eth_dev *dev,
68 struct ether_addr *mac_addr,
69 uint32_t index, uint32_t vmdq);
70 static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
71 static int virtio_mac_addr_set(struct rte_eth_dev *dev,
72 struct ether_addr *mac_addr);
74 static int virtio_intr_enable(struct rte_eth_dev *dev);
75 static int virtio_intr_disable(struct rte_eth_dev *dev);
77 static int virtio_dev_queue_stats_mapping_set(
78 struct rte_eth_dev *eth_dev,
83 int virtio_logtype_init;
84 int virtio_logtype_driver;
86 static void virtio_notify_peers(struct rte_eth_dev *dev);
87 static void virtio_ack_link_announce(struct rte_eth_dev *dev);
90 * The set of PCI devices this driver supports
92 static const struct rte_pci_id pci_id_virtio_map[] = {
93 { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
94 { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
95 { .vendor_id = 0, /* sentinel */ },
98 struct rte_virtio_xstats_name_off {
99 char name[RTE_ETH_XSTATS_NAME_SIZE];
103 /* [rt]x_qX_ is prepended to the name string here */
104 static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
105 {"good_packets", offsetof(struct virtnet_rx, stats.packets)},
106 {"good_bytes", offsetof(struct virtnet_rx, stats.bytes)},
107 {"errors", offsetof(struct virtnet_rx, stats.errors)},
108 {"multicast_packets", offsetof(struct virtnet_rx, stats.multicast)},
109 {"broadcast_packets", offsetof(struct virtnet_rx, stats.broadcast)},
110 {"undersize_packets", offsetof(struct virtnet_rx, stats.size_bins[0])},
111 {"size_64_packets", offsetof(struct virtnet_rx, stats.size_bins[1])},
112 {"size_65_127_packets", offsetof(struct virtnet_rx, stats.size_bins[2])},
113 {"size_128_255_packets", offsetof(struct virtnet_rx, stats.size_bins[3])},
114 {"size_256_511_packets", offsetof(struct virtnet_rx, stats.size_bins[4])},
115 {"size_512_1023_packets", offsetof(struct virtnet_rx, stats.size_bins[5])},
116 {"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
117 {"size_1519_max_packets", offsetof(struct virtnet_rx, stats.size_bins[7])},
120 /* [rt]x_qX_ is prepended to the name string here */
121 static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
122 {"good_packets", offsetof(struct virtnet_tx, stats.packets)},
123 {"good_bytes", offsetof(struct virtnet_tx, stats.bytes)},
124 {"errors", offsetof(struct virtnet_tx, stats.errors)},
125 {"multicast_packets", offsetof(struct virtnet_tx, stats.multicast)},
126 {"broadcast_packets", offsetof(struct virtnet_tx, stats.broadcast)},
127 {"undersize_packets", offsetof(struct virtnet_tx, stats.size_bins[0])},
128 {"size_64_packets", offsetof(struct virtnet_tx, stats.size_bins[1])},
129 {"size_65_127_packets", offsetof(struct virtnet_tx, stats.size_bins[2])},
130 {"size_128_255_packets", offsetof(struct virtnet_tx, stats.size_bins[3])},
131 {"size_256_511_packets", offsetof(struct virtnet_tx, stats.size_bins[4])},
132 {"size_512_1023_packets", offsetof(struct virtnet_tx, stats.size_bins[5])},
133 {"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
134 {"size_1519_max_packets", offsetof(struct virtnet_tx, stats.size_bins[7])},
137 #define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
138 sizeof(rte_virtio_rxq_stat_strings[0]))
139 #define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
140 sizeof(rte_virtio_txq_stat_strings[0]))
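/*
 * Illustrative note (not driver code): the "[rt]x_qX_" prefix mentioned
 * above is combined with the table entries to yield xstat names such as
 * "rx_q0_good_packets" or "tx_q1_size_64_packets".  A minimal sketch of
 * the formatting, assuming a queue index `i` and a table index `t`:
 *
 *	char name[RTE_ETH_XSTATS_NAME_SIZE];
 *
 *	snprintf(name, sizeof(name), "rx_q%u_%s",
 *		i, rte_virtio_rxq_stat_strings[t].name);
 */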
142 struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
144 static struct virtio_pmd_ctrl *
145 virtio_send_command_packed(struct virtnet_ctl *cvq,
146 struct virtio_pmd_ctrl *ctrl,
147 int *dlen, int pkt_num)
149 struct virtqueue *vq = cvq->vq;
151 struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
152 struct virtio_pmd_ctrl *result;
153 bool avail_wrap_counter;
159 * Format is enforced in qemu code:
160 * One TX packet for header;
161 * At least one TX packet per argument;
162 * One RX packet for ACK.
164 head = vq->vq_avail_idx;
165 avail_wrap_counter = vq->avail_wrap_counter;
166 desc[head].addr = cvq->virtio_net_hdr_mem;
167 desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
170 if (++vq->vq_avail_idx >= vq->vq_nentries) {
171 vq->vq_avail_idx -= vq->vq_nentries;
172 vq->avail_wrap_counter ^= 1;
175 for (k = 0; k < pkt_num; k++) {
176 desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
177 + sizeof(struct virtio_net_ctrl_hdr)
178 + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
179 desc[vq->vq_avail_idx].len = dlen[k];
180 desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
181 VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
182 VRING_DESC_F_USED(!vq->avail_wrap_counter);
186 if (++vq->vq_avail_idx >= vq->vq_nentries) {
187 vq->vq_avail_idx -= vq->vq_nentries;
188 vq->avail_wrap_counter ^= 1;
192 desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
193 + sizeof(struct virtio_net_ctrl_hdr);
194 desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
195 desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
196 VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
197 VRING_DESC_F_USED(!vq->avail_wrap_counter);
200 if (++vq->vq_avail_idx >= vq->vq_nentries) {
201 vq->vq_avail_idx -= vq->vq_nentries;
202 vq->avail_wrap_counter ^= 1;
205 virtio_wmb(vq->hw->weak_barriers);
206 desc[head].flags = VRING_DESC_F_NEXT |
207 VRING_DESC_F_AVAIL(avail_wrap_counter) |
208 VRING_DESC_F_USED(!avail_wrap_counter);
210 virtio_wmb(vq->hw->weak_barriers);
211 virtqueue_notify(vq);
213 /* wait for used descriptors in virtqueue */
214 while (!desc_is_used(&desc[head], vq))
217 virtio_rmb(vq->hw->weak_barriers);
219 /* now get used descriptors */
220 vq->vq_free_cnt += nb_descs;
221 vq->vq_used_cons_idx += nb_descs;
222 if (vq->vq_used_cons_idx >= vq->vq_nentries) {
223 vq->vq_used_cons_idx -= vq->vq_nentries;
224 vq->used_wrap_counter ^= 1;
227 result = cvq->virtio_net_hdr_mz->addr;
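/*
 * Buffer layout sketch for the control queue used above (an illustration,
 * not driver code).  All three pieces live in the single virtio_net_hdr
 * memzone, so the descriptors differ only in their offsets:
 *
 *	cvq->virtio_net_hdr_mem + 0:
 *		struct virtio_net_ctrl_hdr	(class/cmd, device-readable)
 *	cvq->virtio_net_hdr_mem + sizeof(struct virtio_net_ctrl_hdr):
 *		virtio_net_ctrl_ack status	(written back by the device)
 *	cvq->virtio_net_hdr_mem + sizeof(hdr) + sizeof(status):
 *		command payload, dlen[0] .. dlen[pkt_num - 1] bytes
 */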
232 virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
233 int *dlen, int pkt_num)
237 virtio_net_ctrl_ack status = ~0;
238 struct virtio_pmd_ctrl *result;
239 struct virtqueue *vq;
241 ctrl->status = status;
243 if (!cvq || !cvq->vq) {
244 PMD_INIT_LOG(ERR, "Control queue is not supported.");
248 rte_spinlock_lock(&cvq->lock);
250 head = vq->vq_desc_head_idx;
252 PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
253 "vq->hw->cvq = %p vq = %p",
254 vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
256 if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
257 rte_spinlock_unlock(&cvq->lock);
261 memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
262 sizeof(struct virtio_pmd_ctrl));
264 if (vtpci_packed_queue(vq->hw)) {
265 result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
270 * Format is enforced in qemu code:
271 * One TX packet for header;
272 * At least one TX packet per argument;
273 * One RX packet for ACK.
275 vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
276 vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
277 vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
279 i = vq->vq_ring.desc[head].next;
281 for (k = 0; k < pkt_num; k++) {
282 vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
283 vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
284 + sizeof(struct virtio_net_ctrl_hdr)
285 + sizeof(ctrl->status) + sizeof(uint8_t)*sum;
286 vq->vq_ring.desc[i].len = dlen[k];
289 i = vq->vq_ring.desc[i].next;
292 vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
293 vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
294 + sizeof(struct virtio_net_ctrl_hdr);
295 vq->vq_ring.desc[i].len = sizeof(ctrl->status);
298 vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;
300 vq_update_avail_ring(vq, head);
301 vq_update_avail_idx(vq);
303 PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
305 virtqueue_notify(vq);
308 while (VIRTQUEUE_NUSED(vq) == 0) {
313 while (VIRTQUEUE_NUSED(vq)) {
314 uint32_t idx, desc_idx, used_idx;
315 struct vring_used_elem *uep;
317 used_idx = (uint32_t)(vq->vq_used_cons_idx
318 & (vq->vq_nentries - 1));
319 uep = &vq->vq_ring.used->ring[used_idx];
320 idx = (uint32_t) uep->id;
323 while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
324 desc_idx = vq->vq_ring.desc[desc_idx].next;
328 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
329 vq->vq_desc_head_idx = idx;
331 vq->vq_used_cons_idx++;
335 PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
336 vq->vq_free_cnt, vq->vq_desc_head_idx);
338 result = cvq->virtio_net_hdr_mz->addr;
341 rte_spinlock_unlock(&cvq->lock);
342 return result->status;
346 virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
348 struct virtio_hw *hw = dev->data->dev_private;
349 struct virtio_pmd_ctrl ctrl;
353 ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
354 ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
355 memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
357 dlen[0] = sizeof(uint16_t);
359 ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
361 PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
362 "failed, this is too late now...");
370 virtio_dev_queue_release(void *queue __rte_unused)
376 virtio_get_nr_vq(struct virtio_hw *hw)
378 uint16_t nr_vq = hw->max_queue_pairs * 2;
380 if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
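/*
 * Queue indexing sketch (per the virtio-net convention this file relies
 * on): for queue pair q, virtqueue 2 * q is the RX ring and 2 * q + 1 is
 * the TX ring; when VIRTIO_NET_F_CTRL_VQ is negotiated, the control
 * queue sits at index 2 * max_queue_pairs, which is why nr_vq gains one
 * extra slot here.
 */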
387 virtio_init_vring(struct virtqueue *vq)
389 int size = vq->vq_nentries;
390 struct vring *vr = &vq->vq_ring;
391 uint8_t *ring_mem = vq->vq_ring_virt_mem;
393 PMD_INIT_FUNC_TRACE();
395 memset(ring_mem, 0, vq->vq_ring_size);
397 vq->vq_used_cons_idx = 0;
398 vq->vq_desc_head_idx = 0;
399 vq->vq_avail_idx = 0;
400 vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
401 vq->vq_free_cnt = vq->vq_nentries;
402 memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
403 if (vtpci_packed_queue(vq->hw)) {
404 vring_init_packed(&vq->ring_packed, ring_mem,
405 VIRTIO_PCI_VRING_ALIGN, size);
406 vring_desc_init_packed(vq, size);
408 vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
409 vring_desc_init_split(vr->desc, size);
412	 * Disable device (host) from interrupting the guest
414 virtqueue_disable_intr(vq);
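/*
 * Ring layout note (illustrative): for a split ring, vring_init_split()
 * carves the descriptor table, avail ring and used ring out of the single
 * ring_mem allocation; a packed ring instead has one descriptor array
 * plus the driver/device event suppression structures, which is why only
 * vq->ring_packed is initialized in that branch.
 */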
418 virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
420 char vq_name[VIRTQUEUE_MAX_NAME_SZ];
421 char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
422 const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
423 unsigned int vq_size, size;
424 struct virtio_hw *hw = dev->data->dev_private;
425 struct virtnet_rx *rxvq = NULL;
426 struct virtnet_tx *txvq = NULL;
427 struct virtnet_ctl *cvq = NULL;
428 struct virtqueue *vq;
429 size_t sz_hdr_mz = 0;
430 void *sw_ring = NULL;
431 int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
433 int numa_node = dev->device->numa_node;
435 PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
436 vtpci_queue_idx, numa_node);
439	 * Read the virtqueue size from the Queue Size field.
440	 * It is always a power of 2; if it is 0, the virtqueue does not exist.
442 vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
443 PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
445 PMD_INIT_LOG(ERR, "virtqueue does not exist");
449 if (!rte_is_power_of_2(vq_size)) {
450		PMD_INIT_LOG(ERR, "virtqueue size is not a power of 2");
454 snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
455 dev->data->port_id, vtpci_queue_idx);
457 size = RTE_ALIGN_CEIL(sizeof(*vq) +
458 vq_size * sizeof(struct vq_desc_extra),
459 RTE_CACHE_LINE_SIZE);
460 if (queue_type == VTNET_TQ) {
462 * For each xmit packet, allocate a virtio_net_hdr
463 * and indirect ring elements
465 sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
466 } else if (queue_type == VTNET_CQ) {
467 /* Allocate a page for control vq command, data and status */
468 sz_hdr_mz = PAGE_SIZE;
471 vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
474 PMD_INIT_LOG(ERR, "can not allocate vq");
477 hw->vqs[vtpci_queue_idx] = vq;
480 vq->vq_queue_index = vtpci_queue_idx;
481 vq->vq_nentries = vq_size;
482 vq->event_flags_shadow = 0;
483 if (vtpci_packed_queue(hw)) {
484 vq->avail_wrap_counter = 1;
485 vq->used_wrap_counter = 1;
486 vq->avail_used_flags =
487 VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
488 VRING_DESC_F_USED(!vq->avail_wrap_counter);
492 * Reserve a memzone for vring elements
494 size = vring_size(hw, vq_size, VIRTIO_PCI_VRING_ALIGN);
495 vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
496 PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
497 size, vq->vq_ring_size);
499 mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
500 numa_node, RTE_MEMZONE_IOVA_CONTIG,
501 VIRTIO_PCI_VRING_ALIGN);
503 if (rte_errno == EEXIST)
504 mz = rte_memzone_lookup(vq_name);
511 memset(mz->addr, 0, mz->len);
513 vq->vq_ring_mem = mz->iova;
514 vq->vq_ring_virt_mem = mz->addr;
515 PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
517 PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
518 (uint64_t)(uintptr_t)mz->addr);
520 virtio_init_vring(vq);
523 snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
524 dev->data->port_id, vtpci_queue_idx);
525 hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
526 numa_node, RTE_MEMZONE_IOVA_CONTIG,
527 RTE_CACHE_LINE_SIZE);
528 if (hdr_mz == NULL) {
529 if (rte_errno == EEXIST)
530 hdr_mz = rte_memzone_lookup(vq_hdr_name);
531 if (hdr_mz == NULL) {
538 if (queue_type == VTNET_RQ) {
539 size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
540 sizeof(vq->sw_ring[0]);
542 sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
543 RTE_CACHE_LINE_SIZE, numa_node);
545 PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
550 vq->sw_ring = sw_ring;
553 rxvq->port_id = dev->data->port_id;
555 } else if (queue_type == VTNET_TQ) {
558 txvq->port_id = dev->data->port_id;
560 txvq->virtio_net_hdr_mz = hdr_mz;
561 txvq->virtio_net_hdr_mem = hdr_mz->iova;
562 } else if (queue_type == VTNET_CQ) {
566 cvq->virtio_net_hdr_mz = hdr_mz;
567 cvq->virtio_net_hdr_mem = hdr_mz->iova;
568 memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
573	/* For the virtio_user case (that is, when hw->dev is NULL), we use
574	 * the virtual address, and we need to properly set _offset_; please see
575	 * VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
577 if (!hw->virtio_user_dev)
578 vq->offset = offsetof(struct rte_mbuf, buf_iova);
580 vq->vq_ring_mem = (uintptr_t)mz->addr;
581 vq->offset = offsetof(struct rte_mbuf, buf_addr);
582 if (queue_type == VTNET_TQ)
583 txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
584 else if (queue_type == VTNET_CQ)
585 cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
588 if (queue_type == VTNET_TQ) {
589 struct virtio_tx_region *txr;
593 memset(txr, 0, vq_size * sizeof(*txr));
594 for (i = 0; i < vq_size; i++) {
595 struct vring_desc *start_dp = txr[i].tx_indir;
596 struct vring_packed_desc *start_dp_packed =
599 /* first indirect descriptor is always the tx header */
600 if (vtpci_packed_queue(hw)) {
601 start_dp_packed->addr = txvq->virtio_net_hdr_mem
603 + offsetof(struct virtio_tx_region,
605 start_dp_packed->len = hw->vtnet_hdr_size;
607 vring_desc_init_split(start_dp,
608 RTE_DIM(txr[i].tx_indir));
609 start_dp->addr = txvq->virtio_net_hdr_mem
611 + offsetof(struct virtio_tx_region,
613 start_dp->len = hw->vtnet_hdr_size;
614 start_dp->flags = VRING_DESC_F_NEXT;
619 if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
620 PMD_INIT_LOG(ERR, "setup_queue failed");
628 rte_memzone_free(hdr_mz);
629 rte_memzone_free(mz);
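/*
 * The reserve-or-lookup pattern used for both memzones above, as a
 * minimal self-contained sketch.  The helper name is hypothetical; it
 * assumes that on EEXIST the existing zone was created with a matching
 * size, as is the case for the fixed per-queue names used here.
 */
static inline const struct rte_memzone *
virtio_mz_reserve_or_lookup(const char *name, size_t len, int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve_aligned(name, len, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, VIRTIO_PCI_VRING_ALIGN);
	if (mz == NULL && rte_errno == EEXIST)
		mz = rte_memzone_lookup(name);	/* reuse the existing zone */

	return mz;
}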
636 virtio_free_queues(struct virtio_hw *hw)
638 uint16_t nr_vq = virtio_get_nr_vq(hw);
639 struct virtqueue *vq;
646 for (i = 0; i < nr_vq; i++) {
651 queue_type = virtio_get_queue_type(hw, i);
652 if (queue_type == VTNET_RQ) {
653 rte_free(vq->sw_ring);
654 rte_memzone_free(vq->rxq.mz);
655 } else if (queue_type == VTNET_TQ) {
656 rte_memzone_free(vq->txq.mz);
657 rte_memzone_free(vq->txq.virtio_net_hdr_mz);
659 rte_memzone_free(vq->cq.mz);
660 rte_memzone_free(vq->cq.virtio_net_hdr_mz);
672 virtio_alloc_queues(struct rte_eth_dev *dev)
674 struct virtio_hw *hw = dev->data->dev_private;
675 uint16_t nr_vq = virtio_get_nr_vq(hw);
679 hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
681 PMD_INIT_LOG(ERR, "failed to allocate vqs");
685 for (i = 0; i < nr_vq; i++) {
686 ret = virtio_init_queue(dev, i);
688 virtio_free_queues(hw);
696 static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
699 virtio_dev_close(struct rte_eth_dev *dev)
701 struct virtio_hw *hw = dev->data->dev_private;
702 struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
704 PMD_INIT_LOG(DEBUG, "virtio_dev_close");
711 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
712 VTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
714 virtio_queues_unbind_intr(dev);
716 if (intr_conf->lsc || intr_conf->rxq) {
717 virtio_intr_disable(dev);
718 rte_intr_efd_disable(dev->intr_handle);
719 rte_free(dev->intr_handle->intr_vec);
720 dev->intr_handle->intr_vec = NULL;
724 virtio_dev_free_mbufs(dev);
725 virtio_free_queues(hw);
729 virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
731 struct virtio_hw *hw = dev->data->dev_private;
732 struct virtio_pmd_ctrl ctrl;
736 if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
737 PMD_INIT_LOG(INFO, "host does not support rx control");
741 ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
742 ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
746 ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
748 PMD_INIT_LOG(ERR, "Failed to enable promisc");
752 virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
754 struct virtio_hw *hw = dev->data->dev_private;
755 struct virtio_pmd_ctrl ctrl;
759 if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
760 PMD_INIT_LOG(INFO, "host does not support rx control");
764 ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
765 ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
769 ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
771 PMD_INIT_LOG(ERR, "Failed to disable promisc");
775 virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
777 struct virtio_hw *hw = dev->data->dev_private;
778 struct virtio_pmd_ctrl ctrl;
782 if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
783 PMD_INIT_LOG(INFO, "host does not support rx control");
787 ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
788 ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
792 ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
794 PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
798 virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
800 struct virtio_hw *hw = dev->data->dev_private;
801 struct virtio_pmd_ctrl ctrl;
805 if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
806 PMD_INIT_LOG(INFO, "host does not support rx control");
810 ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
811 ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
815 ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
817 PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
820 #define VLAN_TAG_LEN 4 /* 802.3ac tag (not DMA'd) */
822 virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
824 struct virtio_hw *hw = dev->data->dev_private;
825 uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
827 uint32_t frame_size = mtu + ether_hdr_len;
828 uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
830 max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
832 if (mtu < ETHER_MIN_MTU || frame_size > max_frame_size) {
833 PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
834 ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
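/*
 * Worked example for the bounds check above (illustrative numbers): with
 * ETHER_HDR_LEN = 14, VLAN_TAG_LEN = 4 and a 12-byte mergeable-rxbuf
 * header, ether_hdr_len is 30.  A host-reported max_mtu of 9000 then
 * allows frames up to min(9030, VIRTIO_MAX_RX_PKTLEN) bytes, so any
 * requested MTU outside [ETHER_MIN_MTU, max_frame_size - 30] is rejected.
 */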
841 virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
843 struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
844 struct virtqueue *vq = rxvq->vq;
846 virtqueue_enable_intr(vq);
851 virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
853 struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
854 struct virtqueue *vq = rxvq->vq;
856 virtqueue_disable_intr(vq);
861 * dev_ops for virtio, bare necessities for basic operation
863 static const struct eth_dev_ops virtio_eth_dev_ops = {
864 .dev_configure = virtio_dev_configure,
865 .dev_start = virtio_dev_start,
866 .dev_stop = virtio_dev_stop,
867 .dev_close = virtio_dev_close,
868 .promiscuous_enable = virtio_dev_promiscuous_enable,
869 .promiscuous_disable = virtio_dev_promiscuous_disable,
870 .allmulticast_enable = virtio_dev_allmulticast_enable,
871 .allmulticast_disable = virtio_dev_allmulticast_disable,
872 .mtu_set = virtio_mtu_set,
873 .dev_infos_get = virtio_dev_info_get,
874 .stats_get = virtio_dev_stats_get,
875 .xstats_get = virtio_dev_xstats_get,
876 .xstats_get_names = virtio_dev_xstats_get_names,
877 .stats_reset = virtio_dev_stats_reset,
878 .xstats_reset = virtio_dev_stats_reset,
879 .link_update = virtio_dev_link_update,
880 .vlan_offload_set = virtio_dev_vlan_offload_set,
881 .rx_queue_setup = virtio_dev_rx_queue_setup,
882 .rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
883 .rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
884 .rx_queue_release = virtio_dev_queue_release,
885 .rx_descriptor_done = virtio_dev_rx_queue_done,
886 .tx_queue_setup = virtio_dev_tx_queue_setup,
887 .tx_queue_release = virtio_dev_queue_release,
888 /* collect stats per queue */
889 .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
890 .vlan_filter_set = virtio_vlan_filter_set,
891 .mac_addr_add = virtio_mac_addr_add,
892 .mac_addr_remove = virtio_mac_addr_remove,
893 .mac_addr_set = virtio_mac_addr_set,
897 virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
901 for (i = 0; i < dev->data->nb_tx_queues; i++) {
902 const struct virtnet_tx *txvq = dev->data->tx_queues[i];
906 stats->opackets += txvq->stats.packets;
907 stats->obytes += txvq->stats.bytes;
908 stats->oerrors += txvq->stats.errors;
910 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
911 stats->q_opackets[i] = txvq->stats.packets;
912 stats->q_obytes[i] = txvq->stats.bytes;
916 for (i = 0; i < dev->data->nb_rx_queues; i++) {
917 const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
921 stats->ipackets += rxvq->stats.packets;
922 stats->ibytes += rxvq->stats.bytes;
923 stats->ierrors += rxvq->stats.errors;
925 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
926 stats->q_ipackets[i] = rxvq->stats.packets;
927 stats->q_ibytes[i] = rxvq->stats.bytes;
931 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
934 static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
935 struct rte_eth_xstat_name *xstats_names,
936 __rte_unused unsigned limit)
942 unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
943 dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
945 if (xstats_names != NULL) {
946 /* Note: limit checked in rte_eth_xstats_names() */
948 for (i = 0; i < dev->data->nb_rx_queues; i++) {
949 struct virtnet_rx *rxvq = dev->data->rx_queues[i];
952 for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
953 snprintf(xstats_names[count].name,
954 sizeof(xstats_names[count].name),
956 rte_virtio_rxq_stat_strings[t].name);
961 for (i = 0; i < dev->data->nb_tx_queues; i++) {
962 struct virtnet_tx *txvq = dev->data->tx_queues[i];
965 for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
966 snprintf(xstats_names[count].name,
967 sizeof(xstats_names[count].name),
969 rte_virtio_txq_stat_strings[t].name);
979 virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
985 unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
986 dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
991 for (i = 0; i < dev->data->nb_rx_queues; i++) {
992 struct virtnet_rx *rxvq = dev->data->rx_queues[i];
999 for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
1000 xstats[count].value = *(uint64_t *)(((char *)rxvq) +
1001 rte_virtio_rxq_stat_strings[t].offset);
1002 xstats[count].id = count;
1007 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1008 struct virtnet_tx *txvq = dev->data->tx_queues[i];
1015 for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
1016 xstats[count].value = *(uint64_t *)(((char *)txvq) +
1017 rte_virtio_txq_stat_strings[t].offset);
1018 xstats[count].id = count;
1027 virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1029 virtio_update_stats(dev, stats);
1035 virtio_dev_stats_reset(struct rte_eth_dev *dev)
1039 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1040 struct virtnet_tx *txvq = dev->data->tx_queues[i];
1044 txvq->stats.packets = 0;
1045 txvq->stats.bytes = 0;
1046 txvq->stats.errors = 0;
1047 txvq->stats.multicast = 0;
1048 txvq->stats.broadcast = 0;
1049 memset(txvq->stats.size_bins, 0,
1050 sizeof(txvq->stats.size_bins[0]) * 8);
1053 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1054 struct virtnet_rx *rxvq = dev->data->rx_queues[i];
1058 rxvq->stats.packets = 0;
1059 rxvq->stats.bytes = 0;
1060 rxvq->stats.errors = 0;
1061 rxvq->stats.multicast = 0;
1062 rxvq->stats.broadcast = 0;
1063 memset(rxvq->stats.size_bins, 0,
1064 sizeof(rxvq->stats.size_bins[0]) * 8);
1069 virtio_set_hwaddr(struct virtio_hw *hw)
1071 vtpci_write_dev_config(hw,
1072 offsetof(struct virtio_net_config, mac),
1073 &hw->mac_addr, ETHER_ADDR_LEN);
1077 virtio_get_hwaddr(struct virtio_hw *hw)
1079 if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
1080 vtpci_read_dev_config(hw,
1081 offsetof(struct virtio_net_config, mac),
1082 &hw->mac_addr, ETHER_ADDR_LEN);
1084 eth_random_addr(&hw->mac_addr[0]);
1085 virtio_set_hwaddr(hw);
1090 virtio_mac_table_set(struct virtio_hw *hw,
1091 const struct virtio_net_ctrl_mac *uc,
1092 const struct virtio_net_ctrl_mac *mc)
1094 struct virtio_pmd_ctrl ctrl;
1097 if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
1098 PMD_DRV_LOG(INFO, "host does not support mac table");
1102 ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
1103 ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
1105 len[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries);
1106 memcpy(ctrl.data, uc, len[0]);
1108 len[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries);
1109 memcpy(ctrl.data + len[0], mc, len[1]);
1111 err = virtio_send_command(hw->cvq, &ctrl, len, 2);
1113 PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
1118 virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1119 uint32_t index, uint32_t vmdq __rte_unused)
1121 struct virtio_hw *hw = dev->data->dev_private;
1122 const struct ether_addr *addrs = dev->data->mac_addrs;
1124 struct virtio_net_ctrl_mac *uc, *mc;
1126 if (index >= VIRTIO_MAX_MAC_ADDRS) {
1127 PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
1131 uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
1133 mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
1136 for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
1137 const struct ether_addr *addr
1138 = (i == index) ? mac_addr : addrs + i;
1139 struct virtio_net_ctrl_mac *tbl
1140 = is_multicast_ether_addr(addr) ? mc : uc;
1142 memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
1145 return virtio_mac_table_set(hw, uc, mc);
1149 virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
1151 struct virtio_hw *hw = dev->data->dev_private;
1152 struct ether_addr *addrs = dev->data->mac_addrs;
1153 struct virtio_net_ctrl_mac *uc, *mc;
1156 if (index >= VIRTIO_MAX_MAC_ADDRS) {
1157 PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
1161 uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
1163 mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
1166 for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
1167 struct virtio_net_ctrl_mac *tbl;
1169 if (i == index || is_zero_ether_addr(addrs + i))
1172 tbl = is_multicast_ether_addr(addrs + i) ? mc : uc;
1173 memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);
1176 virtio_mac_table_set(hw, uc, mc);
1180 virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
1182 struct virtio_hw *hw = dev->data->dev_private;
1184 memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);
1186 /* Use atomic update if available */
1187 if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
1188 struct virtio_pmd_ctrl ctrl;
1189 int len = ETHER_ADDR_LEN;
1191 ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
1192 ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
1194 memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
1195 return virtio_send_command(hw->cvq, &ctrl, &len, 1);
1198 if (!vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
1201 virtio_set_hwaddr(hw);
1206 virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1208 struct virtio_hw *hw = dev->data->dev_private;
1209 struct virtio_pmd_ctrl ctrl;
1212 if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
1215 ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
1216 ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
1217 memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
1218 len = sizeof(vlan_id);
1220 return virtio_send_command(hw->cvq, &ctrl, &len, 1);
1224 virtio_intr_enable(struct rte_eth_dev *dev)
1226 struct virtio_hw *hw = dev->data->dev_private;
1228 if (rte_intr_enable(dev->intr_handle) < 0)
1231 if (!hw->virtio_user_dev)
1232 hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
1238 virtio_intr_disable(struct rte_eth_dev *dev)
1240 struct virtio_hw *hw = dev->data->dev_private;
1242 if (rte_intr_disable(dev->intr_handle) < 0)
1245 if (!hw->virtio_user_dev)
1246 hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
1252 virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
1254 uint64_t host_features;
1256 /* Prepare guest_features: feature that driver wants to support */
1257 PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
1260 /* Read device(host) feature bits */
1261 host_features = VTPCI_OPS(hw)->get_features(hw);
1262 PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
1265 /* If supported, ensure MTU value is valid before acknowledging it. */
1266 if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
1267 struct virtio_net_config config;
1269 vtpci_read_dev_config(hw,
1270 offsetof(struct virtio_net_config, mtu),
1271 &config.mtu, sizeof(config.mtu));
1273 if (config.mtu < ETHER_MIN_MTU)
1274 req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
1278 * Negotiate features: Subset of device feature bits are written back
1279 * guest feature bits.
1281 hw->guest_features = req_features;
1282 hw->guest_features = vtpci_negotiate_features(hw, host_features);
1283 PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
1284 hw->guest_features);
1287 if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
1289 "VIRTIO_F_VERSION_1 features is not enabled.");
1292 vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
1293 if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
1295 "failed to set FEATURES_OK status!");
1300 hw->req_guest_features = req_features;
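/*
 * Illustrative follow-up (not driver code): once negotiation completes,
 * individual bits are tested with vtpci_with_feature(), e.g.
 *
 *	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
 *		... both sides accepted mergeable receive buffers ...
 *
 * hw->guest_features holds the negotiated subset of the host features
 * and the requested features.
 */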
1306 virtio_dev_pause(struct rte_eth_dev *dev)
1308 struct virtio_hw *hw = dev->data->dev_private;
1310 rte_spinlock_lock(&hw->state_lock);
1312 if (hw->started == 0) {
1313 /* Device is just stopped. */
1314 rte_spinlock_unlock(&hw->state_lock);
1319	 * Prevent the worker threads from touching queues to avoid contention;
1320	 * 1 ms should be enough for the ongoing Tx function to finish.
1327 * Recover hw state to let the worker threads continue.
1330 virtio_dev_resume(struct rte_eth_dev *dev)
1332 struct virtio_hw *hw = dev->data->dev_private;
1335 rte_spinlock_unlock(&hw->state_lock);
1339 * Should be called only after device is paused.
1342 virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
1345 struct virtio_hw *hw = dev->data->dev_private;
1346 struct virtnet_tx *txvq = dev->data->tx_queues[0];
1349 hw->inject_pkts = tx_pkts;
1350 ret = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);
1351 hw->inject_pkts = NULL;
1357 virtio_notify_peers(struct rte_eth_dev *dev)
1359 struct virtio_hw *hw = dev->data->dev_private;
1360 struct virtnet_rx *rxvq;
1361 struct rte_mbuf *rarp_mbuf;
1363 if (!dev->data->rx_queues)
1366 rxvq = dev->data->rx_queues[0];
1370 rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
1371 (struct ether_addr *)hw->mac_addr);
1372 if (rarp_mbuf == NULL) {
1373 PMD_DRV_LOG(ERR, "failed to make RARP packet.");
1377 /* If virtio port just stopped, no need to send RARP */
1378 if (virtio_dev_pause(dev) < 0) {
1379 rte_pktmbuf_free(rarp_mbuf);
1383 virtio_inject_pkts(dev, &rarp_mbuf, 1);
1384 virtio_dev_resume(dev);
1388 virtio_ack_link_announce(struct rte_eth_dev *dev)
1390 struct virtio_hw *hw = dev->data->dev_private;
1391 struct virtio_pmd_ctrl ctrl;
1393 ctrl.hdr.class = VIRTIO_NET_CTRL_ANNOUNCE;
1394 ctrl.hdr.cmd = VIRTIO_NET_CTRL_ANNOUNCE_ACK;
1396 virtio_send_command(hw->cvq, &ctrl, NULL, 0);
1400 * Process virtio config changed interrupt. Call the callback
1401 * if link state changed, generate gratuitous RARP packet if
1402 * the status indicates an ANNOUNCE.
1405 virtio_interrupt_handler(void *param)
1407 struct rte_eth_dev *dev = param;
1408 struct virtio_hw *hw = dev->data->dev_private;
1412 /* Read interrupt status which clears interrupt */
1413 isr = vtpci_isr(hw);
1414 PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
1416 if (virtio_intr_enable(dev) < 0)
1417 PMD_DRV_LOG(ERR, "interrupt enable failed");
1419 if (isr & VIRTIO_PCI_ISR_CONFIG) {
1420 if (virtio_dev_link_update(dev, 0) == 0)
1421 _rte_eth_dev_callback_process(dev,
1422 RTE_ETH_EVENT_INTR_LSC,
1425 if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
1426 vtpci_read_dev_config(hw,
1427 offsetof(struct virtio_net_config, status),
1428 &status, sizeof(status));
1429 if (status & VIRTIO_NET_S_ANNOUNCE) {
1430 virtio_notify_peers(dev);
1432 virtio_ack_link_announce(dev);
1438 /* set rx and tx handlers according to what is supported */
1440 set_rxtx_funcs(struct rte_eth_dev *eth_dev)
1442 struct virtio_hw *hw = eth_dev->data->dev_private;
1444 if (vtpci_packed_queue(hw)) {
1446 "virtio: using packed ring standard Tx path on port %u",
1447 eth_dev->data->port_id);
1448 eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
1450 if (hw->use_inorder_tx) {
1451 PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
1452 eth_dev->data->port_id);
1453 eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
1455 PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
1456 eth_dev->data->port_id);
1457 eth_dev->tx_pkt_burst = virtio_xmit_pkts;
1461 if (vtpci_packed_queue(hw)) {
1462 if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1464 "virtio: using packed ring mergeable buffer Rx path on port %u",
1465 eth_dev->data->port_id);
1466 eth_dev->rx_pkt_burst =
1467 &virtio_recv_mergeable_pkts_packed;
1470 "virtio: using packed ring standard Rx path on port %u",
1471 eth_dev->data->port_id);
1472 eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
1475 if (hw->use_simple_rx) {
1476 PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
1477 eth_dev->data->port_id);
1478 eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
1479 } else if (hw->use_inorder_rx) {
1481 "virtio: using inorder Rx path on port %u",
1482 eth_dev->data->port_id);
1483 eth_dev->rx_pkt_burst = &virtio_recv_pkts_inorder;
1484 } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1486 "virtio: using mergeable buffer Rx path on port %u",
1487 eth_dev->data->port_id);
1488 eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
1490 PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
1491 eth_dev->data->port_id);
1492 eth_dev->rx_pkt_burst = &virtio_recv_pkts;
1498 /* Only support 1:1 queue/interrupt mapping so far.
1499	 * TODO: support n:1 queue/interrupt mapping when there is a limited
1500	 * number of interrupt vectors (<N+1).
1503 virtio_queues_bind_intr(struct rte_eth_dev *dev)
1506 struct virtio_hw *hw = dev->data->dev_private;
1508 PMD_INIT_LOG(INFO, "queue/interrupt binding");
1509 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
1510 dev->intr_handle->intr_vec[i] = i + 1;
1511 if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
1512 VIRTIO_MSI_NO_VECTOR) {
1513 PMD_DRV_LOG(ERR, "failed to set queue vector");
1522 virtio_queues_unbind_intr(struct rte_eth_dev *dev)
1525 struct virtio_hw *hw = dev->data->dev_private;
1527 PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
1528 for (i = 0; i < dev->data->nb_rx_queues; ++i)
1529 VTPCI_OPS(hw)->set_queue_irq(hw,
1530 hw->vqs[i * VTNET_CQ],
1531 VIRTIO_MSI_NO_VECTOR);
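/*
 * Vector layout sketch (as used by the two helpers above): MSI-X vector 0
 * is reserved for the config-change/LSC interrupt (set_config_irq(hw, 0)
 * in virtio_dev_configure()), so RX queue i is bound to vector i + 1 and
 * intr_vec[i] = i + 1 during binding; unbinding writes
 * VIRTIO_MSI_NO_VECTOR back for every queue.
 */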
1535 virtio_configure_intr(struct rte_eth_dev *dev)
1537 struct virtio_hw *hw = dev->data->dev_private;
1539 if (!rte_intr_cap_multiple(dev->intr_handle)) {
1540 PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
1544 if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) {
1545 PMD_INIT_LOG(ERR, "Fail to create eventfd");
1549 if (!dev->intr_handle->intr_vec) {
1550 dev->intr_handle->intr_vec =
1551 rte_zmalloc("intr_vec",
1552 hw->max_queue_pairs * sizeof(int), 0);
1553 if (!dev->intr_handle->intr_vec) {
1554 PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
1555 hw->max_queue_pairs);
1560 /* Re-register callback to update max_intr */
1561 rte_intr_callback_unregister(dev->intr_handle,
1562 virtio_interrupt_handler,
1564 rte_intr_callback_register(dev->intr_handle,
1565 virtio_interrupt_handler,
1568 /* DO NOT try to remove this! This function will enable msix, or QEMU
1569 * will encounter SIGSEGV when DRIVER_OK is sent.
1570 * And for legacy devices, this should be done before queue/vec binding
1571 * to change the config size from 20 to 24, or VIRTIO_MSI_QUEUE_VECTOR
1572 * (22) will be ignored.
1574 if (virtio_intr_enable(dev) < 0) {
1575 PMD_DRV_LOG(ERR, "interrupt enable failed");
1579 if (virtio_queues_bind_intr(dev) < 0) {
1580 PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
1587 /* reset device and renegotiate features if needed */
1589 virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
1591 struct virtio_hw *hw = eth_dev->data->dev_private;
1592 struct virtio_net_config *config;
1593 struct virtio_net_config local_config;
1594 struct rte_pci_device *pci_dev = NULL;
1597 /* Reset the device although not necessary at startup */
1601 virtio_dev_free_mbufs(eth_dev);
1602 virtio_free_queues(hw);
1605 /* Tell the host we've noticed this device. */
1606 vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
1608 /* Tell the host we've known how to drive the device. */
1609 vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
1610 if (virtio_negotiate_features(hw, req_features) < 0)
1613 hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
1615 if (!hw->virtio_user_dev) {
1616 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1617 rte_eth_copy_pci_info(eth_dev, pci_dev);
1620 /* If host does not support both status and MSI-X then disable LSC */
1621 if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) &&
1622 hw->use_msix != VIRTIO_MSIX_NONE)
1623 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
1625 eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
1627 /* Setting up rx_header size for the device */
1628 if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
1629 vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
1630 vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
1631 hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1633 hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
1635	/* Copy the permanent MAC address into virtio_hw */
1636 virtio_get_hwaddr(hw);
1637 ether_addr_copy((struct ether_addr *) hw->mac_addr,
1638			&eth_dev->data->mac_addrs[0]);
1640 "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
1641 hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
1642 hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
1644 if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
1645 config = &local_config;
1647 vtpci_read_dev_config(hw,
1648 offsetof(struct virtio_net_config, mac),
1649 &config->mac, sizeof(config->mac));
1651 if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
1652 vtpci_read_dev_config(hw,
1653 offsetof(struct virtio_net_config, status),
1654 &config->status, sizeof(config->status));
1657 "VIRTIO_NET_F_STATUS is not supported");
1661 if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
1662 vtpci_read_dev_config(hw,
1663 offsetof(struct virtio_net_config, max_virtqueue_pairs),
1664 &config->max_virtqueue_pairs,
1665 sizeof(config->max_virtqueue_pairs));
1668 "VIRTIO_NET_F_MQ is not supported");
1669 config->max_virtqueue_pairs = 1;
1672 hw->max_queue_pairs = config->max_virtqueue_pairs;
1674 if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
1675 vtpci_read_dev_config(hw,
1676 offsetof(struct virtio_net_config, mtu),
1678 sizeof(config->mtu));
1681 * MTU value has already been checked at negotiation
1682 * time, but check again in case it has changed since
1683 * then, which should not happen.
1685 if (config->mtu < ETHER_MIN_MTU) {
1686 PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
1691 hw->max_mtu = config->mtu;
1692 /* Set initial MTU to maximum one supported by vhost */
1693 eth_dev->data->mtu = config->mtu;
1696 hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
1697 VLAN_TAG_LEN - hw->vtnet_hdr_size;
1700 PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
1701 config->max_virtqueue_pairs);
1702 PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
1704 "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
1705 config->mac[0], config->mac[1],
1706 config->mac[2], config->mac[3],
1707 config->mac[4], config->mac[5]);
1709 PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
1710 hw->max_queue_pairs = 1;
1711 hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
1712 VLAN_TAG_LEN - hw->vtnet_hdr_size;
1715 ret = virtio_alloc_queues(eth_dev);
1719 if (eth_dev->data->dev_conf.intr_conf.rxq) {
1720 if (virtio_configure_intr(eth_dev) < 0) {
1721 PMD_INIT_LOG(ERR, "failed to configure interrupt");
1726 vtpci_reinit_complete(hw);
1729 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1730 eth_dev->data->port_id, pci_dev->id.vendor_id,
1731 pci_dev->id.device_id);
1737	 * Remap the PCI device again (IO port map for legacy device and
1738	 * memory map for modern device), so that the secondary process
1739	 * can have the PCI device initialized correctly.
1742 virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
1746 * We don't have to re-parse the PCI config space, since
1747 * rte_pci_map_device() makes sure the mapped address
1748 * in secondary process would equal to the one mapped in
1749 * the primary process: error will be returned if that
1750 * requirement is not met.
1752 * That said, we could simply reuse all cap pointers
1753 * (such as dev_cfg, common_cfg, etc.) parsed from the
1754 * primary process, which is stored in shared memory.
1756 if (rte_pci_map_device(pci_dev)) {
1757 PMD_INIT_LOG(DEBUG, "failed to map pci device!");
1761 if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
1769 virtio_set_vtpci_ops(struct virtio_hw *hw)
1771 #ifdef RTE_VIRTIO_USER
1772 if (hw->virtio_user_dev)
1773 VTPCI_OPS(hw) = &virtio_user_ops;
1777 VTPCI_OPS(hw) = &modern_ops;
1779 VTPCI_OPS(hw) = &legacy_ops;
1783	 * This function is based on the probe() function in virtio_pci.c.
1784	 * It returns 0 on success.
1787 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
1789 struct virtio_hw *hw = eth_dev->data->dev_private;
1792 RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
1794 eth_dev->dev_ops = &virtio_eth_dev_ops;
1796 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1797 if (!hw->virtio_user_dev) {
1798 ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
1803 virtio_set_vtpci_ops(hw);
1804 set_rxtx_funcs(eth_dev);
1809 /* Allocate memory for storing MAC addresses */
1810 eth_dev->data->mac_addrs = rte_zmalloc("virtio", VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
1811 if (eth_dev->data->mac_addrs == NULL) {
1813 "Failed to allocate %d bytes needed to store MAC addresses",
1814 VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
1818 hw->port_id = eth_dev->data->port_id;
1819 /* For virtio_user case the hw->virtio_user_dev is populated by
1820 * virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
1822 if (!hw->virtio_user_dev) {
1823 ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
1828 /* reset device and negotiate default features */
1829 ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
1836 rte_free(eth_dev->data->mac_addrs);
1841 eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
1843 PMD_INIT_FUNC_TRACE();
1845 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
1848 virtio_dev_stop(eth_dev);
1849 virtio_dev_close(eth_dev);
1851 eth_dev->dev_ops = NULL;
1852 eth_dev->tx_pkt_burst = NULL;
1853 eth_dev->rx_pkt_burst = NULL;
1855 if (eth_dev->device)
1856 rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
1858 PMD_INIT_LOG(DEBUG, "dev_uninit completed");
1863 static int vdpa_check_handler(__rte_unused const char *key,
1864 const char *value, __rte_unused void *opaque)
1866 if (strcmp(value, "1"))
1873 vdpa_mode_selected(struct rte_devargs *devargs)
1875 struct rte_kvargs *kvlist;
1876 const char *key = "vdpa";
1879 if (devargs == NULL)
1882 kvlist = rte_kvargs_parse(devargs->args, NULL);
1886 if (!rte_kvargs_count(kvlist, key))
1889 /* vdpa mode selected when there's a key-value pair: vdpa=1 */
1890 if (rte_kvargs_process(kvlist, key,
1891 vdpa_check_handler, NULL) < 0) {
1897 rte_kvargs_free(kvlist);
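/*
 * Usage sketch (assuming the usual EAL PCI devargs syntax): passing
 * "vdpa=1" with the device, e.g.
 *
 *	-w 0000:00:04.0,vdpa=1
 *
 * makes vdpa_mode_selected() return 1, so the probe below is skipped and
 * a vDPA driver can claim the device instead.
 */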
1901 static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1902 struct rte_pci_device *pci_dev)
1904 if (rte_eal_iopl_init() != 0) {
1905 PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
1909 /* virtio pmd skips probe if device needs to work in vdpa mode */
1910 if (vdpa_mode_selected(pci_dev->device.devargs))
1913 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
1914 eth_virtio_dev_init);
1917 static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
1919 return rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
1922 static struct rte_pci_driver rte_virtio_pmd = {
1924 .name = "net_virtio",
1926 .id_table = pci_id_virtio_map,
1928 .probe = eth_virtio_pci_probe,
1929 .remove = eth_virtio_pci_remove,
1932 RTE_INIT(rte_virtio_pmd_init)
1934 rte_eal_iopl_init();
1935 rte_pci_register(&rte_virtio_pmd);
1939 rx_offload_enabled(struct virtio_hw *hw)
1941 return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
1942 vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
1943 vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
1947 tx_offload_enabled(struct virtio_hw *hw)
1949 return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
1950 vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
1951 vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
1955 * Configure virtio device
1956 * It returns 0 on success.
1959 virtio_dev_configure(struct rte_eth_dev *dev)
1961 const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1962 const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
1963 struct virtio_hw *hw = dev->data->dev_private;
1964 uint64_t rx_offloads = rxmode->offloads;
1965 uint64_t tx_offloads = txmode->offloads;
1966 uint64_t req_features;
1969 PMD_INIT_LOG(DEBUG, "configure");
1970 req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
1972 if (dev->data->dev_conf.intr_conf.rxq) {
1973 ret = virtio_init_device(dev, hw->req_guest_features);
1978 if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
1979 DEV_RX_OFFLOAD_TCP_CKSUM))
1980 req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
1982 if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
1984 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
1985 (1ULL << VIRTIO_NET_F_GUEST_TSO6);
1987 if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
1988 DEV_TX_OFFLOAD_TCP_CKSUM))
1989 req_features |= (1ULL << VIRTIO_NET_F_CSUM);
1991 if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
1993 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1994 (1ULL << VIRTIO_NET_F_HOST_TSO6);
1996	/* if the requested features changed, reinit the device */
1997 if (req_features != hw->req_guest_features) {
1998 ret = virtio_init_device(dev, req_features);
2003 if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
2004 DEV_RX_OFFLOAD_TCP_CKSUM)) &&
2005 !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
2007 "rx checksum not available on this host");
2011 if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
2012 (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
2013 !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
2015 "Large Receive Offload not available on this host");
2019 /* start control queue */
2020 if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
2021 virtio_dev_cq_start(dev);
2023 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2026 if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2027 && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
2029 "vlan filtering not available on this host");
2033 hw->has_tx_offload = tx_offload_enabled(hw);
2034 hw->has_rx_offload = rx_offload_enabled(hw);
2036 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
2037		/* Enable vector (0) for Link State Interrupt */
2038 if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
2039 VIRTIO_MSI_NO_VECTOR) {
2040 PMD_DRV_LOG(ERR, "failed to set config vector");
2044 rte_spinlock_init(&hw->state_lock);
2046 hw->use_simple_rx = 1;
2048 if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
2049 hw->use_inorder_tx = 1;
2050 hw->use_inorder_rx = 1;
2051 hw->use_simple_rx = 0;
2054 if (vtpci_packed_queue(hw)) {
2055 hw->use_simple_rx = 0;
2056 hw->use_inorder_rx = 0;
2057 hw->use_inorder_tx = 0;
2060 #if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
2061 if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
2062 hw->use_simple_rx = 0;
2065 if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
2066 hw->use_simple_rx = 0;
2069 if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
2070 DEV_RX_OFFLOAD_TCP_CKSUM |
2071 DEV_RX_OFFLOAD_TCP_LRO |
2072 DEV_RX_OFFLOAD_VLAN_STRIP))
2073 hw->use_simple_rx = 0;
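/*
 * Datapath selection summary (restating the checks above): the vectorized
 * "simple" RX path survives only for a split, out-of-order, non-mergeable
 * ring with no RX offloads (checksum, LRO, VLAN strip) requested and, on
 * ARM, only when NEON is available; every other combination falls back to
 * the standard paths chosen later in set_rxtx_funcs().
 */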
2082 virtio_dev_start(struct rte_eth_dev *dev)
2084 uint16_t nb_queues, i;
2085 struct virtnet_rx *rxvq;
2086 struct virtnet_tx *txvq __rte_unused;
2087 struct virtio_hw *hw = dev->data->dev_private;
2090 /* Finish the initialization of the queues */
2091 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2092 ret = virtio_dev_rx_queue_setup_finish(dev, i);
2096 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2097 ret = virtio_dev_tx_queue_setup_finish(dev, i);
2102 /* check if lsc interrupt feature is enabled */
2103 if (dev->data->dev_conf.intr_conf.lsc) {
2104 if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
2105 PMD_DRV_LOG(ERR, "link status not supported by host");
2110	/* Enable uio/vfio intr/eventfd mapping: although we already did that
2111	 * in device configure, it could be unmapped when the device is
2114 if (dev->data->dev_conf.intr_conf.lsc ||
2115 dev->data->dev_conf.intr_conf.rxq) {
2116 virtio_intr_disable(dev);
2118 /* Setup interrupt callback */
2119 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
2120 rte_intr_callback_register(dev->intr_handle,
2121 virtio_interrupt_handler,
2124 if (virtio_intr_enable(dev) < 0) {
2125 PMD_DRV_LOG(ERR, "interrupt enable failed");
2130	/* Notify the backend.
2131	 * Otherwise the tap backend might already have stopped its queue due to
2132	 * fullness; the vhost backend would then have no chance to be woken up.
2134 nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2135 if (hw->max_queue_pairs > 1) {
2136 if (virtio_set_multiple_queues(dev, nb_queues) != 0)
2140 PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
2142 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2143 rxvq = dev->data->rx_queues[i];
2144 /* Flush the old packets */
2145 virtqueue_rxvq_flush(rxvq->vq);
2146 virtqueue_notify(rxvq->vq);
2149 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2150 txvq = dev->data->tx_queues[i];
2151 virtqueue_notify(txvq->vq);
2154 PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
2156 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2157 rxvq = dev->data->rx_queues[i];
2158 VIRTQUEUE_DUMP(rxvq->vq);
2161 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2162 txvq = dev->data->tx_queues[i];
2163 VIRTQUEUE_DUMP(txvq->vq);
2166 set_rxtx_funcs(dev);
2169 /* Initialize Link state */
2170 virtio_dev_link_update(dev, 0);
2175 static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
2177 struct virtio_hw *hw = dev->data->dev_private;
2178 uint16_t nr_vq = virtio_get_nr_vq(hw);
2179 const char *type __rte_unused;
2180 unsigned int i, mbuf_num = 0;
2181 struct virtqueue *vq;
2182 struct rte_mbuf *buf;
2185 if (hw->vqs == NULL)
2188 for (i = 0; i < nr_vq; i++) {
2193 queue_type = virtio_get_queue_type(hw, i);
2194 if (queue_type == VTNET_RQ)
2196 else if (queue_type == VTNET_TQ)
2202 "Before freeing %s[%d] used and unused buf",
2206 while ((buf = virtqueue_detach_unused(vq)) != NULL) {
2207 rte_pktmbuf_free(buf);
2212 "After freeing %s[%d] used and unused buf",
2217 PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
2221 * Stop device: disable interrupt and mark link down
2224 virtio_dev_stop(struct rte_eth_dev *dev)
2226 struct virtio_hw *hw = dev->data->dev_private;
2227 struct rte_eth_link link;
2228 struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
2230 PMD_INIT_LOG(DEBUG, "stop");
2232 rte_spinlock_lock(&hw->state_lock);
2235 hw->started = false;
2237 if (intr_conf->lsc || intr_conf->rxq) {
2238 virtio_intr_disable(dev);
2240 /* Reset interrupt callback */
2241 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
2242 rte_intr_callback_unregister(dev->intr_handle,
2243 virtio_interrupt_handler,
2248 memset(&link, 0, sizeof(link));
2249 rte_eth_linkstatus_set(dev, &link);
2251 rte_spinlock_unlock(&hw->state_lock);
2255 virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
2257 struct rte_eth_link link;
2259 struct virtio_hw *hw = dev->data->dev_private;
2261 memset(&link, 0, sizeof(link));
2262 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2263 link.link_speed = ETH_SPEED_NUM_10G;
2264 link.link_autoneg = ETH_LINK_FIXED;
2267 link.link_status = ETH_LINK_DOWN;
2268 } else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
2269 PMD_INIT_LOG(DEBUG, "Get link status from hw");
2270 vtpci_read_dev_config(hw,
2271 offsetof(struct virtio_net_config, status),
2272 &status, sizeof(status));
2273 if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
2274 link.link_status = ETH_LINK_DOWN;
2275 PMD_INIT_LOG(DEBUG, "Port %d is down",
2276 dev->data->port_id);
2278 link.link_status = ETH_LINK_UP;
2279 PMD_INIT_LOG(DEBUG, "Port %d is up",
2280 dev->data->port_id);
2283 link.link_status = ETH_LINK_UP;
2286 return rte_eth_linkstatus_set(dev, &link);
2290 virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2292 const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
2293 struct virtio_hw *hw = dev->data->dev_private;
2294 uint64_t offloads = rxmode->offloads;
2296 if (mask & ETH_VLAN_FILTER_MASK) {
2297 if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
2298 !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
2301 "vlan filtering not available on this host");
2307 if (mask & ETH_VLAN_STRIP_MASK)
2308 hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
2314 virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2316 uint64_t tso_mask, host_features;
2317 struct virtio_hw *hw = dev->data->dev_private;
2319 dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
2321 dev_info->max_rx_queues =
2322 RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
2323 dev_info->max_tx_queues =
2324 RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
2325 dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
2326 dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
2327 dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
2329 host_features = VTPCI_OPS(hw)->get_features(hw);
2330 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
2331 if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
2332 dev_info->rx_offload_capa |=
2333 DEV_RX_OFFLOAD_TCP_CKSUM |
2334 DEV_RX_OFFLOAD_UDP_CKSUM;
2336 if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
2337 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
2338 tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
2339 (1ULL << VIRTIO_NET_F_GUEST_TSO6);
2340 if ((host_features & tso_mask) == tso_mask)
2341 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
2343 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
2344 DEV_TX_OFFLOAD_VLAN_INSERT;
2345 if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
2346 dev_info->tx_offload_capa |=
2347 DEV_TX_OFFLOAD_UDP_CKSUM |
2348 DEV_TX_OFFLOAD_TCP_CKSUM;
2350 tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
2351 (1ULL << VIRTIO_NET_F_HOST_TSO6);
2352 if ((host_features & tso_mask) == tso_mask)
2353 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
2357	 * It enables testpmd to collect per-queue stats.
2360 virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
2361 __rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
2362 __rte_unused uint8_t is_rx)
2367 RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
2368 RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
2369 RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
2371 RTE_INIT(virtio_init_log)
2373 virtio_logtype_init = rte_log_register("pmd.net.virtio.init");
2374 if (virtio_logtype_init >= 0)
2375 rte_log_set_level(virtio_logtype_init, RTE_LOG_NOTICE);
2376 virtio_logtype_driver = rte_log_register("pmd.net.virtio.driver");
2377 if (virtio_logtype_driver >= 0)
2378 rte_log_set_level(virtio_logtype_driver, RTE_LOG_NOTICE);