/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#ifdef RTE_EXEC_ENV_LINUXAPP
#include <dirent.h>
#include <fcntl.h>
#endif

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_common.h>

#include <rte_memory.h>
#include <rte_eal.h>

#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"

static int eth_virtio_dev_init(struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev);
static int virtio_dev_configure(struct rte_eth_dev *dev);
static int virtio_dev_start(struct rte_eth_dev *dev);
static void virtio_dev_stop(struct rte_eth_dev *dev);
static void virtio_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
		__rte_unused int wait_to_complete);

static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);

static void virtio_dev_rx_queue_release(__rte_unused void *rxq);
static void virtio_dev_tx_queue_release(__rte_unused void *txq);

static void virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);

/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id pci_id_virtio_map[] = {

#define RTE_PCI_DEV_ID_DECL_VIRTIO(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

	{ .vendor_id = 0, /* sentinel */ },
};

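/*
 * Allocate a virtqueue (receive, transmit or control), reserve the memzone
 * that backs its vring and program the ring's guest-physical address into
 * the device. On success the new queue is returned through *pvq.
 */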
int virtio_dev_queue_setup(struct rte_eth_dev *dev,
			int queue_type,
			uint16_t queue_idx,
			uint8_t vtpci_queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			struct virtqueue **pvq)
{
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	const struct rte_memzone *mz;
	uint16_t vq_size;
	int size;
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct virtqueue *vq = NULL;

	/* Write the virtqueue index to the Queue Select Field */
	VIRTIO_WRITE_REG_2(hw, VIRTIO_PCI_QUEUE_SEL, vtpci_queue_idx);
	PMD_INIT_LOG(DEBUG, "selecting queue: %d\n", vtpci_queue_idx);

	/*
	 * Read the virtqueue size from the Queue Size field.
	 * It is always a power of 2; if 0, the virtqueue does not exist.
	 */
	vq_size = VIRTIO_READ_REG_2(hw, VIRTIO_PCI_QUEUE_NUM);
	PMD_INIT_LOG(DEBUG, "vq_size: %d nb_desc:%d\n", vq_size, nb_desc);

	if (vq_size == 0) {
		PMD_INIT_LOG(ERR, "%s: virtqueue does not exist\n", __func__);
		return -EINVAL;
	} else if (!rte_is_power_of_2(vq_size)) {
		PMD_INIT_LOG(ERR, "%s: virtqueue size is not a power of 2\n", __func__);
		return -EINVAL;
	} else if (nb_desc != vq_size) {
		PMD_INIT_LOG(ERR, "Warning: nb_desc(%d) is not equal to vq size (%d), fall to vq size\n",
			nb_desc, vq_size);
		nb_desc = vq_size;
	}

	if (queue_type == VTNET_RQ) {
		rte_snprintf(vq_name, sizeof(vq_name), "port%d_rvq%d",
			dev->data->port_id, queue_idx);
		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
			vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
	} else if (queue_type == VTNET_TQ) {
		rte_snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d",
			dev->data->port_id, queue_idx);
		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
			vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
	} else if (queue_type == VTNET_CQ) {
		rte_snprintf(vq_name, sizeof(vq_name), "port%d_cvq",
			dev->data->port_id);
		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue),
			CACHE_LINE_SIZE);
	}
	if (vq == NULL) {
		PMD_INIT_LOG(ERR, "%s: Cannot allocate virtqueue\n", __func__);
		return -ENOMEM;
	}
	memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));

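	/* Common bookkeeping shared by all queue types */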
	vq->port_id = dev->data->port_id;
	vq->queue_id = queue_idx;
	vq->vq_queue_index = vtpci_queue_idx;
	vq->vq_alignment = VIRTIO_PCI_VRING_ALIGN;
	vq->vq_nentries = vq_size;
	vq->vq_free_cnt = vq_size;

	/*
	 * Reserve a memzone for vring elements
	 */
	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
	PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d\n", size, vq->vq_ring_size);

	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
		socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
	if (mz == NULL) {
		rte_free(vq);
		return -ENOMEM;
	}

	/*
	 * Virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32 bit,
	 * and only accepts a 32 bit page frame number.
	 * Check if the allocated physical memory exceeds 16TB.
	 */
	if ((mz->phys_addr + vq->vq_ring_size - 1) >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!\n");
		rte_free(vq);
		return -ENOMEM;
	}

	memset(mz->addr, 0, mz->len);

	vq->vq_ring_mem = mz->phys_addr;
	vq->vq_ring_virt_mem = mz->addr;
	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%"PRIx64"\n", (uint64_t)mz->phys_addr);
	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%"PRIx64"\n", (uint64_t)mz->addr);
	vq->virtio_net_hdr_mz = NULL;
	vq->virtio_net_hdr_mem = (void *)NULL;

	if (queue_type == VTNET_TQ) {
		/*
		 * For each xmit packet, allocate a virtio_net_hdr
		 */
		rte_snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d_hdrzone",
			dev->data->port_id, queue_idx);
		vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
			vq_size * sizeof(struct virtio_net_hdr),
			socket_id, 0, CACHE_LINE_SIZE);
		if (vq->virtio_net_hdr_mz == NULL) {
			rte_free(vq);
			return -ENOMEM;
		}
		vq->virtio_net_hdr_mem = (void *)(uintptr_t)vq->virtio_net_hdr_mz->phys_addr;
		memset(vq->virtio_net_hdr_mz->addr, 0, vq_size * sizeof(struct virtio_net_hdr));
	} else if (queue_type == VTNET_CQ) {
		/* Allocate a page for control vq command, data and status */
		rte_snprintf(vq_name, sizeof(vq_name), "port%d_cvq_hdrzone",
			dev->data->port_id);
		vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
			PAGE_SIZE, socket_id, 0, CACHE_LINE_SIZE);
		if (vq->virtio_net_hdr_mz == NULL) {
			rte_free(vq);
			return -ENOMEM;
		}
		vq->virtio_net_hdr_mem = (void *)(uintptr_t)vq->virtio_net_hdr_mz->phys_addr;
		memset(vq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
	}

	/*
	 * Set guest physical address of the virtqueue
	 * in the VIRTIO_PCI_QUEUE_PFN config register of the device
	 */
	VIRTIO_WRITE_REG_4(hw, VIRTIO_PCI_QUEUE_PFN,
		mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);

	*pvq = vq;
	return 0;
}

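/*
 * Set up the control virtqueue used to send VIRTIO_NET_F_CTRL_VQ commands
 * to the device.
 */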
static int
virtio_dev_cq_queue_setup(struct rte_eth_dev *dev,
		unsigned int socket_id)
{
	struct virtqueue *vq;
	uint16_t nb_desc = 0;
	int ret;
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	ret = virtio_dev_queue_setup(dev, VTNET_CQ, 0, VTNET_SQ_CQ_QUEUE_IDX,
			nb_desc, socket_id, &vq);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "control vq initialization failed\n");
		return ret;
	}

	hw->cvq = vq;
	return 0;
}

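/* Close the device: currently this just stops it. */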
static void
virtio_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_LOG(DEBUG, "virtio_dev_close");

	virtio_dev_stop(dev);
}

/*
 * dev_ops for virtio, bare necessities for basic operation
 */
static struct eth_dev_ops virtio_eth_dev_ops = {
	.dev_configure = virtio_dev_configure,
	.dev_start = virtio_dev_start,
	.dev_stop = virtio_dev_stop,
	.dev_close = virtio_dev_close,

	.dev_infos_get = virtio_dev_info_get,
	.stats_get = virtio_dev_stats_get,
	.stats_reset = virtio_dev_stats_reset,
	.link_update = virtio_dev_link_update,
	.mac_addr_add = NULL,
	.mac_addr_remove = NULL,
	.rx_queue_setup = virtio_dev_rx_queue_setup,
	.rx_queue_release = virtio_dev_rx_queue_release, /* meaningful only with multiple queues */
	.tx_queue_setup = virtio_dev_tx_queue_setup,
	.tx_queue_release = virtio_dev_tx_queue_release, /* meaningful only with multiple queues */
};

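/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */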
static inline int
virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
		struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
			*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
		struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
			*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

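/* Report the software statistics maintained by the PMD. */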
static void
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memcpy(stats, &hw->eth_stats, sizeof(*stats));
}

static void
virtio_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Reset software totals */
	memset(&hw->eth_stats, 0, sizeof(hw->eth_stats));
}

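/* Program the MAC address kept in virtio_hw into the device config space. */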
static void
virtio_set_hwaddr(struct virtio_hw *hw)
{
	vtpci_write_dev_config(hw,
		offsetof(struct virtio_net_config, mac),
		&hw->mac_addr, ETHER_ADDR_LEN);
}

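/*
 * Read the MAC address from the device if VIRTIO_NET_F_MAC was offered;
 * otherwise generate a random address and program it into the device.
 */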
static void
virtio_get_hwaddr(struct virtio_hw *hw)
{
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&hw->mac_addr, ETHER_ADDR_LEN);
	} else {
		eth_random_addr(&hw->mac_addr[0]);
		virtio_set_hwaddr(hw);
	}
}

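/*
 * Build the guest feature set (VTNET_FEATURES minus the masked-out bits)
 * and negotiate it against the feature bits offered by the host.
 */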
static void
virtio_negotiate_features(struct virtio_hw *hw)
{
	uint32_t guest_features, mask;

	mask = VIRTIO_NET_F_CTRL_VQ | VIRTIO_NET_F_CTRL_RX | VIRTIO_NET_F_CTRL_VLAN;
	mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;

	/* TSO and LRO are only available when their corresponding
	 * checksum offload feature is also negotiated.
	 */
	mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_ECN;
	mask |= VIRTIO_NET_F_GUEST_TSO4 | VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN;
	mask |= VTNET_LRO_FEATURES;

	/* rx_mbuf should not be in multiple merged segments */
	mask |= VIRTIO_NET_F_MRG_RXBUF;

	/* not negotiating INDIRECT descriptor table support */
	mask |= VIRTIO_RING_F_INDIRECT_DESC;

	/* Prepare guest_features: features that the driver wants to support */
	guest_features = VTNET_FEATURES & ~mask;

	/* Read device (host) feature bits */
	hw->host_features = VIRTIO_READ_REG_4(hw, VIRTIO_PCI_HOST_FEATURES);

	/* Negotiate features: a subset of the device feature bits is written
	 * back as the guest feature bits. */
	hw->guest_features = vtpci_negotiate_features(hw, guest_features);
}

#ifdef RTE_EXEC_ENV_LINUXAPP
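/* Read a single unsigned long value from a sysfs file. */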
static int
parse_sysfs_value(const char *filename, unsigned long *val)
{
	FILE *f;
	char buf[BUFSIZ];
	char *end = NULL;

	if ((f = fopen(filename, "r")) == NULL) {
		PMD_INIT_LOG(ERR, "%s(): cannot open sysfs value %s\n",
			__func__, filename);
		return -1;
	}

	if (fgets(buf, sizeof(buf), f) == NULL) {
		PMD_INIT_LOG(ERR, "%s(): cannot read sysfs value %s\n",
			__func__, filename);
		fclose(f);
		return -1;
	}

	*val = strtoul(buf, &end, 0);
	if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
		PMD_INIT_LOG(ERR, "%s(): cannot parse sysfs value %s\n",
			__func__, filename);
		fclose(f);
		return -1;
	}

	fclose(f);
	return 0;
}

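/*
 * Locate the uio sysfs directory for the given PCI device and return its
 * path in buf. Returns 0 on success, -1 if no uio resource is found.
 */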
static int get_uio_dev(struct rte_pci_addr *loc, char *buf, unsigned int buflen)
{
	unsigned int uio_num;
	struct dirent *e;
	DIR *dir;
	char dirname[PATH_MAX];

	/* depending on kernel version, uio can be located in uio/uioX
	 * or uio:uioX */
	rte_snprintf(dirname, sizeof(dirname),
		SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/uio",
		loc->domain, loc->bus, loc->devid, loc->function);
	dir = opendir(dirname);
	if (dir == NULL) {
		/* retry with the parent directory */
		rte_snprintf(dirname, sizeof(dirname),
			SYSFS_PCI_DEVICES "/" PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);
		dir = opendir(dirname);

		if (dir == NULL) {
			PMD_INIT_LOG(ERR, "Cannot opendir %s\n", dirname);
			return -1;
		}
	}

	/* take the first file starting with "uio" */
	while ((e = readdir(dir)) != NULL) {
		/* format could be uio%d ...*/
		int shortprefix_len = sizeof("uio") - 1;
		/* ... or uio:uio%d */
		int longprefix_len = sizeof("uio:uio") - 1;
		char *endptr;

		if (strncmp(e->d_name, "uio", 3) != 0)
			continue;

		/* first try uio%d */
		errno = 0;
		uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
		if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
			rte_snprintf(buf, buflen, "%s/uio%u", dirname, uio_num);
			break;
		}

		/* then try uio:uio%d */
		errno = 0;
		uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
		if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
			rte_snprintf(buf, buflen, "%s/uio:uio%u", dirname,
				uio_num);
			break;
		}
	}
	closedir(dir);

	/* No uio resource found */
	if (e == NULL) {
		PMD_INIT_LOG(ERR, "Could not find uio resource\n");
		return -1;
	}

	return 0;
}
#endif

/*
 * This function is based on the probe() function in virtio_pci.c
 * It returns 0 on success.
 */
static int
eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr)) {
		PMD_INIT_LOG(ERR,
			"MBUF HEADROOM should be enough to hold virtio net hdr\n");
		return -1;
	}

	if (!(rte_eal_get_configuration()->flags & EAL_FLG_HIGH_IOPL)) {
		PMD_INIT_LOG(ERR,
			"IOPL call failed in EAL init - cannot use virtio PMD driver\n");
		return -1;
	}

	eth_dev->dev_ops = &virtio_eth_dev_ops;
	eth_dev->rx_pkt_burst = &virtio_recv_pkts;
	eth_dev->tx_pkt_burst = &virtio_xmit_pkts;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	pci_dev = eth_dev->pci_dev;

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
#ifdef RTE_EXEC_ENV_LINUXAPP
	{
		char dirname[PATH_MAX];
		char filename[PATH_MAX];
		unsigned long start, size;

		if (get_uio_dev(&pci_dev->addr, dirname, sizeof(dirname)) < 0)
			return -1;

		/* get portio size */
		rte_snprintf(filename, sizeof(filename),
			"%s/portio/port0/size", dirname);
		if (parse_sysfs_value(filename, &size) < 0) {
			PMD_INIT_LOG(ERR, "%s(): cannot parse size\n",
				__func__);
			return -1;
		}

		/* get portio start */
		rte_snprintf(filename, sizeof(filename),
			"%s/portio/port0/start", dirname);
		if (parse_sysfs_value(filename, &start) < 0) {
			PMD_INIT_LOG(ERR, "%s(): cannot parse portio start\n",
				__func__);
			return -1;
		}

		pci_dev->mem_resource[0].addr = (void *)(uintptr_t)start;
		pci_dev->mem_resource[0].len = (uint64_t)size;
		PMD_INIT_LOG(DEBUG, "PCI Port IO found start=0x%lx with "
			"size=0x%lx\n", start, size);
	}
#endif
	hw->io_base = (uint32_t)(uintptr_t)pci_dev->mem_resource[0].addr;

	hw->max_rx_queues = VIRTIO_MAX_RX_QUEUES;
	hw->max_tx_queues = VIRTIO_MAX_TX_QUEUES;

	/* Reset the device although not necessary at startup */
	vtpci_reset(hw);

	/* Tell the host we've noticed this device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we know how to drive the device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
	virtio_negotiate_features(hw);

	/* Setting up rx_header size for the device */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("virtio", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address to: virtio_hw */
	virtio_get_hwaddr(hw);
	ether_addr_copy((struct ether_addr *) hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);
	PMD_INIT_LOG(DEBUG, "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
		hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
		virtio_dev_cq_queue_setup(eth_dev, SOCKET_ID_ANY);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	return 0;
}

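/* PCI ethernet driver definition for the virtio PMD. */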
static struct eth_driver rte_virtio_pmd = {
	{
		.name = "rte_virtio_pmd",
		.id_table = pci_id_virtio_map,
	},
	.eth_dev_init = eth_virtio_dev_init,
	.dev_private_size = sizeof(struct virtio_adapter),
};

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI virtio devices.
 * Returns 0 on success.
 */
int
rte_virtio_pmd_init(void)
{
	rte_eth_driver_register(&rte_virtio_pmd);
	return 0;
}

/*
 * Only 1 queue is supported, no queue release related operation
 */
static void
virtio_dev_rx_queue_release(__rte_unused void *rxq)
{
}

static void
virtio_dev_tx_queue_release(__rte_unused void *txq)
{
}

/*
 * Configure virtio device
 * It returns 0 on success.
 */
static int
virtio_dev_configure(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
virtio_dev_start(struct rte_eth_dev *dev)
{
	uint16_t status;
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Tell the host we've noticed this device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we know how to drive the device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);

	hw->adapter_stopped = 0;

	/* Do final configuration before rx/tx engine starts */
	virtio_dev_rxtx_start(dev);

	/* Check VIRTIO_NET_F_STATUS for link status */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, status),
			&status, sizeof(status));
		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
			PMD_INIT_LOG(ERR, "Port: %d Link is DOWN\n", dev->data->port_id);
			return -EIO;
		}
		PMD_INIT_LOG(DEBUG, "Port: %d Link is UP\n", dev->data->port_id);
	}
	vtpci_reinit_complete(hw);

	/*
	 * Notify the backend.
	 * Otherwise the tap backend might already have stopped its queue due to
	 * fullness, and the vhost backend would then have no chance to be woken up.
	 */
	virtqueue_notify(dev->data->rx_queues[0]);
	PMD_INIT_LOG(DEBUG, "Notified backend at initialization\n");

	return 0;
}

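/* Drain and free any mbufs still held by the rx and tx virtqueues. */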
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
{
	struct rte_mbuf *buf;
	int i = 0;

	PMD_INIT_LOG(DEBUG, "Before freeing rxq used and unused buf\n");
	VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[0]);
	while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
			dev->data->rx_queues[0])) != NULL) {
		rte_pktmbuf_free_seg(buf);
		i++;
	}

	PMD_INIT_LOG(DEBUG, "free %d mbufs\n", i);
	PMD_INIT_LOG(DEBUG, "After freeing rxq used and unused buf\n");
	VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[0]);

	PMD_INIT_LOG(DEBUG, "Before freeing txq used and unused bufs\n");
	VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[0]);

	i = 0;
	while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
			dev->data->tx_queues[0])) != NULL) {
		rte_pktmbuf_free_seg(buf);
		i++;
	}

	PMD_INIT_LOG(DEBUG, "free %d mbufs\n", i);
	PMD_INIT_LOG(DEBUG, "After freeing txq used and unused buf\n");
	VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[0]);
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* reset the device */
	vtpci_reset(hw);
	virtio_dev_free_mbufs(dev);
}

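/*
 * Update dev->data->dev_link, reporting 10G full duplex and deriving the
 * link status from the device STATUS field when available. Returns 0 when
 * the link status changed and -1 when it is unchanged.
 */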
static int
virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct rte_eth_link link, old;
	uint16_t status;
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(&link, 0, sizeof(link));
	virtio_dev_atomic_read_link_status(dev, &link);
	old = link;
	link.link_duplex = FULL_DUPLEX;
	link.link_speed = SPEED_10G;
	if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
		PMD_INIT_LOG(DEBUG, "Get link status from hw\n");
		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, status),
			&status, sizeof(status));
		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
			link.link_status = 0;
			PMD_INIT_LOG(DEBUG, "Port %d is down\n", dev->data->port_id);
		} else {
			link.link_status = 1;
			PMD_INIT_LOG(DEBUG, "Port %d is up\n", dev->data->port_id);
		}
	} else {
		link.link_status = 1; /* Link up */
	}
	virtio_dev_atomic_write_link_status(dev, &link);
	if (old.link_status == link.link_status)
		return -1;

	return 0;
}

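/* Report driver name, queue limits and rx buffer/packet size limits. */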
static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct virtio_hw *hw = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->driver_name = dev->driver->pci_drv.name;
	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
	dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
	dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
	dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;