/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <limits.h>
#ifdef RTE_EXEC_ENV_LINUXAPP
#include <dirent.h>
#endif

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_common.h>

#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>
#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"
static int eth_virtio_dev_init(struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev);
static int  virtio_dev_configure(struct rte_eth_dev *dev);
static int  virtio_dev_start(struct rte_eth_dev *dev);
static void virtio_dev_stop(struct rte_eth_dev *dev);
static void virtio_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
		__rte_unused int wait_to_complete);

static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);

static void virtio_dev_rx_queue_release(__rte_unused void *rxq);
static void virtio_dev_tx_queue_release(__rte_unused void *txq);

static void virtio_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id pci_id_virtio_map[] = {

#define RTE_PCI_DEV_ID_DECL_VIRTIO(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{ .vendor_id = 0, /* sentinel */ },
};
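/*
 * Allocate a virtqueue and register it with the device.
 * queue_type selects between the receive (VTNET_RQ), transmit (VTNET_TQ)
 * and control (VTNET_CQ) queues; vtpci_queue_idx is the hardware queue
 * index written to the device's queue-select register.  On success *pvq
 * holds the new virtqueue and 0 is returned; a negative errno value
 * indicates failure.
 */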
int virtio_dev_queue_setup(struct rte_eth_dev *dev,
			int queue_type,
			uint16_t queue_idx,
			uint8_t vtpci_queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			struct virtqueue **pvq)
{
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	const struct rte_memzone *mz;
	uint16_t vq_size;
	int size;
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct virtqueue *vq = NULL;

	/* Write the virtqueue index to the Queue Select Field */
	VIRTIO_WRITE_REG_2(hw, VIRTIO_PCI_QUEUE_SEL, vtpci_queue_idx);
	PMD_INIT_LOG(DEBUG, "selecting queue: %d\n", vtpci_queue_idx);

	/*
	 * Read the virtqueue size from the Queue Size field.
	 * It is always a power of 2; if 0, the virtqueue does not exist.
	 */
	vq_size = VIRTIO_READ_REG_2(hw, VIRTIO_PCI_QUEUE_NUM);
	PMD_INIT_LOG(DEBUG, "vq_size: %d nb_desc:%d\n", vq_size, nb_desc);
	if (vq_size == 0) {
		PMD_INIT_LOG(ERR, "%s: virtqueue does not exist\n", __func__);
		return -EINVAL;
	} else if (!rte_is_power_of_2(vq_size)) {
		PMD_INIT_LOG(ERR, "%s: virtqueue size is not a power of 2\n",
			__func__);
		return -EINVAL;
	} else if (nb_desc != vq_size) {
		PMD_INIT_LOG(ERR,
			"Warning: nb_desc (%d) is not equal to vq size (%d); falling back to vq size\n",
			nb_desc, vq_size);
		nb_desc = vq_size;
	}
	if (queue_type == VTNET_RQ) {
		rte_snprintf(vq_name, sizeof(vq_name), "port%d_rvq%d",
			dev->data->port_id, queue_idx);
		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
			vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
	} else if (queue_type == VTNET_TQ) {
		rte_snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d",
			dev->data->port_id, queue_idx);
		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
			vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
	} else if (queue_type == VTNET_CQ) {
		rte_snprintf(vq_name, sizeof(vq_name), "port%d_cvq",
			dev->data->port_id);
		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue),
			CACHE_LINE_SIZE);
	}
	if (vq == NULL) {
		PMD_INIT_LOG(ERR, "%s: Can not allocate virtqueue\n", __func__);
		return -ENOMEM;
	}
	/* Copy the queue name only after the allocation has been checked */
	memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
	vq->hw = hw;
	vq->port_id = dev->data->port_id;
	vq->queue_id = queue_idx;
	vq->vq_queue_index = vtpci_queue_idx;
	vq->vq_alignment = VIRTIO_PCI_VRING_ALIGN;
	vq->vq_nentries = vq_size;
	vq->vq_free_cnt = vq_size;

	/*
	 * Reserve a memzone for vring elements
	 */
	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
	PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d\n",
		size, vq->vq_ring_size);

	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
		socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
	if (mz == NULL) {
		rte_free(vq);
		return -ENOMEM;
	}
	/*
	 * The Virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32 bit,
	 * so it only accepts a 32-bit page frame number.  Combined with the
	 * 12-bit VIRTIO_PCI_QUEUE_ADDR_SHIFT, the vring must therefore lie
	 * within the first 2^(32+12) bytes = 16TB of physical memory.
	 */
	if ((mz->phys_addr + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!\n");
		rte_free(vq);
		return -ENOMEM;
	}

	/* Zero the whole ring memzone (mz->len bytes, not sizeof(mz->len)) */
	memset(mz->addr, 0, mz->len);

	vq->vq_ring_mem = mz->phys_addr;
	vq->vq_ring_virt_mem = mz->addr;
	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem:      0x%"PRIx64"\n",
		(uint64_t)mz->phys_addr);
	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%"PRIx64"\n",
		(uint64_t)(uintptr_t)mz->addr);
	vq->virtio_net_hdr_mz = NULL;
	vq->virtio_net_hdr_mem = (void *)NULL;
	if (queue_type == VTNET_TQ) {
		/*
		 * For each xmit packet, allocate a virtio_net_hdr
		 */
		rte_snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d_hdrzone",
			dev->data->port_id, queue_idx);
		vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
			vq_size * sizeof(struct virtio_net_hdr),
			socket_id, 0, CACHE_LINE_SIZE);
		if (vq->virtio_net_hdr_mz == NULL) {
			rte_free(vq);
			return -ENOMEM;
		}
		vq->virtio_net_hdr_mem =
			(void *)(uintptr_t)vq->virtio_net_hdr_mz->phys_addr;
		memset(vq->virtio_net_hdr_mz->addr, 0,
			vq_size * sizeof(struct virtio_net_hdr));
	} else if (queue_type == VTNET_CQ) {
		/* Allocate a page for control vq command, data and status */
		rte_snprintf(vq_name, sizeof(vq_name), "port%d_cvq_hdrzone",
			dev->data->port_id);
		vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
			PAGE_SIZE, socket_id, 0, CACHE_LINE_SIZE);
		if (vq->virtio_net_hdr_mz == NULL) {
			rte_free(vq);
			return -ENOMEM;
		}
		vq->virtio_net_hdr_mem =
			(void *)(uintptr_t)vq->virtio_net_hdr_mz->phys_addr;
		memset(vq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
	}

	/*
	 * Set the guest physical address of the virtqueue in the
	 * VIRTIO_PCI_QUEUE_PFN config register of the device
	 */
	VIRTIO_WRITE_REG_4(hw, VIRTIO_PCI_QUEUE_PFN,
		mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);

	*pvq = vq;
	return 0;
}
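/*
 * The control virtqueue carries virtio-net control commands (rx-mode,
 * MAC and VLAN filter updates); it is only created when the
 * VIRTIO_NET_F_CTRL_VQ feature has been negotiated, see
 * eth_virtio_dev_init() below.
 */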
static int
virtio_dev_cq_queue_setup(struct rte_eth_dev *dev,
		unsigned int socket_id)
{
	struct virtqueue *vq;
	uint16_t nb_desc = 0;
	int ret;
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	ret = virtio_dev_queue_setup(dev, VTNET_CQ, 0, VTNET_SQ_CQ_QUEUE_IDX,
			nb_desc, socket_id, &vq);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "control vq initialization failed\n");
		return ret;
	}

	hw->cvq = vq;
	return 0;
}
static void
virtio_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_LOG(DEBUG, "virtio_dev_close");

	virtio_dev_stop(dev);
}
/*
 * dev_ops for virtio, bare necessities for basic operation
 */
static struct eth_dev_ops virtio_eth_dev_ops = {
	.dev_configure           = virtio_dev_configure,
	.dev_start               = virtio_dev_start,
	.dev_stop                = virtio_dev_stop,
	.dev_close               = virtio_dev_close,

	.dev_infos_get           = virtio_dev_info_get,
	.stats_get               = virtio_dev_stats_get,
	.stats_reset             = virtio_dev_stats_reset,
	.link_update             = virtio_dev_link_update,
	.mac_addr_add            = NULL,
	.mac_addr_remove         = NULL,
	.rx_queue_setup          = virtio_dev_rx_queue_setup,
	/* meaningful only when multiple queues are supported */
	.rx_queue_release        = virtio_dev_rx_queue_release,
	.tx_queue_setup          = virtio_dev_tx_queue_setup,
	/* meaningful only when multiple queues are supported */
	.tx_queue_release        = virtio_dev_tx_queue_release,
};
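/*
 * Illustrative usage (not part of this driver): an application reaches the
 * ops above through the generic ethdev API rather than calling them
 * directly, e.g.
 *
 *	struct rte_eth_conf dev_conf;
 *	memset(&dev_conf, 0, sizeof(dev_conf));
 *	rte_eth_dev_configure(port_id, 1, 1, &dev_conf); // virtio_dev_configure
 *	rte_eth_dev_start(port_id);                      // virtio_dev_start
 *
 * Queue setup calls (rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup())
 * are likewise dispatched to virtio_dev_rx_queue_setup() and
 * virtio_dev_tx_queue_setup().
 */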
static inline int
virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
		struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
			*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
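/*
 * Note: struct rte_eth_link is sized and aligned to fit in a single
 * 64-bit word, which is why one rte_atomic64_cmpset() can copy the whole
 * link status without taking a lock.
 */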
/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
		struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
			*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
static void
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memcpy(stats, &hw->eth_stats, sizeof(*stats));
}

static void
virtio_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Reset software totals */
	memset(&hw->eth_stats, 0, sizeof(hw->eth_stats));
}
static void
virtio_set_hwaddr(struct virtio_hw *hw)
{
	vtpci_write_dev_config(hw,
		offsetof(struct virtio_net_config, mac),
		&hw->mac_addr, ETHER_ADDR_LEN);
}

static void
virtio_get_hwaddr(struct virtio_hw *hw)
{
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&hw->mac_addr, ETHER_ADDR_LEN);
	} else {
		eth_random_addr(&hw->mac_addr[0]);
		virtio_set_hwaddr(hw);
	}
}
static void
virtio_negotiate_features(struct virtio_hw *hw)
{
	uint32_t guest_features, mask;

	mask = VIRTIO_NET_F_CTRL_VQ | VIRTIO_NET_F_CTRL_RX | VIRTIO_NET_F_CTRL_VLAN;
	mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;

	/* TSO and LRO are only available when their corresponding
	 * checksum offload feature is also negotiated.
	 */
	mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_ECN;
	mask |= VIRTIO_NET_F_GUEST_TSO4 | VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN;
	mask |= VTNET_LRO_FEATURES;

	/* rx_mbuf should not be spread across multiple merged segments */
	mask |= VIRTIO_NET_F_MRG_RXBUF;

	/* not negotiating INDIRECT descriptor table support */
	mask |= VIRTIO_RING_F_INDIRECT_DESC;

	/* Prepare guest_features: the features this driver wants to support */
	guest_features = VTNET_FEATURES & ~mask;

	/* Read device (host) feature bits */
	hw->host_features = VIRTIO_READ_REG_4(hw, VIRTIO_PCI_HOST_FEATURES);

	/*
	 * Negotiate features: the subset of the guest feature bits that the
	 * device also supports is written back as the negotiated features.
	 */
	hw->guest_features = vtpci_negotiate_features(hw, guest_features);
}
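/*
 * Worked example of the bit arithmetic above (conceptually, the negotiated
 * set is guest_features & host_features): if VTNET_FEATURES contains
 * VIRTIO_NET_F_MAC and VIRTIO_NET_F_MRG_RXBUF, masking out MRG_RXBUF
 * leaves only VIRTIO_NET_F_MAC in guest_features, and the device then
 * clears any remaining bit it does not offer.
 */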
#ifdef RTE_EXEC_ENV_LINUXAPP
static int
parse_sysfs_value(const char *filename, unsigned long *val)
{
	FILE *f;
	char buf[BUFSIZ];
	char *end = NULL;

	if ((f = fopen(filename, "r")) == NULL) {
		PMD_INIT_LOG(ERR, "%s(): cannot open sysfs value %s\n",
			__func__, filename);
		return -1;
	}

	if (fgets(buf, sizeof(buf), f) == NULL) {
		PMD_INIT_LOG(ERR, "%s(): cannot read sysfs value %s\n",
			__func__, filename);
		fclose(f);
		return -1;
	}
	*val = strtoul(buf, &end, 0);
	if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
		PMD_INIT_LOG(ERR, "%s(): cannot parse sysfs value %s\n",
			__func__, filename);
		fclose(f);
		return -1;
	}
	fclose(f);
	return 0;
}
static int get_uio_dev(struct rte_pci_addr *loc, char *buf, unsigned int buflen)
{
	unsigned int uio_num;
	struct dirent *e;
	DIR *dir;
	char dirname[PATH_MAX];

	/* depending on kernel version, uio can be located in uio/uioX
	 * or uio:uioX */
	rte_snprintf(dirname, sizeof(dirname),
		SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/uio",
		loc->domain, loc->bus, loc->devid, loc->function);
	dir = opendir(dirname);
	if (dir == NULL) {
		/* retry with the parent directory */
		rte_snprintf(dirname, sizeof(dirname),
			SYSFS_PCI_DEVICES "/" PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);
		dir = opendir(dirname);

		if (dir == NULL) {
			PMD_INIT_LOG(ERR, "Cannot opendir %s\n", dirname);
			return -1;
		}
	}

	/* take the first file starting with "uio" */
	while ((e = readdir(dir)) != NULL) {
		/* format could be uio%d ...*/
		int shortprefix_len = sizeof("uio") - 1;
		/* ... or uio:uio%d */
		int longprefix_len = sizeof("uio:uio") - 1;
		char *endptr;

		if (strncmp(e->d_name, "uio", 3) != 0)
			continue;

		/* first try uio%d */
		errno = 0;
		uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
		if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
			rte_snprintf(buf, buflen, "%s/uio%u", dirname, uio_num);
			break;
		}

		/* then try uio:uio%d */
		errno = 0;
		uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
		if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
			rte_snprintf(buf, buflen, "%s/uio:uio%u", dirname,
				uio_num);
			break;
		}
	}
	closedir(dir);

	/* No uio resource found */
	if (e == NULL) {
		PMD_INIT_LOG(ERR, "Could not find uio resource\n");
		return -1;
	}

	return 0;
}
#endif
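/*
 * Example of the sysfs layout probed by the helpers above (the PCI address
 * is illustrative):
 *
 *	/sys/bus/pci/devices/0000:00:03.0/uio/uio0
 *	/sys/bus/pci/devices/0000:00:03.0/uio/uio0/portio/port0/start
 *	/sys/bus/pci/devices/0000:00:03.0/uio/uio0/portio/port0/size
 *
 * The portio start/size values are parsed in eth_virtio_dev_init() below
 * to locate the device's legacy I/O port region.
 */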
/*
 * This function is based on the probe() function in virtio_pci.c
 * It returns 0 on success.
 */
static int
eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr)) {
		PMD_INIT_LOG(ERR,
			"MBUF HEADROOM should be enough to hold virtio net hdr\n");
		return -1;
	}

	if (!(rte_eal_get_configuration()->flags & EAL_FLG_HIGH_IOPL)) {
		PMD_INIT_LOG(ERR,
			"IOPL call failed in EAL init - cannot use virtio PMD driver\n");
		return -1;
	}

	eth_dev->dev_ops = &virtio_eth_dev_ops;
	eth_dev->rx_pkt_burst = &virtio_recv_pkts;
	eth_dev->tx_pkt_burst = &virtio_xmit_pkts;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	pci_dev = eth_dev->pci_dev;

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
#ifdef RTE_EXEC_ENV_LINUXAPP
	{
		char dirname[PATH_MAX];
		char filename[PATH_MAX];
		unsigned long start, size;

		if (get_uio_dev(&pci_dev->addr, dirname, sizeof(dirname)) < 0)
			return -1;

		/* get portio size */
		rte_snprintf(filename, sizeof(filename),
			"%s/portio/port0/size", dirname);
		if (parse_sysfs_value(filename, &size) < 0) {
			PMD_INIT_LOG(ERR, "%s(): cannot parse size\n",
				__func__);
			return -1;
		}

		/* get portio start */
		rte_snprintf(filename, sizeof(filename),
			"%s/portio/port0/start", dirname);
		if (parse_sysfs_value(filename, &start) < 0) {
			PMD_INIT_LOG(ERR, "%s(): cannot parse portio start\n",
				__func__);
			return -1;
		}
		pci_dev->mem_resource[0].addr = (void *)(uintptr_t)start;
		pci_dev->mem_resource[0].len = (uint64_t)size;
		PMD_INIT_LOG(DEBUG, "PCI Port IO found start=0x%lx with "
			"size=0x%lx\n", start, size);
	}
#endif
	hw->io_base = (uint32_t)(uintptr_t)pci_dev->mem_resource[0].addr;

	hw->max_rx_queues = VIRTIO_MAX_RX_QUEUES;
	hw->max_tx_queues = VIRTIO_MAX_TX_QUEUES;

	/* Reset the device, although not strictly necessary at startup */
	vtpci_reset(hw);

	/* Tell the host we've noticed this device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we know how to drive the device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
	virtio_negotiate_features(hw);

	/* Set up the rx header size for the device */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("virtio", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address to: virtio_hw */
	virtio_get_hwaddr(hw);
	ether_addr_copy((struct ether_addr *)hw->mac_addr,
		&eth_dev->data->mac_addrs[0]);
	PMD_INIT_LOG(DEBUG, "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
		hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
		virtio_dev_cq_queue_setup(eth_dev, SOCKET_ID_ANY);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		eth_dev->data->port_id, pci_dev->id.vendor_id,
		pci_dev->id.device_id);

	return 0;
}
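/*
 * Initialization above follows the status sequence of the virtio spec:
 * RESET, then ACKNOWLEDGE and DRIVER, then feature negotiation, and
 * finally DRIVER_OK, which vtpci_reinit_complete() sets from
 * virtio_dev_start().
 */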
static struct eth_driver rte_virtio_pmd = {
	{
		.name = "rte_virtio_pmd",
		.id_table = pci_id_virtio_map,
	},
	.eth_dev_init = eth_virtio_dev_init,
	.dev_private_size = sizeof(struct virtio_adapter),
};
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the [Poll Mode] Driver of PCI virtio devices.
 * Returns 0 on success.
 */
static int
rte_virtio_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
{
	rte_eth_driver_register(&rte_virtio_pmd);
	return 0;
}
/*
 * Only 1 queue is supported, so no queue release operation is needed.
 */
static void
virtio_dev_rx_queue_release(__rte_unused void *rxq)
{
}

static void
virtio_dev_tx_queue_release(__rte_unused void *txq)
{
}
/*
 * Configure virtio device
 * It returns 0 on success.
 */
static int
virtio_dev_configure(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
static int
virtio_dev_start(struct rte_eth_dev *dev)
{
	uint16_t status;
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Tell the host we've noticed this device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we know how to drive the device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);

	hw->adapter_stopped = 0;

	/* Do final configuration before rx/tx engine starts */
	virtio_dev_rxtx_start(dev);

	/* Check VIRTIO_NET_F_STATUS for link status */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, status),
			&status, sizeof(status));
		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
			PMD_INIT_LOG(ERR, "Port: %d Link is DOWN\n",
				dev->data->port_id);
			return -EIO;
		}
		PMD_INIT_LOG(DEBUG, "Port: %d Link is UP\n",
			dev->data->port_id);
	}
	vtpci_reinit_complete(hw);

	/*
	 * Notify the backend so that it starts processing the queued rx
	 * buffers. Otherwise the tap backend might already have stopped its
	 * queue due to fullness, and the vhost backend would never be
	 * woken up again.
	 */
	virtqueue_notify(dev->data->rx_queues[0]);
	PMD_INIT_LOG(DEBUG, "Notified backend at initialization\n");

	return 0;
}
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
{
	struct rte_mbuf *buf;
	int i = 0;

	PMD_INIT_LOG(DEBUG, "Before freeing rxq used and unused buf\n");
	VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[0]);
	while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
			dev->data->rx_queues[0])) != NULL) {
		rte_pktmbuf_free_seg(buf);
		i++;
	}
	PMD_INIT_LOG(DEBUG, "free %d mbufs\n", i);
	PMD_INIT_LOG(DEBUG, "After freeing rxq used and unused buf\n");
	VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[0]);

	PMD_INIT_LOG(DEBUG, "Before freeing txq used and unused bufs\n");
	VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[0]);
	i = 0;
	while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
			dev->data->tx_queues[0])) != NULL) {
		rte_pktmbuf_free_seg(buf);
		i++;
	}
	PMD_INIT_LOG(DEBUG, "free %d mbufs\n", i);
	PMD_INIT_LOG(DEBUG, "After freeing txq used and unused buf\n");
	VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[0]);
}
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* reset the NIC */
	vtpci_reset(hw);
	virtio_dev_free_mbufs(dev);
}
static int
virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct rte_eth_link link, old;
	uint16_t status;
	struct virtio_hw *hw =
		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(&link, 0, sizeof(link));
	virtio_dev_atomic_read_link_status(dev, &link);
	old = link;
	link.link_duplex = FULL_DUPLEX;
	link.link_speed = SPEED_10G;
	if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
		PMD_INIT_LOG(DEBUG, "Get link status from hw\n");
		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, status),
			&status, sizeof(status));
		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
			link.link_status = 0;
			PMD_INIT_LOG(DEBUG, "Port %d is down\n",
				dev->data->port_id);
		} else {
			link.link_status = 1;
			PMD_INIT_LOG(DEBUG, "Port %d is up\n",
				dev->data->port_id);
		}
	} else {
		link.link_status = 1;	/* Link up */
	}
	virtio_dev_atomic_write_link_status(dev, &link);
	if (old.link_status == link.link_status)
		return -1;
	/* changed */
	return 0;
}
static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct virtio_hw *hw = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->driver_name = dev->driver->pci_drv.name;
	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
	dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
	dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
	dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
}
static struct rte_driver rte_virtio_driver = {
	.type = PMD_PDEV,
	.init = rte_virtio_pmd_init,
};

PMD_REGISTER_DRIVER(rte_virtio_driver);