--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2021 Red Hat, Inc.
+ */
+
+#ifndef _VIRTIO_H_
+#define _VIRTIO_H_
+
+#include <rte_ether.h>
+
+struct virtio_hw { /* device state shared by the PCI and virtio-user backends */
+ struct virtqueue **vqs; /* array of virtqueue pointers */
+ uint64_t guest_features; /* features negotiated with the device */
+ uint16_t vtnet_hdr_size;
+ uint8_t started;
+ uint8_t weak_barriers;
+ uint8_t vlan_strip;
+ uint8_t has_tx_offload;
+ uint8_t has_rx_offload;
+ uint8_t use_vec_rx;
+ uint8_t use_vec_tx;
+ uint8_t use_inorder_rx;
+ uint8_t use_inorder_tx;
+ uint8_t opened;
+ uint16_t port_id; /* ethdev port id; also indexes virtio_hw_internal[] */
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
+ uint32_t speed; /* link speed in MB */
+ uint8_t duplex;
+ uint8_t use_msix;
+ uint16_t max_mtu;
+ /*
+ * App management thread and virtio interrupt handler thread
+ * both can change device state; this lock is meant to avoid
+ * such contention.
+ */
+ rte_spinlock_t state_lock;
+ struct rte_mbuf **inject_pkts;
+ uint16_t max_queue_pairs;
+ uint64_t req_guest_features; /* features requested before negotiation */
+ struct virtnet_ctl *cvq; /* control virtqueue, if any */
+};
+
+struct virtio_ops { /* per-backend ops (PCI legacy/modern, virtio-user) */
+ void (*read_dev_cfg)(struct virtio_hw *hw, size_t offset, void *dst, int len);
+ void (*write_dev_cfg)(struct virtio_hw *hw, size_t offset, const void *src, int len);
+ uint8_t (*get_status)(struct virtio_hw *hw);
+ void (*set_status)(struct virtio_hw *hw, uint8_t status);
+ uint64_t (*get_features)(struct virtio_hw *hw);
+ void (*set_features)(struct virtio_hw *hw, uint64_t features);
+ int (*features_ok)(struct virtio_hw *hw); /* < 0 if negotiated features rejected */
+ uint8_t (*get_isr)(struct virtio_hw *hw);
+ uint16_t (*set_config_irq)(struct virtio_hw *hw, uint16_t vec); /* VIRTIO_MSI_NO_VECTOR on failure */
+ uint16_t (*set_queue_irq)(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec); /* VIRTIO_MSI_NO_VECTOR on failure */
+ uint16_t (*get_queue_num)(struct virtio_hw *hw, uint16_t queue_id); /* queue size; 0 means queue absent */
+ int (*setup_queue)(struct virtio_hw *hw, struct virtqueue *vq); /* < 0 on failure */
+ void (*del_queue)(struct virtio_hw *hw, struct virtqueue *vq);
+ void (*notify_queue)(struct virtio_hw *hw, struct virtqueue *vq);
+ void (*intr_detect)(struct virtio_hw *hw); /* may be NULL; callers check before use */
+ int (*dev_close)(struct virtio_hw *hw);
+};
+
+/*
+ * Per-process data — may differ in the multi-process model. Only virtio_ops for now.
+ */
+struct virtio_hw_internal {
+ const struct virtio_ops *virtio_ops; /* backend ops bound to this port */
+};
+
+#define VIRTIO_OPS(hw) (virtio_hw_internal[(hw)->port_id].virtio_ops) /* ops accessor for a given hw */
+
+extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS]; /* one entry per ethdev port */
+
+
+#endif /* _VIRTIO_H_ */
* Read the virtqueue size from the Queue Size field
* Always power of 2 and if 0 virtqueue does not exist
*/
- vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
+ vq_size = VIRTIO_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
if (vq_size == 0) {
PMD_INIT_LOG(ERR, "virtqueue does not exist");
}
}
- if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
+ if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
PMD_INIT_LOG(ERR, "setup_queue failed");
return -EINVAL;
}
/* reset the NIC */
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- VTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
+ VIRTIO_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
if (intr_conf->rxq)
virtio_queues_unbind_intr(dev);
virtio_dev_free_mbufs(dev);
virtio_free_queues(hw);
- return VTPCI_OPS(hw)->dev_close(hw);
+ return VIRTIO_OPS(hw)->dev_close(hw);
}
static int
if (rte_intr_ack(dev->intr_handle) < 0)
return -1;
- if (VTPCI_OPS(hw)->intr_detect)
- VTPCI_OPS(hw)->intr_detect(hw);
+ if (VIRTIO_OPS(hw)->intr_detect)
+ VIRTIO_OPS(hw)->intr_detect(hw);
return 0;
}
if (rte_intr_enable(dev->intr_handle) < 0)
return -1;
- if (VTPCI_OPS(hw)->intr_detect)
- VTPCI_OPS(hw)->intr_detect(hw);
+ if (VIRTIO_OPS(hw)->intr_detect)
+ VIRTIO_OPS(hw)->intr_detect(hw);
return 0;
}
if (rte_intr_disable(dev->intr_handle) < 0)
return -1;
- if (VTPCI_OPS(hw)->intr_detect)
- VTPCI_OPS(hw)->intr_detect(hw);
+ if (VIRTIO_OPS(hw)->intr_detect)
+ VIRTIO_OPS(hw)->intr_detect(hw);
return 0;
}
req_features);
/* Read device(host) feature bits */
- host_features = VTPCI_OPS(hw)->get_features(hw);
+ host_features = VIRTIO_OPS(hw)->get_features(hw);
PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
host_features);
PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
hw->guest_features);
- if (VTPCI_OPS(hw)->features_ok(hw) < 0)
+ if (VIRTIO_OPS(hw)->features_ok(hw) < 0)
return -1;
if (vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
PMD_INIT_LOG(INFO, "queue/interrupt binding");
for (i = 0; i < dev->data->nb_rx_queues; ++i) {
dev->intr_handle->intr_vec[i] = i + 1;
- if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
+ if (VIRTIO_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
VIRTIO_MSI_NO_VECTOR) {
PMD_DRV_LOG(ERR, "failed to set queue vector");
return -EBUSY;
PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
for (i = 0; i < dev->data->nb_rx_queues; ++i)
- VTPCI_OPS(hw)->set_queue_irq(hw,
+ VIRTIO_OPS(hw)->set_queue_irq(hw,
hw->vqs[i * VTNET_CQ],
VIRTIO_MSI_NO_VECTOR);
}
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
/* Enable vector (0) for Link State Intrerrupt */
- if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
+ if (VIRTIO_OPS(hw)->set_config_irq(hw, 0) ==
VIRTIO_MSI_NO_VECTOR) {
PMD_DRV_LOG(ERR, "failed to set config vector");
return -EBUSY;
dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
- host_features = VTPCI_OPS(hw)->get_features(hw);
+ host_features = VIRTIO_OPS(hw)->get_features(hw);
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
return 0;
}
-const struct virtio_pci_ops legacy_ops = {
+const struct virtio_ops legacy_ops = {
.read_dev_cfg = legacy_read_dev_config,
.write_dev_cfg = legacy_write_dev_config,
.get_status = legacy_get_status,
return 0;
}
-const struct virtio_pci_ops modern_ops = {
+const struct virtio_ops modern_ops = {
.read_dev_cfg = modern_read_dev_config,
.write_dev_cfg = modern_write_dev_config,
.get_status = modern_get_status,
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
void *dst, int length)
{
- VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
+ VIRTIO_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}
void
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
const void *src, int length)
{
- VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
+ VIRTIO_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}
uint64_t
* host all support.
*/
features = host_features & hw->guest_features;
- VTPCI_OPS(hw)->set_features(hw, features);
+ VIRTIO_OPS(hw)->set_features(hw, features);
return features;
}
void
vtpci_reset(struct virtio_hw *hw)
{
- VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ VIRTIO_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
/* flush status write */
- VTPCI_OPS(hw)->get_status(hw);
+ VIRTIO_OPS(hw)->get_status(hw);
}
void
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
{
if (status != VIRTIO_CONFIG_STATUS_RESET)
- status |= VTPCI_OPS(hw)->get_status(hw);
+ status |= VIRTIO_OPS(hw)->get_status(hw);
- VTPCI_OPS(hw)->set_status(hw, status);
+ VIRTIO_OPS(hw)->set_status(hw, status);
}
uint8_t
vtpci_get_status(struct virtio_hw *hw)
{
- return VTPCI_OPS(hw)->get_status(hw);
+ return VIRTIO_OPS(hw)->get_status(hw);
}
uint8_t
vtpci_isr(struct virtio_hw *hw)
{
- return VTPCI_OPS(hw)->get_isr(hw);
+ return VIRTIO_OPS(hw)->get_isr(hw);
}
static void *
*/
if (virtio_read_caps(pci_dev, hw) == 0) {
PMD_INIT_LOG(INFO, "modern virtio pci detected.");
- virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
+ VIRTIO_OPS(hw) = &modern_ops;
dev->modern = true;
goto msix_detect;
}
return -1;
}
- virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
+ VIRTIO_OPS(hw) = &legacy_ops;
dev->modern = false;
msix_detect:
- VTPCI_OPS(hw)->intr_detect(hw);
+ VIRTIO_OPS(hw)->intr_detect(hw);
return 0;
}
#include <rte_bus_pci.h>
#include <ethdev_driver.h>
+#include "virtio.h"
+
struct virtqueue;
struct virtnet_ctl;
uint32_t queue_used_hi; /* read-write */
};
-struct virtio_hw;
-
-struct virtio_pci_ops {
- void (*read_dev_cfg)(struct virtio_hw *hw, size_t offset,
- void *dst, int len);
- void (*write_dev_cfg)(struct virtio_hw *hw, size_t offset,
- const void *src, int len);
-
- uint8_t (*get_status)(struct virtio_hw *hw);
- void (*set_status)(struct virtio_hw *hw, uint8_t status);
-
- uint64_t (*get_features)(struct virtio_hw *hw);
- void (*set_features)(struct virtio_hw *hw, uint64_t features);
- int (*features_ok)(struct virtio_hw *hw);
-
- uint8_t (*get_isr)(struct virtio_hw *hw);
-
- uint16_t (*set_config_irq)(struct virtio_hw *hw, uint16_t vec);
-
- uint16_t (*set_queue_irq)(struct virtio_hw *hw, struct virtqueue *vq,
- uint16_t vec);
-
- uint16_t (*get_queue_num)(struct virtio_hw *hw, uint16_t queue_id);
- int (*setup_queue)(struct virtio_hw *hw, struct virtqueue *vq);
- void (*del_queue)(struct virtio_hw *hw, struct virtqueue *vq);
- void (*notify_queue)(struct virtio_hw *hw, struct virtqueue *vq);
- void (*intr_detect)(struct virtio_hw *hw);
- int (*dev_close)(struct virtio_hw *hw);
-};
-
-struct virtio_net_config;
-
-struct virtio_hw {
- struct virtqueue **vqs;
- uint64_t guest_features;
- uint16_t vtnet_hdr_size;
- uint8_t started;
- uint8_t weak_barriers;
- uint8_t vlan_strip;
- uint8_t has_tx_offload;
- uint8_t has_rx_offload;
- uint8_t use_vec_rx;
- uint8_t use_vec_tx;
- uint8_t use_inorder_rx;
- uint8_t use_inorder_tx;
- uint8_t opened;
- uint16_t port_id;
- uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
- uint32_t speed; /* link speed in MB */
- uint8_t duplex;
- uint8_t use_msix;
- uint16_t max_mtu;
- /*
- * App management thread and virtio interrupt handler thread
- * both can change device state, this lock is meant to avoid
- * such a contention.
- */
- rte_spinlock_t state_lock;
- struct rte_mbuf **inject_pkts;
- uint16_t max_queue_pairs;
- uint64_t req_guest_features;
- struct virtnet_ctl *cvq;
-};
-
struct virtio_pci_dev {
struct virtio_hw hw;
struct rte_pci_device *pci_dev;
#define virtio_pci_get_dev(hwp) container_of(hwp, struct virtio_pci_dev, hw)
-/*
- * While virtio_hw is stored in shared memory, this structure stores
- * some infos that may vary in the multiple process model locally.
- * For example, the vtpci_ops pointer.
- */
-struct virtio_hw_internal {
- const struct virtio_pci_ops *vtpci_ops;
-};
-
-#define VTPCI_OPS(hw) (virtio_hw_internal[(hw)->port_id].vtpci_ops)
-
-extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
-
/*
* This structure is just a reference to read
* net device specific config space; it just a chodu structure
void vtpci_legacy_ioport_unmap(struct virtio_hw *hw);
int vtpci_legacy_ioport_map(struct virtio_hw *hw);
-extern const struct virtio_pci_ops legacy_ops;
-extern const struct virtio_pci_ops modern_ops;
-extern const struct virtio_pci_ops virtio_user_ops;
+extern const struct virtio_ops legacy_ops;
+extern const struct virtio_ops modern_ops;
#endif /* _VIRTIO_PCI_H_ */
#include <rte_dev.h>
#include <rte_kvargs.h>
+#include "virtio.h"
#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_logs.h"
}
} else {
if (dev->modern)
- VTPCI_OPS(hw) = &modern_ops;
+ VIRTIO_OPS(hw) = &modern_ops;
else
- VTPCI_OPS(hw) = &legacy_ops;
+ VIRTIO_OPS(hw) = &legacy_ops;
ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), dev);
if (ret < 0) {
#include "virtio_ethdev.h"
#include "virtio_logs.h"
-#include "virtio_pci.h"
+#include "virtio.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"
return 0;
}
-const struct virtio_pci_ops virtio_user_ops = {
+const struct virtio_ops virtio_user_ops = {
.read_dev_cfg = virtio_user_read_dev_config,
.write_dev_cfg = virtio_user_write_dev_config,
.get_status = virtio_user_get_status,
hw->port_id = data->port_id;
dev->port_id = data->port_id;
- virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
+ VIRTIO_OPS(hw) = &virtio_user_ops;
/*
* MSIX is required to enable LSC (see virtio_init_device).
* Here just pretend that we support msix.
dev = eth_dev->data->dev_private;
hw = &dev->hw;
- VTPCI_OPS(hw) = &virtio_user_ops;
+ VIRTIO_OPS(hw) = &virtio_user_ops;
if (eth_virtio_dev_init(eth_dev) < 0) {
PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
static inline void
virtqueue_notify(struct virtqueue *vq)
{
- VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
+ VIRTIO_OPS(vq->hw)->notify_queue(vq->hw, vq);
}
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP