#include <stdbool.h>
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>
#define ETH_VHOST_QUEUES_ARG "queues"
#define ETH_VHOST_CLIENT_ARG "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
+#define ETH_VHOST_IOMMU_SUPPORT "iommu-support"
#define VHOST_MAX_PKT_BURST 32
static const char *valid_arguments[] = {
ETH_VHOST_QUEUES_ARG,
ETH_VHOST_CLIENT_ARG,
ETH_VHOST_DEQUEUE_ZERO_COPY,
+ ETH_VHOST_IOMMU_SUPPORT,
NULL
};
char *dev_name;
char *iface_name;
uint16_t max_queues;
+ int vid;
rte_atomic32_t started;
};
unsigned int i;
int allow_queuing = 1;
- if (rte_atomic32_read(&internal->started) == 0 ||
- rte_atomic32_read(&internal->dev_attached) == 0)
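+ /*
+ * Nothing to do until a vhost device is attached; once attached,
+ * queuing is allowed only after the port has been started.
+ */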
+ if (rte_atomic32_read(&internal->dev_attached) == 0)
+ return;
+
+ if (rte_atomic32_read(&internal->started) == 0)
allow_queuing = 0;
/* Wait until rx/tx_pkt_burst stops accessing vhost device */
}
}
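+/*
+ * Bind each configured Rx/Tx queue to the attached vhost device so the
+ * Rx/Tx burst functions know which vid and port they serve.
+ */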
+static void
+queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
+{
+ struct vhost_queue *vq;
+ int i;
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ vq = eth_dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = internal->vid;
+ vq->internal = internal;
+ vq->port = eth_dev->data->port_id;
+ }
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ vq = eth_dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = internal->vid;
+ vq->internal = internal;
+ vq->port = eth_dev->data->port_id;
+ }
+}
+
static int
new_device(int vid)
{
struct rte_eth_dev *eth_dev;
struct internal_list *list;
struct pmd_internal *internal;
- struct vhost_queue *vq;
unsigned i;
char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
eth_dev->data->numa_node = newnode;
#endif
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- vq = eth_dev->data->rx_queues[i];
- if (vq == NULL)
- continue;
- vq->vid = vid;
- vq->internal = internal;
- vq->port = eth_dev->data->port_id;
- }
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- vq = eth_dev->data->tx_queues[i];
- if (vq == NULL)
- continue;
- vq->vid = vid;
- vq->internal = internal;
- vq->port = eth_dev->data->port_id;
+ internal->vid = vid;
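+ /*
+ * The socket may connect before the application has configured the
+ * port's queues; only mark the device attached when both queue arrays
+ * exist, otherwise defer the attach to eth_dev_start().
+ */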
+ if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
+ queue_setup(eth_dev, internal);
+ rte_atomic32_set(&internal->dev_attached, 1);
+ } else {
+ RTE_LOG(INFO, PMD, "RX/TX queues not set up yet\n");
+ rte_atomic32_set(&internal->dev_attached, 0);
}
for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
rte_vhost_enable_guest_notification(vid, i, 0);
eth_dev->data->dev_link.link_status = ETH_LINK_UP;
- rte_atomic32_set(&internal->dev_attached, 1);
update_queuing_status(eth_dev);
- RTE_LOG(INFO, PMD, "New connection established\n");
+ RTE_LOG(INFO, PMD, "Vhost device %d created\n", vid);
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
return 0;
}
eth_dev = list->eth_dev;
internal = eth_dev->data->dev_private;
- rte_atomic32_set(&internal->dev_attached, 0);
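+ /*
+ * Clear started first so update_queuing_status() can drain the
+ * datapath while the device is still marked attached.
+ */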
+ rte_atomic32_set(&internal->started, 0);
update_queuing_status(eth_dev);
+ rte_atomic32_set(&internal->dev_attached, 0);
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
state->max_vring = 0;
rte_spinlock_unlock(&state->lock);
- RTE_LOG(INFO, PMD, "Connection closed\n");
+ RTE_LOG(INFO, PMD, "Vhost device %d destroyed\n", vid);
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int
RTE_LOG(INFO, PMD, "vring%u is %s\n",
vring, enable ? "enabled" : "disabled");
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE,
- NULL, NULL);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
return 0;
}
}
static int
-eth_dev_start(struct rte_eth_dev *dev)
+eth_dev_start(struct rte_eth_dev *eth_dev)
{
- struct pmd_internal *internal = dev->data->dev_private;
+ struct pmd_internal *internal = eth_dev->data->dev_private;
+
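+ /*
+ * Finish a deferred attach: new_device() may have run before the
+ * queues were configured.
+ */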
+ if (unlikely(rte_atomic32_read(&internal->dev_attached) == 0)) {
+ queue_setup(eth_dev, internal);
+ rte_atomic32_set(&internal->dev_attached, 1);
+ }
rte_atomic32_set(&internal->started, 1);
- update_queuing_status(dev);
+ update_queuing_status(eth_dev);
return 0;
}
dev_info->min_rx_bufsize = 0;
}
-static void
+static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
unsigned i;
stats->oerrors = tx_missed_total;
stats->ibytes = rx_total_bytes;
stats->obytes = tx_total_bytes;
+
+ return 0;
}
static void
internal->max_queues = queues;
data->dev_link = pmd_link;
data->mac_addrs = eth_addr;
- data->dev_flags =
- RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
+ data->dev_flags = RTE_ETH_DEV_INTR_LSC;
eth_dev->dev_ops = &ops;
uint64_t flags = 0;
int client_mode = 0;
int dequeue_zero_copy = 0;
+ int iommu_support = 0;
RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n",
rte_vdev_device_name(dev));
flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
}
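+ /*
+ * Optional "iommu-support" devarg, e.g.
+ * --vdev 'net_vhost0,iface=/tmp/sock0,iommu-support=1', adds
+ * RTE_VHOST_USER_IOMMU_SUPPORT to the vhost driver flags.
+ */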
+ if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
+ &open_int, &iommu_support);
+ if (ret < 0)
+ goto out_free;
+
+ if (iommu_support)
+ flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
+ }
+
if (dev->device.numa_node == SOCKET_ID_ANY)
dev->device.numa_node = rte_socket_id();