#define ETH_VHOST_CLIENT_ARG "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
-static const char *drivername = "VHOST PMD";
-
static const char *valid_arguments[] = {
ETH_VHOST_IFACE_ARG,
ETH_VHOST_QUEUES_ARG,
};
struct pmd_internal {
+ rte_atomic32_t dev_attached; /* nonzero while a vhost device is connected; set in new_device(), cleared in destroy_device() */
char *dev_name;
char *iface_name;
uint16_t max_queues;
- uint64_t flags;
-
- volatile uint16_t once;
+ rte_atomic32_t started; /* nonzero between eth_dev_start() and eth_dev_stop(); replaces the old 'once' flag */
};
struct internal_list {
*(uint64_t *)(((char *)vq)
+ vhost_rxport_stat_strings[t].offset);
}
+ xstats[count].id = count;
count++;
}
for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
*(uint64_t *)(((char *)vq)
+ vhost_txport_stat_strings[t].offset);
}
+ xstats[count].id = count;
count++;
}
return count;
return list;
}
+/*
+ * Recompute whether rx/tx bursts may access the vhost device and apply
+ * the result to every rx/tx queue of @dev.
+ *
+ * Queuing is allowed only when both internal->started (port started via
+ * eth_dev_start) and internal->dev_attached (vhost connection established
+ * via the new_device callback) are set; otherwise it is disabled.  When
+ * changing a queue's allow_queuing flag, spin until any in-flight
+ * rx/tx_pkt_burst has left its critical section (vq->while_queuing) so
+ * the caller may safely tear down or reconfigure afterwards.
+ */
+static void
+update_queuing_status(struct rte_eth_dev *dev)
+{
+ struct pmd_internal *internal = dev->data->dev_private;
+ struct vhost_queue *vq;
+ unsigned int i;
+ int allow_queuing = 1;
+
+ /* Both conditions must hold; either stop/detach disables queuing */
+ if (rte_atomic32_read(&internal->started) == 0 ||
+ rte_atomic32_read(&internal->dev_attached) == 0)
+ allow_queuing = 0;
+
+ /* Wait until rx/tx_pkt_burst stops accessing vhost device */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (vq == NULL)
+ continue;
+ rte_atomic32_set(&vq->allow_queuing, allow_queuing);
+ while (rte_atomic32_read(&vq->while_queuing))
+ rte_pause();
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (vq == NULL)
+ continue;
+ rte_atomic32_set(&vq->allow_queuing, allow_queuing);
+ while (rte_atomic32_read(&vq->while_queuing))
+ rte_pause();
+ }
+}
+
static int
new_device(int vid)
{
eth_dev->data->dev_link.link_status = ETH_LINK_UP;
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- vq = eth_dev->data->rx_queues[i];
- if (vq == NULL)
- continue;
- rte_atomic32_set(&vq->allow_queuing, 1);
- }
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- vq = eth_dev->data->tx_queues[i];
- if (vq == NULL)
- continue;
- rte_atomic32_set(&vq->allow_queuing, 1);
- }
+ rte_atomic32_set(&internal->dev_attached, 1);
+ update_queuing_status(eth_dev);
RTE_LOG(INFO, PMD, "New connection established\n");
destroy_device(int vid)
{
struct rte_eth_dev *eth_dev;
+ struct pmd_internal *internal;
struct vhost_queue *vq;
struct internal_list *list;
char ifname[PATH_MAX];
return;
}
eth_dev = list->eth_dev;
+ internal = eth_dev->data->dev_private;
- /* Wait until rx/tx_pkt_burst stops accessing vhost device */
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- vq = eth_dev->data->rx_queues[i];
- if (vq == NULL)
- continue;
- rte_atomic32_set(&vq->allow_queuing, 0);
- while (rte_atomic32_read(&vq->while_queuing))
- rte_pause();
- }
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- vq = eth_dev->data->tx_queues[i];
- if (vq == NULL)
- continue;
- rte_atomic32_set(&vq->allow_queuing, 0);
- while (rte_atomic32_read(&vq->while_queuing))
- rte_pause();
- }
+ rte_atomic32_set(&internal->dev_attached, 0);
+ update_queuing_status(eth_dev);
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
eth_dev_start(struct rte_eth_dev *dev)
{
struct pmd_internal *internal = dev->data->dev_private;
- int ret = 0;
- if (rte_atomic16_cmpset(&internal->once, 0, 1)) {
- ret = rte_vhost_driver_register(internal->iface_name,
- internal->flags);
- if (ret)
- return ret;
- }
-
- /* We need only one message handling thread */
- if (rte_atomic16_add_return(&nb_started_ports, 1) == 1)
- ret = vhost_driver_session_start();
+ rte_atomic32_set(&internal->started, 1);
+ update_queuing_status(dev);
- return ret;
+ return 0;
}
static void
{
struct pmd_internal *internal = dev->data->dev_private;
- if (rte_atomic16_cmpset(&internal->once, 1, 0))
- rte_vhost_driver_unregister(internal->iface_name);
-
- if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)
- vhost_driver_session_stop();
+ rte_atomic32_set(&internal->started, 0);
+ update_queuing_status(dev);
}
static int
return;
}
- dev_info->driver_name = drivername;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
dev_info->max_rx_queues = internal->max_queues;
if (vring_state == NULL)
goto error;
- TAILQ_INIT(ð_dev->link_intr_cbs);
-
/* now put it all together
* - store queue data in internal,
* - store numa_node info in ethdev data
internal->iface_name = strdup(iface_name);
if (internal->iface_name == NULL)
goto error;
- internal->flags = flags;
list->eth_dev = eth_dev;
pthread_mutex_lock(&internal_list_lock);
eth_dev->rx_pkt_burst = eth_vhost_rx;
eth_dev->tx_pkt_burst = eth_vhost_tx;
+ if (rte_vhost_driver_register(iface_name, flags))
+ goto error;
+
+ /* We need only one message handling thread */
+ if (rte_atomic16_add_return(&nb_started_ports, 1) == 1) {
+ if (vhost_driver_session_start())
+ goto error;
+ }
+
return data->port_id;
error:
eth_dev_stop(eth_dev);
+ rte_vhost_driver_unregister(internal->iface_name);
+
+ if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)
+ vhost_driver_session_stop();
+
rte_free(vring_states[eth_dev->data->port_id]);
vring_states[eth_dev->data->port_id] = NULL;