#define ETH_VHOST_CLIENT_ARG "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
#define ETH_VHOST_IOMMU_SUPPORT "iommu-support"
+#define ETH_VHOST_POSTCOPY_SUPPORT "postcopy-support"
+#define ETH_VHOST_VIRTIO_NET_F_HOST_TSO "tso"
+#define ETH_VHOST_LINEAR_BUF "linear-buffer"
+#define ETH_VHOST_EXT_BUF "ext-buffer"
#define VHOST_MAX_PKT_BURST 32
static const char *valid_arguments[] = {
ETH_VHOST_CLIENT_ARG,
ETH_VHOST_DEQUEUE_ZERO_COPY,
ETH_VHOST_IOMMU_SUPPORT,
+ ETH_VHOST_POSTCOPY_SUPPORT,
+ ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
+ ETH_VHOST_LINEAR_BUF,
+ ETH_VHOST_EXT_BUF,
NULL
};
-static struct ether_addr base_eth_addr = {
+static struct rte_ether_addr base_eth_addr = {
.addr_bytes = {
0x56 /* V */,
0x48 /* H */,
struct pmd_internal {
rte_atomic32_t dev_attached;
- char *dev_name;
char *iface_name;
+ uint64_t flags;
+ uint64_t disable_flags;
uint16_t max_queues;
int vid;
rte_atomic32_t started;
#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
sizeof(vhost_txport_stat_strings[0]))
-static void
+static int
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
struct vhost_queue *vq = NULL;
continue;
memset(&vq->stats, 0, sizeof(vq->stats));
}
+
+ return 0;
}
static int
vhost_count_multicast_broadcast(struct vhost_queue *vq,
struct rte_mbuf *mbuf)
{
- struct ether_addr *ea = NULL;
+ struct rte_ether_addr *ea = NULL;
struct vhost_stats *pstats = &vq->stats;
- ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
- if (is_multicast_ether_addr(ea)) {
- if (is_broadcast_ether_addr(ea))
+ ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+ if (rte_is_multicast_ether_addr(ea)) {
+ if (rte_is_broadcast_ether_addr(ea))
pstats->xstats[VHOST_BROADCAST_PKT]++;
else
pstats->xstats[VHOST_MULTICAST_PKT]++;
return nb_tx;
}
-static int
-eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
-{
- struct pmd_internal *internal = dev->data->dev_private;
- const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-
- internal->vlan_strip = rxmode->hw_vlan_strip;
-
- if (rxmode->hw_vlan_filter)
- VHOST_LOG(WARNING,
- "vhost(%s): vlan filtering not available\n",
- internal->dev_name);
-
- return 0;
-}
-
static inline struct internal_list *
find_internal_resource(char *ifname)
{
/* won't be NULL */
state = vring_states[eth_dev->data->port_id];
rte_spinlock_lock(&state->lock);
+ if (state->cur[vring] == enable) {
+ rte_spinlock_unlock(&state->lock);
+ return 0;
+ }
state->cur[vring] = enable;
state->max_vring = RTE_MAX(vring, state->max_vring);
rte_spinlock_unlock(&state->lock);
.vring_state_changed = vring_state_changed,
};
+/*
+ * One-time vhost driver setup for an ethdev port.
+ *
+ * Allocates the internal_list entry and the per-port vring state,
+ * registers the vhost-user socket (internal->iface_name) with
+ * internal->flags, optionally disables the features recorded in
+ * internal->disable_flags, registers the vhost event callbacks and
+ * starts the vhost driver.
+ *
+ * Returns 0 on success, or immediately if setup was already done for
+ * this interface; returns -1 on failure, releasing every
+ * partially-acquired resource in reverse order via the goto labels.
+ */
+static int
+vhost_driver_setup(struct rte_eth_dev *eth_dev)
+{
+ struct pmd_internal *internal = eth_dev->data->dev_private;
+ struct internal_list *list = NULL;
+ struct rte_vhost_vring_state *vring_state = NULL;
+ unsigned int numa_node = eth_dev->device->numa_node;
+ const char *name = eth_dev->device->name;
+
+ /* Don't try to setup again if it has already been done. */
+ list = find_internal_resource(internal->iface_name);
+ if (list)
+ return 0;
+
+ list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
+ if (list == NULL)
+ return -1;
+
+ vring_state = rte_zmalloc_socket(name, sizeof(*vring_state),
+ 0, numa_node);
+ if (vring_state == NULL)
+ goto free_list;
+
+ /* Publish the list entry under the global lock so lookups via
+ * find_internal_resource() can see this device.
+ */
+ list->eth_dev = eth_dev;
+ pthread_mutex_lock(&internal_list_lock);
+ TAILQ_INSERT_TAIL(&internal_list, list, next);
+ pthread_mutex_unlock(&internal_list_lock);
+
+ rte_spinlock_init(&vring_state->lock);
+ vring_states[eth_dev->data->port_id] = vring_state;
+
+ if (rte_vhost_driver_register(internal->iface_name, internal->flags))
+ goto list_remove;
+
+ /* Mask out features the user asked to disable (e.g. TSO bits)
+ * before the driver is started.
+ */
+ if (internal->disable_flags) {
+ if (rte_vhost_driver_disable_features(internal->iface_name,
+ internal->disable_flags))
+ goto drv_unreg;
+ }
+
+ if (rte_vhost_driver_callback_register(internal->iface_name,
+ &vhost_ops) < 0) {
+ VHOST_LOG(ERR, "Can't register callbacks\n");
+ goto drv_unreg;
+ }
+
+ if (rte_vhost_driver_start(internal->iface_name) < 0) {
+ VHOST_LOG(ERR, "Failed to start driver for %s\n",
+ internal->iface_name);
+ goto drv_unreg;
+ }
+
+ return 0;
+
+/* Error unwind: undo each step in reverse order of acquisition. */
+drv_unreg:
+ rte_vhost_driver_unregister(internal->iface_name);
+list_remove:
+ vring_states[eth_dev->data->port_id] = NULL;
+ pthread_mutex_lock(&internal_list_lock);
+ TAILQ_REMOVE(&internal_list, list, next);
+ pthread_mutex_unlock(&internal_list_lock);
+ rte_free(vring_state);
+free_list:
+ rte_free(list);
+
+ return -1;
+}
+
int
rte_eth_vhost_get_queue_event(uint16_t port_id,
struct rte_eth_vhost_queue_event *event)
return vid;
}
+/*
+ * dev_configure callback: run the (idempotent) vhost driver setup for
+ * this port and record whether VLAN stripping was requested via the
+ * DEV_RX_OFFLOAD_VLAN_STRIP Rx offload flag.
+ *
+ * Returns 0 on success, -1 if vhost_driver_setup() failed.
+ */
+static int
+eth_dev_configure(struct rte_eth_dev *dev)
+{
+ struct pmd_internal *internal = dev->data->dev_private;
+ const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+ /* NOTE: the same process has to operate a vhost interface
+ * from beginning to end (from eth_dev configure to eth_dev close).
+ * It is user's responsibility at the moment.
+ */
+ if (vhost_driver_setup(dev) < 0)
+ return -1;
+
+ /* !! normalizes the masked offload bit to 0/1. */
+ internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+
+ return 0;
+}
+
static int
eth_dev_start(struct rte_eth_dev *eth_dev)
{
for (i = 0; i < dev->data->nb_tx_queues; i++)
rte_free(dev->data->tx_queues[i]);
- rte_free(dev->data->mac_addrs);
- free(internal->dev_name);
- free(internal->iface_name);
+ rte_free(internal->iface_name);
rte_free(internal);
dev->data->dev_private = NULL;
+
+ rte_free(vring_states[dev->data->port_id]);
+ vring_states[dev->data->port_id] = NULL;
}
static int
return 0;
}
-static void
+static int
eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
internal = dev->data->dev_private;
if (internal == NULL) {
VHOST_LOG(ERR, "Invalid device specified\n");
- return;
+ return -ENODEV;
}
dev_info->max_mac_addrs = 1;
dev_info->max_rx_queues = internal->max_queues;
dev_info->max_tx_queues = internal->max_queues;
dev_info->min_rx_bufsize = 0;
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ return 0;
}
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
unsigned i;
- unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
+ unsigned long rx_total = 0, tx_total = 0;
unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
struct vhost_queue *vq;
continue;
vq = dev->data->tx_queues[i];
stats->q_opackets[i] = vq->stats.pkts;
- tx_missed_total += vq->stats.missed_pkts;
tx_total += stats->q_opackets[i];
stats->q_obytes[i] = vq->stats.bytes;
stats->ipackets = rx_total;
stats->opackets = tx_total;
- stats->oerrors = tx_missed_total;
stats->ibytes = rx_total_bytes;
stats->obytes = tx_total_bytes;
return 0;
}
-static void
+static int
eth_stats_reset(struct rte_eth_dev *dev)
{
struct vhost_queue *vq;
vq->stats.bytes = 0;
vq->stats.missed_pkts = 0;
}
+
+ return 0;
}
static void
.rx_queue_intr_disable = eth_rxq_intr_disable,
};
-static struct rte_vdev_driver pmd_vhost_drv;
-
static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
- int16_t queues, const unsigned int numa_node, uint64_t flags)
+ int16_t queues, const unsigned int numa_node, uint64_t flags,
+ uint64_t disable_flags)
{
const char *name = rte_vdev_device_name(dev);
struct rte_eth_dev_data *data;
struct pmd_internal *internal = NULL;
struct rte_eth_dev *eth_dev = NULL;
- struct ether_addr *eth_addr = NULL;
- struct rte_vhost_vring_state *vring_state = NULL;
- struct internal_list *list = NULL;
+ struct rte_ether_addr *eth_addr = NULL;
VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
numa_node);
- list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
- if (list == NULL)
- goto error;
-
/* reserve an ethdev entry */
eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
if (eth_dev == NULL)
goto error;
+ data = eth_dev->data;
eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
if (eth_addr == NULL)
goto error;
+ data->mac_addrs = eth_addr;
*eth_addr = base_eth_addr;
eth_addr->addr_bytes[5] = eth_dev->data->port_id;
- vring_state = rte_zmalloc_socket(name,
- sizeof(*vring_state), 0, numa_node);
- if (vring_state == NULL)
- goto error;
-
/* now put it all together
* - store queue data in internal,
* - point eth_dev_data to internals
* - and point eth_dev structure to new eth_dev_data structure
*/
internal = eth_dev->data->dev_private;
- internal->dev_name = strdup(name);
- if (internal->dev_name == NULL)
- goto error;
- internal->iface_name = strdup(iface_name);
+ internal->iface_name = rte_malloc_socket(name, strlen(iface_name) + 1,
+ 0, numa_node);
if (internal->iface_name == NULL)
goto error;
+ strcpy(internal->iface_name, iface_name);
- list->eth_dev = eth_dev;
- pthread_mutex_lock(&internal_list_lock);
- TAILQ_INSERT_TAIL(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
-
- rte_spinlock_init(&vring_state->lock);
- vring_states[eth_dev->data->port_id] = vring_state;
-
- data = eth_dev->data;
data->nb_rx_queues = queues;
data->nb_tx_queues = queues;
internal->max_queues = queues;
internal->vid = -1;
+ internal->flags = flags;
+ internal->disable_flags = disable_flags;
data->dev_link = pmd_link;
- data->mac_addrs = eth_addr;
- data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+ data->dev_flags = RTE_ETH_DEV_INTR_LSC | RTE_ETH_DEV_CLOSE_REMOVE;
eth_dev->dev_ops = &ops;
eth_dev->rx_pkt_burst = eth_vhost_rx;
eth_dev->tx_pkt_burst = eth_vhost_tx;
- if (rte_vhost_driver_register(iface_name, flags))
- goto error;
-
- if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
- VHOST_LOG(ERR, "Can't register callbacks\n");
- goto error;
- }
-
- if (rte_vhost_driver_start(iface_name) < 0) {
- VHOST_LOG(ERR, "Failed to start driver for %s\n",
- iface_name);
- goto error;
- }
-
rte_eth_dev_probing_finish(eth_dev);
- return data->port_id;
+ return 0;
error:
- if (internal) {
- free(internal->iface_name);
- free(internal->dev_name);
- }
- rte_free(vring_state);
- rte_free(eth_addr);
- if (eth_dev)
- rte_eth_dev_release_port(eth_dev);
- rte_free(internal);
- rte_free(list);
+ if (internal)
+ rte_free(internal->iface_name);
+ rte_eth_dev_release_port(eth_dev);
return -1;
}
char *iface_name;
uint16_t queues;
uint64_t flags = 0;
+ uint64_t disable_flags = 0;
int client_mode = 0;
int dequeue_zero_copy = 0;
int iommu_support = 0;
+ int postcopy_support = 0;
+ int tso = 0;
+ int linear_buf = 0;
+ int ext_buf = 0;
struct rte_eth_dev *eth_dev;
const char *name = rte_vdev_device_name(dev);
VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);
- if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
- strlen(rte_vdev_device_args(dev)) == 0) {
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (!eth_dev) {
VHOST_LOG(ERR, "Failed to probe %s\n", name);
return -1;
}
- /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->rx_pkt_burst = eth_vhost_rx;
+ eth_dev->tx_pkt_burst = eth_vhost_tx;
eth_dev->dev_ops = &ops;
+ if (dev->device.numa_node == SOCKET_ID_ANY)
+ dev->device.numa_node = rte_socket_id();
+ eth_dev->device = &dev->device;
rte_eth_dev_probing_finish(eth_dev);
return 0;
}
flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
}
+ if (rte_kvargs_count(kvlist, ETH_VHOST_POSTCOPY_SUPPORT) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_VHOST_POSTCOPY_SUPPORT,
+ &open_int, &postcopy_support);
+ if (ret < 0)
+ goto out_free;
+
+ if (postcopy_support)
+ flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_VHOST_VIRTIO_NET_F_HOST_TSO) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
+ &open_int, &tso);
+ if (ret < 0)
+ goto out_free;
+
+ if (tso == 0) {
+ disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
+ disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
+ }
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_VHOST_LINEAR_BUF) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_VHOST_LINEAR_BUF,
+ &open_int, &linear_buf);
+ if (ret < 0)
+ goto out_free;
+
+ if (linear_buf == 1)
+ flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_VHOST_EXT_BUF) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_VHOST_EXT_BUF,
+ &open_int, &ext_buf);
+ if (ret < 0)
+ goto out_free;
+
+ if (ext_buf == 1)
+ flags |= RTE_VHOST_USER_EXTBUF_SUPPORT;
+ }
+
if (dev->device.numa_node == SOCKET_ID_ANY)
dev->device.numa_node = rte_socket_id();
- eth_dev_vhost_create(dev, iface_name, queues, dev->device.numa_node,
- flags);
+ ret = eth_dev_vhost_create(dev, iface_name, queues,
+ dev->device.numa_node, flags, disable_flags);
+ if (ret == -1)
+ VHOST_LOG(ERR, "Failed to create %s\n", name);
out_free:
rte_kvargs_free(kvlist);
/* find an ethdev entry */
eth_dev = rte_eth_dev_allocated(name);
if (eth_dev == NULL)
- return -ENODEV;
+ return 0;
- eth_dev_close(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return rte_eth_dev_release_port(eth_dev);
- rte_free(vring_states[eth_dev->data->port_id]);
- vring_states[eth_dev->data->port_id] = NULL;
+ eth_dev_close(eth_dev);
rte_eth_dev_release_port(eth_dev);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
"iface=<ifc> "
- "queues=<int>");
-
-RTE_INIT(vhost_init_log);
-static void
-vhost_init_log(void)
+ "queues=<int> "
+ "client=<0|1> "
+ "dequeue-zero-copy=<0|1> "
+ "iommu-support=<0|1> "
+ "postcopy-support=<0|1> "
+ "tso=<0|1> "
+ "linear-buffer=<0|1> "
+ "ext-buffer=<0|1>");
+
+RTE_INIT(vhost_init_log)
{
vhost_logtype = rte_log_register("pmd.net.vhost");
if (vhost_logtype >= 0)