#define ETH_VHOST_CLIENT_ARG "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
#define ETH_VHOST_IOMMU_SUPPORT "iommu-support"
+#define ETH_VHOST_POSTCOPY_SUPPORT "postcopy-support"
+#define ETH_VHOST_VIRTIO_NET_F_HOST_TSO "tso"
#define VHOST_MAX_PKT_BURST 32
static const char *valid_arguments[] = {
ETH_VHOST_CLIENT_ARG,
ETH_VHOST_DEQUEUE_ZERO_COPY,
ETH_VHOST_IOMMU_SUPPORT,
+ /* newly accepted devargs: postcopy live-migration and TSO control */
+ ETH_VHOST_POSTCOPY_SUPPORT,
+ ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
NULL
};
-static struct ether_addr base_eth_addr = {
+static struct rte_ether_addr base_eth_addr = {
.addr_bytes = {
0x56 /* V */,
0x48 /* H */,
char *dev_name;
char *iface_name;
uint16_t max_queues;
- uint16_t vid;
+ int vid;
rte_atomic32_t started;
uint8_t vlan_strip;
};
#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
sizeof(vhost_txport_stat_strings[0]))
-static void
+static int
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
struct vhost_queue *vq = NULL;
continue;
memset(&vq->stats, 0, sizeof(vq->stats));
}
+
+ return 0;
}
static int
vhost_count_multicast_broadcast(struct vhost_queue *vq,
struct rte_mbuf *mbuf)
{
- struct ether_addr *ea = NULL;
+ struct rte_ether_addr *ea = NULL;
struct vhost_stats *pstats = &vq->stats;
- ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
- if (is_multicast_ether_addr(ea)) {
- if (is_broadcast_ether_addr(ea))
+ ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+ if (rte_is_multicast_ether_addr(ea)) {
+ if (rte_is_broadcast_ether_addr(ea))
pstats->xstats[VHOST_BROADCAST_PKT]++;
else
pstats->xstats[VHOST_MULTICAST_PKT]++;
for (i = 0; likely(i < nb_rx); i++) {
bufs[i]->port = r->port;
- bufs[i]->ol_flags = 0;
bufs[i]->vlan_tci = 0;
if (r->internal->vlan_strip)
struct pmd_internal *internal = dev->data->dev_private;
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
- internal->vlan_strip = rxmode->hw_vlan_strip;
-
- if (rxmode->hw_vlan_filter)
- VHOST_LOG(WARNING,
- "vhost(%s): vlan filtering not available\n",
- internal->dev_name);
+ /* VLAN strip is now requested through the per-port rx offload
+  * flags instead of the old hw_vlan_strip bit field.
+  */
+ internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
return 0;
}
/* won't be NULL */
state = vring_states[eth_dev->data->port_id];
rte_spinlock_lock(&state->lock);
+ /* Vring already in the requested state: drop the lock and return
+  * early, skipping the redundant bookkeeping below.
+  */
+ if (state->cur[vring] == enable) {
+ rte_spinlock_unlock(&state->lock);
+ return 0;
+ }
state->cur[vring] = enable;
state->max_vring = RTE_MAX(vring, state->max_vring);
rte_spinlock_unlock(&state->lock);
for (i = 0; i < dev->data->nb_tx_queues; i++)
rte_free(dev->data->tx_queues[i]);
- rte_free(dev->data->mac_addrs);
free(internal->dev_name);
free(internal->iface_name);
rte_free(internal);
dev->data->dev_private = NULL;
+
+ /* Also release this port's vring state and clear the global slot
+  * so the port id can be reused cleanly.
+  */
+ rte_free(vring_states[dev->data->port_id]);
+ vring_states[dev->data->port_id] = NULL;
}
static int
return 0;
}
-static void
+static int
eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
internal = dev->data->dev_private;
if (internal == NULL) {
VHOST_LOG(ERR, "Invalid device specified\n");
- return;
+ return -ENODEV;
}
dev_info->max_mac_addrs = 1;
dev_info->max_rx_queues = internal->max_queues;
dev_info->max_tx_queues = internal->max_queues;
dev_info->min_rx_bufsize = 0;
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ return 0;
}
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
unsigned i;
- unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
+ unsigned long rx_total = 0, tx_total = 0;
unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
struct vhost_queue *vq;
continue;
vq = dev->data->tx_queues[i];
stats->q_opackets[i] = vq->stats.pkts;
- tx_missed_total += vq->stats.missed_pkts;
tx_total += stats->q_opackets[i];
stats->q_obytes[i] = vq->stats.bytes;
stats->ipackets = rx_total;
stats->opackets = tx_total;
- stats->oerrors = tx_missed_total;
stats->ibytes = rx_total_bytes;
stats->obytes = tx_total_bytes;
return 0;
}
-static void
+static int
eth_stats_reset(struct rte_eth_dev *dev)
{
struct vhost_queue *vq;
vq->stats.bytes = 0;
vq->stats.missed_pkts = 0;
}
+
+ return 0;
}
static void
.rx_queue_intr_disable = eth_rxq_intr_disable,
};
-static struct rte_vdev_driver pmd_vhost_drv;
-
static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
- int16_t queues, const unsigned int numa_node, uint64_t flags)
+ int16_t queues, const unsigned int numa_node, uint64_t flags,
+ uint64_t disable_flags)
{
const char *name = rte_vdev_device_name(dev);
struct rte_eth_dev_data *data;
struct pmd_internal *internal = NULL;
struct rte_eth_dev *eth_dev = NULL;
- struct ether_addr *eth_addr = NULL;
+ struct rte_ether_addr *eth_addr = NULL;
struct rte_vhost_vring_state *vring_state = NULL;
struct internal_list *list = NULL;
eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
if (eth_dev == NULL)
goto error;
+ data = eth_dev->data;
eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
if (eth_addr == NULL)
goto error;
+ data->mac_addrs = eth_addr;
*eth_addr = base_eth_addr;
eth_addr->addr_bytes[5] = eth_dev->data->port_id;
rte_spinlock_init(&vring_state->lock);
vring_states[eth_dev->data->port_id] = vring_state;
- data = eth_dev->data;
data->nb_rx_queues = queues;
data->nb_tx_queues = queues;
internal->max_queues = queues;
+ internal->vid = -1;
data->dev_link = pmd_link;
- data->mac_addrs = eth_addr;
- data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+ data->dev_flags = RTE_ETH_DEV_INTR_LSC | RTE_ETH_DEV_CLOSE_REMOVE;
eth_dev->dev_ops = &ops;
if (rte_vhost_driver_register(iface_name, flags))
goto error;
+ /* Apply user-requested feature disables (e.g. tso=0 sets the host
+  * TSO bits in disable_flags) on the freshly registered socket.
+  */
+ if (disable_flags) {
+ if (rte_vhost_driver_disable_features(iface_name,
+ disable_flags))
+ goto error;
+ }
+
if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
VHOST_LOG(ERR, "Can't register callbacks\n");
goto error;
goto error;
}
- return data->port_id;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
error:
if (internal) {
free(internal->dev_name);
}
rte_free(vring_state);
- rte_free(eth_addr);
- if (eth_dev)
- rte_eth_dev_release_port(eth_dev);
- rte_free(internal);
+ rte_eth_dev_release_port(eth_dev);
rte_free(list);
return -1;
char *iface_name;
uint16_t queues;
uint64_t flags = 0;
+ uint64_t disable_flags = 0;
int client_mode = 0;
int dequeue_zero_copy = 0;
int iommu_support = 0;
+ int postcopy_support = 0;
+ int tso = 0;
struct rte_eth_dev *eth_dev;
const char *name = rte_vdev_device_name(dev);
VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);
- if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
- strlen(rte_vdev_device_args(dev)) == 0) {
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (!eth_dev) {
VHOST_LOG(ERR, "Failed to probe %s\n", name);
}
/* TODO: request info from primary to set up Rx and Tx */
eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
}
flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
}
+ /* "postcopy-support" devarg: when non-zero, opt this vhost-user
+  * socket in to postcopy live migration.
+  */
+ if (rte_kvargs_count(kvlist, ETH_VHOST_POSTCOPY_SUPPORT) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_VHOST_POSTCOPY_SUPPORT,
+ &open_int, &postcopy_support);
+ if (ret < 0)
+ goto out_free;
+
+ if (postcopy_support)
+ flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
+ }
+
+ /* "tso" devarg: unless tso=1 is given, mask the host TSO4/TSO6
+  * feature bits via disable_flags.
+  */
+ if (rte_kvargs_count(kvlist, ETH_VHOST_VIRTIO_NET_F_HOST_TSO) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
+ &open_int, &tso);
+ if (ret < 0)
+ goto out_free;
+
+ if (tso == 0) {
+ disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO4);
+ disable_flags |= (1ULL << VIRTIO_NET_F_HOST_TSO6);
+ }
+ }
+
if (dev->device.numa_node == SOCKET_ID_ANY)
dev->device.numa_node = rte_socket_id();
- eth_dev_vhost_create(dev, iface_name, queues, dev->device.numa_node,
- flags);
+ ret = eth_dev_vhost_create(dev, iface_name, queues,
+ dev->device.numa_node, flags, disable_flags);
+ if (ret == -1)
+ VHOST_LOG(ERR, "Failed to create %s\n", name);
out_free:
rte_kvargs_free(kvlist);
/* find an ethdev entry */
eth_dev = rte_eth_dev_allocated(name);
if (eth_dev == NULL)
- return -ENODEV;
+ return 0;
- eth_dev_close(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return rte_eth_dev_release_port(eth_dev);
- rte_free(vring_states[eth_dev->data->port_id]);
- vring_states[eth_dev->data->port_id] = NULL;
+ eth_dev_close(eth_dev);
rte_eth_dev_release_port(eth_dev);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
"iface=<ifc> "
- "queues=<int>");
-
-RTE_INIT(vhost_init_log);
-static void
-vhost_init_log(void)
+ "queues=<int> "
+ "client=<0|1> "
+ "dequeue-zero-copy=<0|1> "
+ "iommu-support=<0|1> "
+ "postcopy-support=<0|1> "
+ "tso=<0|1>");
+
+RTE_INIT(vhost_init_log)
{
vhost_logtype = rte_log_register("pmd.net.vhost");
if (vhost_logtype >= 0)