}
eth_dev->device = &dev->device;
+
+ /* link-state interrupt is simulated (RTE_INTR_HANDLE_EXT), so advertise LSC */
+ dev->intr_handle.type = RTE_INTR_HANDLE_EXT;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
eth_dev->intr_handle = &dev->intr_handle;
+ /* allow ethdev to remove on close */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
return eth_dev;
}
static void
eth_dev_vmbus_release(struct rte_eth_dev *eth_dev)
{
+ /* mac_addrs must not be freed on its own because it is part of dev_private */
+ eth_dev->data->mac_addrs = NULL;
/* free ether device */
rte_eth_dev_release_port(eth_dev);
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eth_dev->data->dev_private);
-
- eth_dev->data->dev_private = NULL;
-
- /*
- * Secondary process will check the name to attach.
- * Clear this field to avoid attaching a released ports.
- */
- eth_dev->data->name[0] = '\0';
-
eth_dev->device = NULL;
eth_dev->intr_handle = NULL;
}
NULL
};
struct rte_kvargs *kvlist;
+ int ret;
if (!devargs)
return 0;
return -EINVAL;
}
- rte_kvargs_process(kvlist, "latency", hn_set_latency, hv);
+ ret = rte_kvargs_process(kvlist, "latency", hn_set_latency, hv);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Unable to process latency arg\n");
+
rte_kvargs_free(kvlist);
- return 0;
+ return ret;
}
/* Update link status.
* means block this call until link is up.
* which is not worth supporting.
*/
-static int
+int
hn_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete)
+ int wait_to_complete)
{
struct hn_data *hv = dev->data->dev_private;
struct rte_eth_link link, old;
hn_rndis_get_linkspeed(hv);
+ hn_vf_link_update(dev, wait_to_complete);
+
link = (struct rte_eth_link) {
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_autoneg = ETH_LINK_SPEED_FIXED,
dev_info->max_tx_queues = hv->max_queues;
hn_rndis_get_offload(hv, dev_info);
+ hn_vf_info_get(hv, dev_info);
}
static void
struct hn_data *hv = dev->data->dev_private;
hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS);
+ hn_vf_promiscuous_enable(dev);
}
static void
if (dev->data->all_multicast)
filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
hn_rndis_set_rxfilter(hv, filter);
+ hn_vf_promiscuous_disable(dev);
}
static void
hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
NDIS_PACKET_TYPE_ALL_MULTICAST |
NDIS_PACKET_TYPE_BROADCAST);
+ hn_vf_allmulticast_enable(dev);
}
static void
hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
NDIS_PACKET_TYPE_BROADCAST);
+ hn_vf_allmulticast_disable(dev);
+}
+
+static int
+hn_dev_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ /* No filtering on the synthetic path, but can do it on VF */
+ return hn_vf_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
}
/* Setup shared rx/tx queue data */
return -EINVAL;
}
+ hv->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+
err = hn_rndis_conf_offload(hv, txmode->offloads,
rxmode->offloads);
if (err) {
}
}
- return 0;
+ return hn_vf_configure(dev, dev_conf);
}
static int hn_dev_stats_get(struct rte_eth_dev *dev,
{
unsigned int i;
+ hn_vf_stats_get(dev, stats);
+
for (i = 0; i < dev->data->nb_tx_queues; i++) {
const struct hn_tx_queue *txq = dev->data->tx_queues[i];
}
}
+static void
+hn_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ hn_dev_stats_reset(dev);
+ hn_vf_xstats_reset(dev);
+}
+
+static int
+hn_dev_xstats_count(struct rte_eth_dev *dev)
+{
+ int ret, count;
+
+ count = dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings);
+ count += dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
+
+ ret = hn_vf_xstats_get_names(dev, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ return count + ret;
+}
+
static int
hn_dev_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
- __rte_unused unsigned int limit)
+ unsigned int limit)
{
unsigned int i, t, count = 0;
-
- PMD_INIT_FUNC_TRACE();
+ int ret;
if (!xstats_names)
- return dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings)
- + dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
+ return hn_dev_xstats_count(dev);
/* Note: limit checked in rte_eth_xstats_names() */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (!txq)
continue;
+ if (count >= limit)
+ break;
+
for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
snprintf(xstats_names[count++].name,
RTE_ETH_XSTATS_NAME_SIZE,
if (!rxq)
continue;
+ if (count >= limit)
+ break;
+
for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
snprintf(xstats_names[count++].name,
RTE_ETH_XSTATS_NAME_SIZE,
hn_stat_strings[t].name);
}
- return count;
+ ret = hn_vf_xstats_get_names(dev, xstats_names + count,
+ limit - count);
+ if (ret < 0)
+ return ret;
+
+ return count + ret;
}
static int
unsigned int n)
{
unsigned int i, t, count = 0;
-
- const unsigned int nstats =
- dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings)
- + dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
+ const unsigned int nstats = hn_dev_xstats_count(dev);
const char *stats;
+ int ret;
PMD_INIT_FUNC_TRACE();
(stats + hn_stat_strings[t].offset);
}
- return count;
+ ret = hn_vf_xstats_get(dev, xstats + count, n - count);
+ if (ret < 0)
+ return ret;
+
+ return count + ret;
}
static int
hn_dev_start(struct rte_eth_dev *dev)
{
struct hn_data *hv = dev->data->dev_private;
+ int error;
PMD_INIT_FUNC_TRACE();
- /* check if lsc interrupt feature is enabled */
- if (dev->data->dev_conf.intr_conf.lsc) {
- PMD_DRV_LOG(ERR, "link status not supported yet");
- return -ENOTSUP;
- }
+ error = hn_rndis_set_rxfilter(hv,
+ NDIS_PACKET_TYPE_BROADCAST |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
+ NDIS_PACKET_TYPE_DIRECTED);
+ if (error)
+ return error;
+
+ error = hn_vf_start(dev);
+ if (error)
+ hn_rndis_set_rxfilter(hv, 0);
- return hn_rndis_set_rxfilter(hv,
- NDIS_PACKET_TYPE_BROADCAST |
- NDIS_PACKET_TYPE_ALL_MULTICAST |
- NDIS_PACKET_TYPE_DIRECTED);
+ return error;
}
static void
PMD_INIT_FUNC_TRACE();
hn_rndis_set_rxfilter(hv, 0);
+ hn_vf_stop(dev);
}
static void
-hn_dev_close(struct rte_eth_dev *dev __rte_unused)
+hn_dev_close(struct rte_eth_dev *dev)
{
- PMD_INIT_LOG(DEBUG, "close");
+ PMD_INIT_FUNC_TRACE();
+
+ hn_vf_close(dev);
+ hn_dev_free_queues(dev);
}
static const struct eth_dev_ops hn_eth_dev_ops = {
.dev_stop = hn_dev_stop,
.dev_close = hn_dev_close,
.dev_infos_get = hn_dev_info_get,
- .txq_info_get = hn_dev_tx_queue_info,
- .rxq_info_get = hn_dev_rx_queue_info,
+ .dev_supported_ptypes_get = hn_vf_supported_ptypes,
.promiscuous_enable = hn_dev_promiscuous_enable,
.promiscuous_disable = hn_dev_promiscuous_disable,
.allmulticast_enable = hn_dev_allmulticast_enable,
.allmulticast_disable = hn_dev_allmulticast_disable,
+ .set_mc_addr_list = hn_dev_mc_addr_list,
.tx_queue_setup = hn_dev_tx_queue_setup,
.tx_queue_release = hn_dev_tx_queue_release,
.tx_done_cleanup = hn_dev_tx_done_cleanup,
.rx_queue_release = hn_dev_rx_queue_release,
.link_update = hn_dev_link_update,
.stats_get = hn_dev_stats_get,
+ .stats_reset = hn_dev_stats_reset,
.xstats_get = hn_dev_xstats_get,
.xstats_get_names = hn_dev_xstats_get_names,
- .stats_reset = hn_dev_stats_reset,
- .xstats_reset = hn_dev_stats_reset,
+ .xstats_reset = hn_dev_xstats_reset,
};
/*
hv->chim_res = &vmbus->resource[HV_SEND_BUF_MAP];
hv->port_id = eth_dev->data->port_id;
hv->latency = HN_CHAN_LATENCY_NS;
+ hv->max_queues = 1;
+ hv->vf_port = HN_INVALID_PORT;
err = hn_parse_args(eth_dev);
if (err)
return err;
+ strlcpy(hv->owner.name, eth_dev->device->name,
+ RTE_ETH_MAX_OWNER_NAME_LEN);
+ err = rte_eth_dev_owner_new(&hv->owner.id);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Can not get owner id");
+ return err;
+ }
+
/* Initialize primary channel input for control operations */
err = rte_vmbus_chan_open(vmbus, &hv->channels[0]);
if (err)
if (err)
goto failed;
+ /* Multi-queue requires NVS version 5+, i.e. newer Windows Server releases */
+ if (hv->nvs_ver < NVS_VERSION_5)
+ return 0;
+
max_chan = rte_vmbus_max_channels(vmbus);
PMD_INIT_LOG(DEBUG, "VMBus max channels %d", max_chan);
if (max_chan <= 0)
hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan);
+ /* If VF was reported but not added, do it now */
+ if (hv->vf_present && !hn_vf_attached(hv)) {
+ PMD_INIT_LOG(DEBUG, "Adding VF device");
+
+ err = hn_vf_add(eth_dev, hv);
+ if (err)
+ hv->vf_present = 0;
+ }
+
return 0;
failed:
PMD_INIT_LOG(NOTICE, "device init failed");
+ hn_tx_pool_uninit(eth_dev);
hn_detach(hv);
return err;
}
eth_dev->rx_pkt_burst = NULL;
hn_detach(hv);
+ hn_tx_pool_uninit(eth_dev);
rte_vmbus_chan_close(hv->primary->chan);
rte_free(hv->primary);
-
- eth_dev->data->mac_addrs = NULL;
+ rte_eth_dev_owner_delete(hv->owner.id);
return 0;
}
RTE_PMD_REGISTER_VMBUS(net_netvsc, rte_netvsc_pmd);
RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic");
-RTE_INIT(hn_init_log);
-static void
-hn_init_log(void)
+RTE_INIT(hn_init_log)
{
hn_logtype_init = rte_log_register("pmd.net.netvsc.init");
if (hn_logtype_init >= 0)