DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_RSS_HASH)
-int hn_logtype_init;
-int hn_logtype_driver;
-
struct hn_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
unsigned int offset;
{ "good_bytes", offsetof(struct hn_stats, bytes) },
{ "errors", offsetof(struct hn_stats, errors) },
{ "ring full", offsetof(struct hn_stats, ring_full) },
+ { "channel full", offsetof(struct hn_stats, channel_full) },
{ "multicast_packets", offsetof(struct hn_stats, multicast) },
{ "broadcast_packets", offsetof(struct hn_stats, broadcast) },
{ "undersize_packets", offsetof(struct hn_stats, size_bins[0]) },
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
eth_dev->intr_handle = &dev->intr_handle;
- /* allow ethdev to remove on close */
- eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
-
return eth_dev;
}
*/
int
hn_dev_link_update(struct rte_eth_dev *dev,
- int wait_to_complete)
+ int wait_to_complete __rte_unused)
{
struct hn_data *hv = dev->data->dev_private;
struct rte_eth_link link, old;
hn_rndis_get_linkspeed(hv);
- hn_vf_link_update(dev, wait_to_complete);
-
link = (struct rte_eth_link) {
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_autoneg = ETH_LINK_SPEED_FIXED,
return error;
}
-static void
+static int
hn_dev_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	PMD_INIT_FUNC_TRACE();
+	/* Mark the port stopped before the RX filter is disabled. */
+	dev->data->dev_started = 0;
	hn_rndis_set_rxfilter(hv, 0);
-	hn_vf_stop(dev);
+	/* dev_stop now returns int: surface any VF stop failure to ethdev. */
+	return hn_vf_stop(dev);
}
-static void
+static int
hn_dev_close(struct rte_eth_dev *dev)
{
+	int ret;
+
	PMD_INIT_FUNC_TRACE();
+	/* Only the primary process tears down device state; secondaries
+	 * return success without touching shared resources.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
-	hn_vf_close(dev);
+	/* dev_close now returns int: pass the VF close status through. */
+	ret = hn_vf_close(dev);
	hn_dev_free_queues(dev);
+
+	return ret;
}
static const struct eth_dev_ops hn_eth_dev_ops = {
.dev_stop = hn_dev_stop,
.dev_close = hn_dev_close,
.dev_infos_get = hn_dev_info_get,
+ .txq_info_get = hn_dev_tx_queue_info,
+ .rxq_info_get = hn_dev_rx_queue_info,
.dev_supported_ptypes_get = hn_vf_supported_ptypes,
.promiscuous_enable = hn_dev_promiscuous_enable,
.promiscuous_disable = hn_dev_promiscuous_disable,
vmbus = container_of(device, struct rte_vmbus_device, device);
eth_dev->dev_ops = &hn_eth_dev_ops;
+ eth_dev->rx_queue_count = hn_dev_rx_queue_count;
+ eth_dev->rx_descriptor_status = hn_dev_rx_queue_status;
+ eth_dev->tx_descriptor_status = hn_dev_tx_descriptor_status;
eth_dev->tx_pkt_burst = &hn_xmit_pkts;
eth_dev->rx_pkt_burst = &hn_recv_pkts;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
/* Since Hyper-V only supports one MAC address */
eth_dev->data->mac_addrs = rte_calloc("hv_mac", HN_MAX_MAC_ADDRS,
sizeof(struct rte_ether_addr), 0);
eth_hn_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hn_data *hv = eth_dev->data->dev_private;
-	int ret;
+	int ret, ret_stop;
	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
-	hn_dev_stop(eth_dev);
+	/* hn_dev_stop now returns a status; remember it and report it
+	 * only if the rest of the teardown succeeds.
+	 */
+	ret_stop = hn_dev_stop(eth_dev);
	hn_dev_close(eth_dev);
+	/* NOTE(review): clearing dev_ops and the burst-function pointers
+	 * is dropped here — presumably the ethdev layer now resets them
+	 * on release; confirm against the target DPDK version.
+	 */
-	eth_dev->dev_ops = NULL;
-	eth_dev->tx_pkt_burst = NULL;
-	eth_dev->rx_pkt_burst = NULL;
-
	hn_detach(hv);
	hn_chim_uninit(eth_dev);
	rte_vmbus_chan_close(hv->primary->chan);
	if (ret != 0)
		return ret;
-	return 0;
+	return ret_stop;
}
static int eth_hn_probe(struct rte_vmbus_driver *drv __rte_unused,
	eth_dev = rte_eth_dev_allocated(dev->device.name);
	if (!eth_dev)
-		return -ENODEV;
+		/* The port may already have been released (e.g. via
+		 * rte_eth_dev_close); that is not an error on remove.
+		 */
+		return 0; /* port already released */
	ret = eth_hn_dev_uninit(eth_dev);
	if (ret)
RTE_PMD_REGISTER_VMBUS(net_netvsc, rte_netvsc_pmd);
RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic");
-
-RTE_INIT(hn_init_log)
-{
-	hn_logtype_init = rte_log_register("pmd.net.netvsc.init");
-	if (hn_logtype_init >= 0)
-		rte_log_set_level(hn_logtype_init, RTE_LOG_NOTICE);
-	hn_logtype_driver = rte_log_register("pmd.net.netvsc.driver");
-	if (hn_logtype_driver >= 0)
-		rte_log_set_level(hn_logtype_driver, RTE_LOG_NOTICE);
-}
+/* RTE_LOG_REGISTER registers each dynamic log type and sets its
+ * default level (NOTICE) in one step, replacing the open-coded
+ * RTE_INIT constructor.
+ */
+RTE_LOG_REGISTER(hn_logtype_init, pmd.net.netvsc.init, NOTICE);
+RTE_LOG_REGISTER(hn_logtype_driver, pmd.net.netvsc.driver, NOTICE);