DEV_TX_OFFLOAD_VLAN_INSERT)
#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
- DEV_RX_OFFLOAD_VLAN_STRIP)
-
-int hn_logtype_init;
-int hn_logtype_driver;
+ DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_RSS_HASH)
struct hn_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
{ "good_bytes", offsetof(struct hn_stats, bytes) },
{ "errors", offsetof(struct hn_stats, errors) },
{ "ring full", offsetof(struct hn_stats, ring_full) },
+ { "channel full", offsetof(struct hn_stats, channel_full) },
{ "multicast_packets", offsetof(struct hn_stats, multicast) },
{ "broadcast_packets", offsetof(struct hn_stats, broadcast) },
{ "undersize_packets", offsetof(struct hn_stats, size_bins[0]) },
{ "size_1519_max_packets", offsetof(struct hn_stats, size_bins[7]) },
};
+/* The default RSS key.
+ * This value is the same as the MLX5 default so that flows are
+ * received on the same path for both the VF and the synthetic NIC.
+ */
+static const uint8_t rss_default_key[NDIS_HASH_KEYSIZE_TOEPLITZ] = {
+ 0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
+ 0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
+ 0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
+ 0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
+ 0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a,
+};
+
static struct rte_eth_dev *
eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size)
{
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
eth_dev->intr_handle = &dev->intr_handle;
- /* allow ethdev to remove on close */
- eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
-
return eth_dev;
}
static void
eth_dev_vmbus_release(struct rte_eth_dev *eth_dev)
{
- /* mac_addrs must not be freed alone because part of dev_private */
- eth_dev->data->mac_addrs = NULL;
/* free ether device */
rte_eth_dev_release_port(eth_dev);
*/
int
hn_dev_link_update(struct rte_eth_dev *dev,
- int wait_to_complete)
+ int wait_to_complete __rte_unused)
{
struct hn_data *hv = dev->data->dev_private;
struct rte_eth_link link, old;
hn_rndis_get_linkspeed(hv);
- hn_vf_link_update(dev, wait_to_complete);
-
link = (struct rte_eth_link) {
.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_autoneg = ETH_LINK_FIXED,
return rte_eth_linkstatus_set(dev, &link);
}
-static void hn_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info)
+static int hn_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
{
struct hn_data *hv = dev->data->dev_private;
+ int rc;
dev_info->speed_capa = ETH_LINK_SPEED_10G;
dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
dev_info->max_mac_addrs = 1;
dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
- dev_info->flow_type_rss_offloads =
- ETH_RSS_IPV4 | ETH_RSS_IPV6 | ETH_RSS_TCP | ETH_RSS_UDP;
+ dev_info->flow_type_rss_offloads = hv->rss_offloads;
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
dev_info->max_rx_queues = hv->max_queues;
dev_info->max_tx_queues = hv->max_queues;
- hn_rndis_get_offload(hv, dev_info);
- hn_vf_info_get(hv, dev_info);
+ dev_info->tx_desc_lim.nb_min = 1;
+ dev_info->tx_desc_lim.nb_max = 4096;
+
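+	/* The remaining fields are filled via RNDIS queries;
+	 * secondary processes skip them.
+	 */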
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+	/* fill in Rx and Tx offload capabilities */
+ rc = hn_rndis_get_offload(hv, dev_info);
+ if (rc != 0)
+ return rc;
+
+	/* merge in the offloads and queue limits of the VF, if present */
+ return hn_vf_info_get(hv, dev_info);
}
-static void
+static int hn_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ unsigned int i;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (reta_size != NDIS_HASH_INDCNT) {
+ PMD_DRV_LOG(ERR, "Hash lookup table size does not match NDIS");
+ return -EINVAL;
+ }
+
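+	/* Update only the entries selected in each 64-entry mask group */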
+ for (i = 0; i < NDIS_HASH_INDCNT; i++) {
+ uint16_t idx = i / RTE_RETA_GROUP_SIZE;
+ uint16_t shift = i % RTE_RETA_GROUP_SIZE;
+ uint64_t mask = (uint64_t)1 << shift;
+
+ if (reta_conf[idx].mask & mask)
+ hv->rss_ind[i] = reta_conf[idx].reta[shift];
+ }
+
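+	/* Commit the new table: disable RSS, then re-enable it so the
+	 * host picks up the updated indirection table.
+	 */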
+ err = hn_rndis_conf_rss(hv, NDIS_RSS_FLAG_DISABLE);
+ if (err) {
+ PMD_DRV_LOG(NOTICE,
+ "rss disable failed");
+ return err;
+ }
+
+ err = hn_rndis_conf_rss(hv, 0);
+ if (err) {
+ PMD_DRV_LOG(NOTICE,
+ "reta reconfig failed");
+ return err;
+ }
+
+ return hn_vf_reta_hash_update(dev, reta_conf, reta_size);
+}
+
+static int hn_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ unsigned int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (reta_size != NDIS_HASH_INDCNT) {
+ PMD_DRV_LOG(ERR, "Hash lookup table size does not match NDIS");
+ return -EINVAL;
+ }
+
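+	/* Report only the entries selected in each 64-entry mask group */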
+ for (i = 0; i < NDIS_HASH_INDCNT; i++) {
+ uint16_t idx = i / RTE_RETA_GROUP_SIZE;
+ uint16_t shift = i % RTE_RETA_GROUP_SIZE;
+ uint64_t mask = (uint64_t)1 << shift;
+
+ if (reta_conf[idx].mask & mask)
+ reta_conf[idx].reta[shift] = hv->rss_ind[i];
+ }
+ return 0;
+}
+
+static void hn_rss_hash_init(struct hn_data *hv,
+ const struct rte_eth_rss_conf *rss_conf)
+{
+ /* Convert from DPDK RSS hash flags to NDIS hash flags */
+ hv->rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ;
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV4)
+ hv->rss_hash |= NDIS_HASH_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ hv->rss_hash |= NDIS_HASH_TCP_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_IPV6)
+ hv->rss_hash |= NDIS_HASH_IPV6;
+ if (rss_conf->rss_hf & ETH_RSS_IPV6_EX)
+ hv->rss_hash |= NDIS_HASH_IPV6_EX;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ hv->rss_hash |= NDIS_HASH_TCP_IPV6;
+ if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX)
+ hv->rss_hash |= NDIS_HASH_TCP_IPV6_EX;
+
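+	/* If no key was supplied, fall back to the mlx5-compatible default */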
+ memcpy(hv->rss_key, rss_conf->rss_key ? : rss_default_key,
+ NDIS_HASH_KEYSIZE_TOEPLITZ);
+}
+
+static int hn_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
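+	/* Disable RSS while the hash parameters are being changed */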
+ err = hn_rndis_conf_rss(hv, NDIS_RSS_FLAG_DISABLE);
+ if (err) {
+ PMD_DRV_LOG(NOTICE,
+ "rss disable failed");
+ return err;
+ }
+
+ hn_rss_hash_init(hv, rss_conf);
+
+ if (rss_conf->rss_hf != 0) {
+ err = hn_rndis_conf_rss(hv, 0);
+ if (err) {
+ PMD_DRV_LOG(NOTICE,
+ "rss reconfig failed (RSS disabled)");
+ return err;
+ }
+ }
+
+ return hn_vf_rss_hash_update(dev, rss_conf);
+}
+
+static int hn_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hv->ndis_ver < NDIS_VERSION_6_20) {
+ PMD_DRV_LOG(DEBUG, "RSS not supported on this host");
+ return -EOPNOTSUPP;
+ }
+
+ rss_conf->rss_key_len = NDIS_HASH_KEYSIZE_TOEPLITZ;
+ if (rss_conf->rss_key)
+ memcpy(rss_conf->rss_key, hv->rss_key,
+ NDIS_HASH_KEYSIZE_TOEPLITZ);
+
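+	/* Convert the NDIS hash fields back to DPDK RSS flags */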
+ rss_conf->rss_hf = 0;
+ if (hv->rss_hash & NDIS_HASH_IPV4)
+ rss_conf->rss_hf |= ETH_RSS_IPV4;
+
+ if (hv->rss_hash & NDIS_HASH_TCP_IPV4)
+ rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+
+ if (hv->rss_hash & NDIS_HASH_IPV6)
+ rss_conf->rss_hf |= ETH_RSS_IPV6;
+
+ if (hv->rss_hash & NDIS_HASH_IPV6_EX)
+ rss_conf->rss_hf |= ETH_RSS_IPV6_EX;
+
+ if (hv->rss_hash & NDIS_HASH_TCP_IPV6)
+ rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+
+ if (hv->rss_hash & NDIS_HASH_TCP_IPV6_EX)
+ rss_conf->rss_hf |= ETH_RSS_IPV6_TCP_EX;
+
+ return 0;
+}
+
+static int
hn_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
struct hn_data *hv = dev->data->dev_private;
hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS);
- hn_vf_promiscuous_enable(dev);
+ return hn_vf_promiscuous_enable(dev);
}
-static void
+static int
hn_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
struct hn_data *hv = dev->data->dev_private;
if (dev->data->all_multicast)
filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
hn_rndis_set_rxfilter(hv, filter);
- hn_vf_promiscuous_disable(dev);
+ return hn_vf_promiscuous_disable(dev);
}
-static void
+static int
hn_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
struct hn_data *hv = dev->data->dev_private;
hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
NDIS_PACKET_TYPE_ALL_MULTICAST |
NDIS_PACKET_TYPE_BROADCAST);
- hn_vf_allmulticast_enable(dev);
+ return hn_vf_allmulticast_enable(dev);
}
-static void
+static int
hn_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct hn_data *hv = dev->data->dev_private;
hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
NDIS_PACKET_TYPE_BROADCAST);
- hn_vf_allmulticast_disable(dev);
+ return hn_vf_allmulticast_disable(dev);
}
static int
static int hn_dev_configure(struct rte_eth_dev *dev)
{
- const struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct rte_eth_rss_conf *rss_conf = &dev_conf->rx_adv_conf.rss_conf;
const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
const struct rte_eth_txmode *txmode = &dev_conf->txmode;
-
- const struct rte_eth_rss_conf *rss_conf =
- &dev_conf->rx_adv_conf.rss_conf;
struct hn_data *hv = dev->data->dev_private;
uint64_t unsupported;
- int err, subchan;
+ int i, err, subchan;
PMD_INIT_FUNC_TRACE();
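+	/* Requesting RSS implies per-packet RSS hash delivery */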
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ dev_conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
if (unsupported) {
PMD_DRV_LOG(NOTICE,
hv->num_queues = RTE_MAX(dev->data->nb_rx_queues,
dev->data->nb_tx_queues);
+
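+	/* Spread the indirection table round-robin across the Rx queues */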
+ for (i = 0; i < NDIS_HASH_INDCNT; i++)
+ hv->rss_ind[i] = i % dev->data->nb_rx_queues;
+
+ hn_rss_hash_init(hv, rss_conf);
+
subchan = hv->num_queues - 1;
if (subchan > 0) {
err = hn_subchan_configure(hv, subchan);
return err;
}
- err = hn_rndis_conf_rss(hv, rss_conf);
+ err = hn_rndis_conf_rss(hv, NDIS_RSS_FLAG_DISABLE);
if (err) {
PMD_DRV_LOG(NOTICE,
- "rss configuration failed");
+ "rss disable failed");
return err;
}
+
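+	/* Re-enable RSS only if at least one hash type was requested */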
+ if (rss_conf->rss_hf != 0) {
+ err = hn_rndis_conf_rss(hv, 0);
+ if (err) {
+ PMD_DRV_LOG(NOTICE,
+ "initial RSS config failed");
+ return err;
+ }
+ }
}
return hn_vf_configure(dev, dev_conf);
}
-static void
+static int
hn_dev_stats_reset(struct rte_eth_dev *dev)
{
unsigned int i;
memset(&rxq->stats, 0, sizeof(struct hn_stats));
}
+
+ return 0;
}
-static void
+static int
hn_dev_xstats_reset(struct rte_eth_dev *dev)
{
- hn_dev_stats_reset(dev);
- hn_vf_xstats_reset(dev);
+ int ret;
+
+ ret = hn_dev_stats_reset(dev);
+	if (ret != 0)
+		return ret;
+
+ return hn_vf_xstats_reset(dev);
}
static int
continue;
stats = (const char *)&txq->stats;
- for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
- xstats[count++].value = *(const uint64_t *)
+ for (t = 0; t < RTE_DIM(hn_stat_strings); t++, count++) {
+ xstats[count].id = count;
+ xstats[count].value = *(const uint64_t *)
(stats + hn_stat_strings[t].offset);
+ }
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
continue;
stats = (const char *)&rxq->stats;
- for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
- xstats[count++].value = *(const uint64_t *)
+ for (t = 0; t < RTE_DIM(hn_stat_strings); t++, count++) {
+ xstats[count].id = count;
+ xstats[count].value = *(const uint64_t *)
(stats + hn_stat_strings[t].offset);
+ }
}
- ret = hn_vf_xstats_get(dev, xstats + count, n - count);
+ ret = hn_vf_xstats_get(dev, xstats, count, n);
if (ret < 0)
return ret;
if (error)
hn_rndis_set_rxfilter(hv, 0);
+ /* Initialize Link state */
+ if (error == 0)
+ hn_dev_link_update(dev, 0);
+
return error;
}
-static void
+static int
hn_dev_stop(struct rte_eth_dev *dev)
{
struct hn_data *hv = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
+ dev->data->dev_started = 0;
hn_rndis_set_rxfilter(hv, 0);
- hn_vf_stop(dev);
+ return hn_vf_stop(dev);
}
-static void
+static int
hn_dev_close(struct rte_eth_dev *dev)
{
+ int ret;
+
PMD_INIT_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
- hn_vf_close(dev);
+ ret = hn_vf_close(dev);
hn_dev_free_queues(dev);
+
+ return ret;
}
static const struct eth_dev_ops hn_eth_dev_ops = {
.dev_stop = hn_dev_stop,
.dev_close = hn_dev_close,
.dev_infos_get = hn_dev_info_get,
+ .txq_info_get = hn_dev_tx_queue_info,
+ .rxq_info_get = hn_dev_rx_queue_info,
.dev_supported_ptypes_get = hn_vf_supported_ptypes,
.promiscuous_enable = hn_dev_promiscuous_enable,
.promiscuous_disable = hn_dev_promiscuous_disable,
.allmulticast_enable = hn_dev_allmulticast_enable,
.allmulticast_disable = hn_dev_allmulticast_disable,
.set_mc_addr_list = hn_dev_mc_addr_list,
+ .reta_update = hn_rss_reta_update,
+ .reta_query = hn_rss_reta_query,
+ .rss_hash_update = hn_rss_hash_update,
+ .rss_hash_conf_get = hn_rss_hash_conf_get,
.tx_queue_setup = hn_dev_tx_queue_setup,
.tx_queue_release = hn_dev_tx_queue_release,
.tx_done_cleanup = hn_dev_tx_done_cleanup,
vmbus = container_of(device, struct rte_vmbus_device, device);
eth_dev->dev_ops = &hn_eth_dev_ops;
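+	/* fast-path status callbacks live on the ethdev, not in dev_ops */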
+ eth_dev->rx_queue_count = hn_dev_rx_queue_count;
+ eth_dev->rx_descriptor_status = hn_dev_rx_queue_status;
+ eth_dev->tx_descriptor_status = hn_dev_tx_descriptor_status;
eth_dev->tx_pkt_burst = &hn_xmit_pkts;
eth_dev->rx_pkt_burst = &hn_recv_pkts;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- /* Since Hyper-V only supports one MAC address, just use local data */
- eth_dev->data->mac_addrs = &hv->mac_addr;
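+	/* Let the ethdev layer fill per-queue basic xstats automatically */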
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
+	/* Hyper-V only supports one MAC address; allocate room for it */
+ eth_dev->data->mac_addrs = rte_calloc("hv_mac", HN_MAX_MAC_ADDRS,
+ sizeof(struct rte_ether_addr), 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory store MAC addresses");
+ return -ENOMEM;
+ }
hv->vmbus = vmbus;
hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP];
hv->port_id = eth_dev->data->port_id;
hv->latency = HN_CHAN_LATENCY_NS;
hv->max_queues = 1;
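+	/* vf_lock serializes access to the paired VF device */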
+ rte_rwlock_init(&hv->vf_lock);
hv->vf_port = HN_INVALID_PORT;
err = hn_parse_args(eth_dev);
if (err)
goto failed;
- err = hn_tx_pool_init(eth_dev);
+ err = hn_chim_init(eth_dev);
if (err)
goto failed;
- err = hn_rndis_get_eaddr(hv, hv->mac_addr.addr_bytes);
+ err = hn_rndis_get_eaddr(hv, eth_dev->data->mac_addrs->addr_bytes);
if (err)
goto failed;
failed:
PMD_INIT_LOG(NOTICE, "device init failed");
- hn_tx_pool_uninit(eth_dev);
+ hn_chim_uninit(eth_dev);
hn_detach(hv);
return err;
}
eth_hn_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct hn_data *hv = eth_dev->data->dev_private;
+ int ret, ret_stop;
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- hn_dev_stop(eth_dev);
+ ret_stop = hn_dev_stop(eth_dev);
hn_dev_close(eth_dev);
- eth_dev->dev_ops = NULL;
- eth_dev->tx_pkt_burst = NULL;
- eth_dev->rx_pkt_burst = NULL;
-
hn_detach(hv);
- hn_tx_pool_uninit(eth_dev);
+ hn_chim_uninit(eth_dev);
rte_vmbus_chan_close(hv->primary->chan);
rte_free(hv->primary);
- rte_eth_dev_owner_delete(hv->owner.id);
+ ret = rte_eth_dev_owner_delete(hv->owner.id);
+ if (ret != 0)
+ return ret;
- return 0;
+ return ret_stop;
}
static int eth_hn_probe(struct rte_vmbus_driver *drv __rte_unused,
eth_dev = rte_eth_dev_allocated(dev->device.name);
if (!eth_dev)
- return -ENODEV;
+ return 0; /* port already released */
ret = eth_hn_dev_uninit(eth_dev);
if (ret)
RTE_PMD_REGISTER_VMBUS(net_netvsc, rte_netvsc_pmd);
RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic");
-
-RTE_INIT(hn_init_log)
-{
- hn_logtype_init = rte_log_register("pmd.net.netvsc.init");
- if (hn_logtype_init >= 0)
- rte_log_set_level(hn_logtype_init, RTE_LOG_NOTICE);
- hn_logtype_driver = rte_log_register("pmd.net.netvsc.driver");
- if (hn_logtype_driver >= 0)
- rte_log_set_level(hn_logtype_driver, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(hn_logtype_init, pmd.net.netvsc.init, NOTICE);
+RTE_LOG_REGISTER(hn_logtype_driver, pmd.net.netvsc.driver, NOTICE);