X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fnetvsc%2Fhn_vf.c;h=7a3734cadfa492cb00d49946d8ac2c8d2b7662fc;hb=ca041cd44fcc8b22c0e84460254596096e8fe914;hp=27ac87e7e975c6e09fdcfb242e91c9c2b3e65424;hpb=538da7a1cad25fbdffe298c8ca76fc4dbd262d1b;p=dpdk.git

diff --git a/drivers/net/netvsc/hn_vf.c b/drivers/net/netvsc/hn_vf.c
index 27ac87e7e9..7a3734cadf 100644
--- a/drivers/net/netvsc/hn_vf.c
+++ b/drivers/net/netvsc/hn_vf.c
@@ -172,12 +172,15 @@ hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
  * use the default config of the VF
  * and the minimum number of queues and buffer sizes.
  */
-static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
+static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
 			     struct rte_eth_dev_info *info)
 {
 	struct rte_eth_dev_info vf_info;
+	int ret;
 
-	rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
+	ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
+	if (ret != 0)
+		return ret;
 
 	info->speed_capa = vf_info.speed_capa;
 	info->default_rxportconf = vf_info.default_rxportconf;
@@ -198,17 +201,21 @@ static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
 				       info->min_rx_bufsize);
 	info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen,
 				      info->max_rx_pktlen);
+
+	return 0;
 }
 
-void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
+int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
 {
 	struct rte_eth_dev *vf_dev;
+	int ret = 0;
 
 	rte_spinlock_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
-		hn_vf_info_merge(vf_dev, info);
+		ret = hn_vf_info_merge(vf_dev, info);
 	rte_spinlock_unlock(&hv->vf_lock);
+	return ret;
 }
 
 int hn_vf_link_update(struct rte_eth_dev *dev,
@@ -355,6 +362,20 @@ void hn_vf_stop(struct rte_eth_dev *dev)
 		rte_spinlock_unlock(&hv->vf_lock);	\
 	}
 
+/* If VF is present, then cascade configuration down */
+#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)		\
+	{						\
+		struct hn_data *hv = (dev)->data->dev_private;	\
+		struct rte_eth_dev *vf_dev;		\
+		int ret = 0;				\
+		rte_spinlock_lock(&hv->vf_lock);	\
+		vf_dev = hn_get_vf_dev(hv);		\
+		if (vf_dev)				\
+			ret = func(vf_dev->data->port_id);	\
+		rte_spinlock_unlock(&hv->vf_lock);	\
+		return ret;				\
+	}
+
 void hn_vf_reset(struct rte_eth_dev *dev)
 {
 	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
@@ -374,29 +395,29 @@ void hn_vf_close(struct rte_eth_dev *dev)
 	rte_spinlock_unlock(&hv->vf_lock);
 }
 
-void hn_vf_stats_reset(struct rte_eth_dev *dev)
+int hn_vf_stats_reset(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_stats_reset);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
 }
 
-void hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
+int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_enable);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
 }
 
-void hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
+int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_disable);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
 }
 
-void hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
+int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_enable);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
 }
 
-void hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
+int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_disable);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
 }
 
 int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
@@ -506,17 +527,19 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
 	struct hn_data *hv = dev->data->dev_private;
 	struct rte_eth_dev *vf_dev;
 	int i, count = 0;
-	char tmp[RTE_ETH_XSTATS_NAME_SIZE];
 
 	rte_spinlock_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
-	if (vf_dev && vf_dev->dev_ops->xstats_get_names)
-		count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n);
+	if (vf_dev)
+		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
+						 names, n);
 	rte_spinlock_unlock(&hv->vf_lock);
 
 	/* add vf_ prefix to xstat names */
 	if (names) {
 		for (i = 0; i < count; i++) {
+			char tmp[RTE_ETH_XSTATS_NAME_SIZE];
+
 			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
 			strlcpy(names[i].name, tmp, sizeof(names[i].name));
 		}
@@ -527,29 +550,76 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
 
 int hn_vf_xstats_get(struct rte_eth_dev *dev,
 		     struct rte_eth_xstat *xstats,
+		     unsigned int offset,
 		     unsigned int n)
 {
 	struct hn_data *hv = dev->data->dev_private;
 	struct rte_eth_dev *vf_dev;
-	int count = 0;
+	int i, count = 0;
 
 	rte_spinlock_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
-	if (vf_dev && vf_dev->dev_ops->xstats_get)
-		count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n);
+	if (vf_dev)
+		count = rte_eth_xstats_get(vf_dev->data->port_id,
+					   xstats + offset, n - offset);
 	rte_spinlock_unlock(&hv->vf_lock);
 
+	/* Offset id's for VF stats */
+	if (count > 0) {
+		for (i = 0; i < count; i++)
+			xstats[i + offset].id += offset;
+	}
+
 	return count;
 }
 
-void hn_vf_xstats_reset(struct rte_eth_dev *dev)
+int hn_vf_xstats_reset(struct rte_eth_dev *dev)
 {
 	struct hn_data *hv = dev->data->dev_private;
 	struct rte_eth_dev *vf_dev;
+	int ret;
 
 	rte_spinlock_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
-	if (vf_dev && vf_dev->dev_ops->xstats_reset)
-		vf_dev->dev_ops->xstats_reset(vf_dev);
+	if (vf_dev)
+		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
+	else
+		ret = -EINVAL;
 	rte_spinlock_unlock(&hv->vf_lock);
+
+	return ret;
+}
+
+int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
+			  struct rte_eth_rss_conf *rss_conf)
+{
+	struct hn_data *hv = dev->data->dev_private;
+	struct rte_eth_dev *vf_dev;
+	int ret = 0;
+
+	rte_spinlock_lock(&hv->vf_lock);
+	vf_dev = hn_get_vf_dev(hv);
+	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
+		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
+	rte_spinlock_unlock(&hv->vf_lock);
+
+	return ret;
+}
+
+int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
+			   struct rte_eth_rss_reta_entry64 *reta_conf,
+			   uint16_t reta_size)
+{
+	struct hn_data *hv = dev->data->dev_private;
+	struct rte_eth_dev *vf_dev;
+	int ret = 0;
+
+	rte_spinlock_lock(&hv->vf_lock);
+	vf_dev = hn_get_vf_dev(hv);
+	if (vf_dev && vf_dev->dev_ops->reta_update)
+		ret = vf_dev->dev_ops->reta_update(vf_dev,
+						   reta_conf, reta_size);
+	rte_spinlock_unlock(&hv->vf_lock);
+
+	return ret;
 }
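
Note on using the new return values: with hn_vf_promiscuous_enable() and the other VF pass-through helpers now returning int (via VF_ETHDEV_FUNC_RET_STATUS), the netvsc eth_dev_ops callbacks can propagate the VF's status instead of discarding it. The following is only a sketch of such a caller; hn_rndis_set_rxfilter() and NDIS_PACKET_TYPE_PROMISCUOUS are assumed from the surrounding netvsc driver, and the actual wiring in hn_ethdev.c may differ.

/* Sketch only: an eth_dev_ops callback that programs the synthetic
 * (netvsc) RX filter first and then cascades to the VF, returning the
 * VF status upward.  Not part of this diff.
 */
static int
hn_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	/* Enable promiscuous mode on the synthetic path ... */
	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS);

	/* ... then cascade to the VF; 0 on success or a negative errno. */
	return hn_vf_promiscuous_enable(dev);
}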
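
Note on the new offset parameter: hn_vf_xstats_get() now writes the VF counters after the synthetic-path entries and shifts their ids by the same offset, so the combined xstats array stays contiguous and the ids stay unique. A sketch of a caller is below; hn_dev_xstats_get() is illustrative and hn_fill_synthetic_xstats() is a hypothetical helper standing in for whatever fills the driver's own counters.

/* Sketch only: fill the synthetic-path counters first, then append the
 * VF counters starting at index 'count', which hn_vf_xstats_get() also
 * uses to offset the VF xstat ids.  Not part of this diff.
 */
static int
hn_dev_xstats_get(struct rte_eth_dev *dev,
		  struct rte_eth_xstat *xstats,
		  unsigned int n)
{
	unsigned int count;
	int vf_count;

	/* Synthetic-path statistics occupy xstats[0..count-1]. */
	count = hn_fill_synthetic_xstats(dev, xstats, n);

	/* VF statistics are appended at xstats[count..]. */
	vf_count = hn_vf_xstats_get(dev, xstats, count, n);
	if (vf_count < 0)
		return vf_count;

	return count + vf_count;
}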