X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fnetvsc%2Fhn_vf.c;h=1261b2e2ef858aafff8277a3b57f4a65b5d289b0;hb=4710e16a4a7b53c9f2cf38e6f6af945e9af59c26;hp=f68e1f9c54734cfef13bba527d6c724740fb4e6c;hpb=2b9cee18d69b5baf3cfdd9ce7932461c6e02abfe;p=dpdk.git

diff --git a/drivers/net/netvsc/hn_vf.c b/drivers/net/netvsc/hn_vf.c
index f68e1f9c54..1261b2e2ef 100644
--- a/drivers/net/netvsc/hn_vf.c
+++ b/drivers/net/netvsc/hn_vf.c
@@ -10,8 +10,8 @@
 #include
 #include
 #include
+#include
 #include
-#include
 #include
 #include
@@ -32,17 +32,17 @@
 /* Search for VF with matching MAC address, return port id */
 static int hn_vf_match(const struct rte_eth_dev *dev)
 {
-	const struct ether_addr *mac = dev->data->mac_addrs;
+	const struct rte_ether_addr *mac = dev->data->mac_addrs;
 	int i;
 
 	RTE_ETH_FOREACH_DEV(i) {
 		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
-		const struct ether_addr *vf_mac = vf_dev->data->mac_addrs;
+		const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;
 
 		if (vf_dev == dev)
 			continue;
 
-		if (is_same_ether_addr(mac, vf_mac))
+		if (rte_is_same_ether_addr(mac, vf_mac))
 			return i;
 	}
 	return -ENOENT;
@@ -167,17 +167,31 @@ hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
 		hn_vf_remove(hv);
 }
 
+static void
+hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
+		     const struct rte_eth_desc_lim *vf_lim)
+{
+	lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
+	lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
+	lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
+	lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
+	lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
+}
+
 /*
  * Merge the info from the VF and synthetic path.
  * use the default config of the VF
  * and the minimum number of queues and buffer sizes.
  */
-static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
+static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
 			     struct rte_eth_dev_info *info)
 {
 	struct rte_eth_dev_info vf_info;
+	int ret;
 
-	rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
+	ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
+	if (ret != 0)
+		return ret;
 
 	info->speed_capa = vf_info.speed_capa;
 	info->default_rxportconf = vf_info.default_rxportconf;
@@ -193,22 +207,28 @@ static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
 			      info->max_tx_queues);
 	info->tx_offload_capa &= vf_info.tx_offload_capa;
 	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
+	hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);
 
 	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
 				       info->min_rx_bufsize);
 	info->max_rx_pktlen  = RTE_MAX(vf_info.max_rx_pktlen,
 				       info->max_rx_pktlen);
+	hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);
+
+	return 0;
 }
 
-void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
+int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
 {
 	struct rte_eth_dev *vf_dev;
+	int ret = 0;
 
 	rte_spinlock_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
-		hn_vf_info_merge(vf_dev, info);
+		ret = hn_vf_info_merge(vf_dev, info);
 	rte_spinlock_unlock(&hv->vf_lock);
+	return ret;
 }
 
 int hn_vf_link_update(struct rte_eth_dev *dev,
@@ -355,6 +375,20 @@ void hn_vf_stop(struct rte_eth_dev *dev)
 	rte_spinlock_unlock(&hv->vf_lock);	\
 	}
 
+/* If VF is present, then cascade configuration down */
+#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)		\
+	{						\
+	struct hn_data *hv = (dev)->data->dev_private;	\
+	struct rte_eth_dev *vf_dev;			\
+	int ret = 0;					\
+	rte_spinlock_lock(&hv->vf_lock);		\
+	vf_dev = hn_get_vf_dev(hv);			\
+	if (vf_dev)					\
+		ret = func(vf_dev->data->port_id);	\
+	rte_spinlock_unlock(&hv->vf_lock);		\
+	return ret;					\
+	}
+
 void hn_vf_reset(struct rte_eth_dev *dev)
 {
 	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
 }
@@ -362,36 +396,45 @@ void hn_vf_reset(struct rte_eth_dev *dev)
 
 void hn_vf_close(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_dev_close);
+	struct hn_data *hv = dev->data->dev_private;
+	uint16_t vf_port;
+
+	rte_spinlock_lock(&hv->vf_lock);
+	vf_port = hv->vf_port;
+	if (vf_port != HN_INVALID_PORT)
+		rte_eth_dev_close(vf_port);
+
+	hv->vf_port = HN_INVALID_PORT;
+	rte_spinlock_unlock(&hv->vf_lock);
 }
 
-void hn_vf_stats_reset(struct rte_eth_dev *dev)
+int hn_vf_stats_reset(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_stats_reset);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
 }
 
-void hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
+int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_enable);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
 }
 
-void hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
+int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_disable);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
 }
 
-void hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
+int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_enable);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
 }
 
-void hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
+int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_disable);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
 }
 
 int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
-			struct ether_addr *mc_addr_set,
+			struct rte_ether_addr *mc_addr_set,
 			uint32_t nb_mc_addr)
 {
 	struct hn_data *hv = dev->data->dev_private;
@@ -497,17 +540,19 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
 	struct hn_data *hv = dev->data->dev_private;
 	struct rte_eth_dev *vf_dev;
 	int i, count = 0;
-	char tmp[RTE_ETH_XSTATS_NAME_SIZE];
 
 	rte_spinlock_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
-	if (vf_dev && vf_dev->dev_ops->xstats_get_names)
-		count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n);
+	if (vf_dev)
+		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
+						 names, n);
 	rte_spinlock_unlock(&hv->vf_lock);
 
 	/* add vf_ prefix to xstat names */
 	if (names) {
 		for (i = 0; i < count; i++) {
+			char tmp[RTE_ETH_XSTATS_NAME_SIZE];
+
 			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
 			strlcpy(names[i].name, tmp, sizeof(names[i].name));
 		}
@@ -518,29 +563,76 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
 
 int hn_vf_xstats_get(struct rte_eth_dev *dev,
 		     struct rte_eth_xstat *xstats,
+		     unsigned int offset,
 		     unsigned int n)
 {
 	struct hn_data *hv = dev->data->dev_private;
 	struct rte_eth_dev *vf_dev;
-	int count = 0;
+	int i, count = 0;
 
 	rte_spinlock_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
-	if (vf_dev && vf_dev->dev_ops->xstats_get)
-		count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n);
+	if (vf_dev)
+		count = rte_eth_xstats_get(vf_dev->data->port_id,
+					   xstats + offset, n - offset);
 	rte_spinlock_unlock(&hv->vf_lock);
 
+	/* Offset id's for VF stats */
+	if (count > 0) {
+		for (i = 0; i < count; i++)
+			xstats[i + offset].id += offset;
+	}
+
 	return count;
 }
 
-void hn_vf_xstats_reset(struct rte_eth_dev *dev)
+int hn_vf_xstats_reset(struct rte_eth_dev *dev)
 {
 	struct hn_data *hv = dev->data->dev_private;
 	struct rte_eth_dev *vf_dev;
+	int ret;
+
+	rte_spinlock_lock(&hv->vf_lock);
+	vf_dev = hn_get_vf_dev(hv);
+	if (vf_dev)
+		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
+	else
+		ret = -EINVAL;
+	rte_spinlock_unlock(&hv->vf_lock);
+
+	return ret;
+}
+
+int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
+			  struct rte_eth_rss_conf *rss_conf)
+{
+	struct hn_data *hv = dev->data->dev_private;
+	struct rte_eth_dev *vf_dev;
+	int ret = 0;
 
 	rte_spinlock_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
-	if (vf_dev && vf_dev->dev_ops->xstats_reset)
-		vf_dev->dev_ops->xstats_reset(vf_dev);
+	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
+		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
 	rte_spinlock_unlock(&hv->vf_lock);
+
+	return ret;
+}
+
+int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
+			   struct rte_eth_rss_reta_entry64 *reta_conf,
+			   uint16_t reta_size)
+{
+	struct hn_data *hv = dev->data->dev_private;
+	struct rte_eth_dev *vf_dev;
+	int ret = 0;
+
+	rte_spinlock_lock(&hv->vf_lock);
+	vf_dev = hn_get_vf_dev(hv);
+	if (vf_dev && vf_dev->dev_ops->reta_update)
+		ret = vf_dev->dev_ops->reta_update(vf_dev,
+						   reta_conf, reta_size);
+	rte_spinlock_unlock(&hv->vf_lock);
+
+	return ret;
 }
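
A note on hn_vf_merge_desc_lim(): it intersects the two paths' descriptor limits, taking the smaller of the maxima (nb_max, nb_seg_max) and the larger of the minima and alignment (nb_min, nb_align), so a ring size accepted against the merged info is valid on both the synthetic and VF datapaths. (The nb_mtu_seg_max assignment reuses the nb_seg_max fields; that reads like a copy-and-paste slip, but it is reproduced above exactly as the patch has it.) A minimal caller-side sketch of how the merged limits are consumed, using only stable ethdev calls; the helper name is hypothetical and "port_id" is assumed to be a netvsc port:

/*
 * Caller-side sketch (not part of the patch): size rings so they are
 * valid on both datapaths, relying on the merged descriptor limits
 * that hn_vf_info_merge() now reports.
 */
#include <rte_ethdev.h>

static int
pick_ring_sizes(uint16_t port_id, uint16_t *nb_rxd, uint16_t *nb_txd)
{
	struct rte_eth_dev_info info;
	int ret;

	/* After this patch, an info_get failure on the VF propagates up. */
	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;

	/* Clamp the requested sizes to the merged rx/tx descriptor limits. */
	return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, nb_rxd, nb_txd);
}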
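
VF_ETHDEV_FUNC_RET_STATUS supplies the entire body of each wrapper, including the return statement, which is why the functions that use it contain nothing else. Hand-expanding it for one wrapper shows the control flow; this expansion is for illustration only and is not part of the patch:

/* hn_vf_promiscuous_enable() after macro expansion (illustrative). */
int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_promiscuous_enable(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
	/* No VF bound: ret stays 0, so the synthetic path reports success. */
	return ret;
}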
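
The new offset parameter to hn_vf_xstats_get() lets the ethdev-level callback lay synthetic and VF statistics out in a single array: VF entries are written at xstats[offset] onward and their ids are shifted by offset so they remain unique next to the synthetic ids, matching the "vf_" prefix that hn_vf_xstats_get_names() puts on the names. A hypothetical composition sketch; fill_synthetic_xstats() and its return convention are assumptions for illustration, not names from this patch:

/*
 * Illustrative composition of synthetic + VF extended stats.
 * fill_synthetic_xstats() is a stand-in for the driver's own code
 * that fills ids [0, nr_synthetic) and returns that count.
 */
static int
hn_dev_xstats_get(struct rte_eth_dev *dev,
		  struct rte_eth_xstat *xstats, unsigned int n)
{
	unsigned int nr_synthetic;
	int vf_count;

	/* Synthetic stats occupy ids [0, nr_synthetic). */
	nr_synthetic = fill_synthetic_xstats(dev, xstats, n);

	/* VF stats land at xstats[nr_synthetic..] with ids shifted up. */
	vf_count = hn_vf_xstats_get(dev, xstats, nr_synthetic, n);
	if (vf_count < 0)
		return vf_count;

	return nr_synthetic + vf_count;
}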