diff --git a/drivers/net/netvsc/hn_vf.c b/drivers/net/netvsc/hn_vf.c
index 3f714ec990..7a3734cadf 100644
--- a/drivers/net/netvsc/hn_vf.c
+++ b/drivers/net/netvsc/hn_vf.c
@@ -10,8 +10,8 @@
 #include 
 #include 
 #include 
+#include 
 #include 
-#include 
 #include 
 #include 
@@ -32,34 +32,36 @@
 /* Search for VF with matching MAC address, return port id */
 static int hn_vf_match(const struct rte_eth_dev *dev)
 {
-	const struct ether_addr *mac = dev->data->mac_addrs;
-	char buf[32];
+	const struct rte_ether_addr *mac = dev->data->mac_addrs;
 	int i;
 
-	ether_format_addr(buf, sizeof(buf), mac);
 	RTE_ETH_FOREACH_DEV(i) {
 		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
-		const struct ether_addr *vf_mac = vf_dev->data->mac_addrs;
+		const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;
 
 		if (vf_dev == dev)
 			continue;
 
-		ether_format_addr(buf, sizeof(buf), vf_mac);
-		if (is_same_ether_addr(mac, vf_mac))
+		if (rte_is_same_ether_addr(mac, vf_mac))
 			return i;
 	}
 
 	return -ENOENT;
 }
 
+
 /*
  * Attach new PCI VF device and return the port_id
  */
-static int hn_vf_attach(struct hn_data *hv, uint16_t port_id,
-			struct rte_eth_dev **vf_dev)
+static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
 {
 	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
 	int ret;
 
+	if (hn_vf_attached(hv)) {
+		PMD_DRV_LOG(ERR, "VF already attached");
+		return -EEXIST;
+	}
+
 	ret = rte_eth_dev_owner_get(port_id, &owner);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
@@ -79,8 +81,9 @@ static int hn_vf_attach(struct hn_data *hv, uint16_t port_id,
 	}
 
 	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
+	hv->vf_port = port_id;
 	rte_smp_wmb();
-	*vf_dev = &rte_eth_devices[port_id];
+
 	return 0;
 }
 
@@ -96,12 +99,7 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
 	}
 
 	rte_spinlock_lock(&hv->vf_lock);
-	if (hv->vf_dev) {
-		PMD_DRV_LOG(ERR, "VF already attached");
-		err = -EBUSY;
-	} else {
-		err = hn_vf_attach(hv, port, &hv->vf_dev);
-	}
+	err = hn_vf_attach(hv, port);
 
 	if (err == 0) {
 		dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
@@ -120,22 +118,22 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
 /* Remove new VF device */
 static void hn_vf_remove(struct hn_data *hv)
 {
-	struct rte_eth_dev *vf_dev;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
-	if (!vf_dev) {
+
+	if (!hn_vf_attached(hv)) {
 		PMD_DRV_LOG(ERR, "VF path not active");
-		rte_spinlock_unlock(&hv->vf_lock);
-		return;
-	}
+	} else {
+		/* Stop incoming packets from arriving on VF */
+		hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
 
-	/* Stop incoming packets from arriving on VF */
-	hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
-	hv->vf_dev = NULL;
+		/* Stop transmission over VF */
+		hv->vf_port = HN_INVALID_PORT;
+		rte_smp_wmb();
 
-	/* Give back ownership */
-	rte_eth_dev_owner_unset(vf_dev->data->port_id, hv->owner.id);
+		/* Give back ownership */
+		rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id);
+	}
 	rte_spinlock_unlock(&hv->vf_lock);
 }
 
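Note on the attach/detach scheme above: the VF is now tracked by port number
(hv->vf_port) rather than a cached rte_eth_dev pointer, and hn_vf_attach()
publishes the port id with rte_smp_wmb() so the datapath never sees a
half-initialized VF. The hn_vf_attached() and hn_get_vf_dev() helpers used
throughout this patch are defined in hn_var.h and are not part of this diff;
a plausible sketch of them, for reference only:

	/* Sketch only -- the real definitions live in hn_var.h. */
	static inline bool hn_vf_attached(const struct hn_data *hv)
	{
		/* HN_INVALID_PORT marks "no VF bound to this device" */
		return hv->vf_port != HN_INVALID_PORT;
	}

	static inline struct rte_eth_dev *
	hn_get_vf_dev(const struct hn_data *hv)
	{
		/* Resolve the port number to an ethdev, if one is attached */
		if (hv->vf_port == HN_INVALID_PORT)
			return NULL;
		else
			return &rte_eth_devices[hv->vf_port];
	}
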
@@ -174,12 +172,15 @@ hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
  * use the default config of the VF
  * and the minimum number of queues and buffer sizes.
  */
-static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
+static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
 			     struct rte_eth_dev_info *info)
 {
 	struct rte_eth_dev_info vf_info;
+	int ret;
 
-	rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
+	ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
+	if (ret != 0)
+		return ret;
 
 	info->speed_capa = vf_info.speed_capa;
 	info->default_rxportconf = vf_info.default_rxportconf;
@@ -200,17 +201,21 @@ static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
 				     info->min_rx_bufsize);
 	info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen,
 				      info->max_rx_pktlen);
+
+	return 0;
 }
 
-void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
+int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
 {
 	struct rte_eth_dev *vf_dev;
+	int ret = 0;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
+	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
-		hn_vf_info_merge(vf_dev, info);
+		ret = hn_vf_info_merge(vf_dev, info);
 	rte_spinlock_unlock(&hv->vf_lock);
+	return ret;
 }
 
 int hn_vf_link_update(struct rte_eth_dev *dev,
@@ -221,7 +226,7 @@ int hn_vf_link_update(struct rte_eth_dev *dev,
 	int ret = 0;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
+	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->link_update)
 		ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
 	rte_spinlock_unlock(&hv->vf_lock);
@@ -249,13 +254,14 @@ static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
 }
 
 static int _hn_vf_configure(struct rte_eth_dev *dev,
-			    struct rte_eth_dev *vf_dev,
+			    uint16_t vf_port,
 			    const struct rte_eth_conf *dev_conf)
 {
 	struct rte_eth_conf vf_conf = *dev_conf;
-	uint16_t vf_port = vf_dev->data->port_id;
+	struct rte_eth_dev *vf_dev;
 	int ret;
 
+	vf_dev = &rte_eth_devices[vf_port];
 	if (dev_conf->intr_conf.lsc &&
 	    (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
 		PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
@@ -294,13 +300,11 @@ int hn_vf_configure(struct rte_eth_dev *dev,
 			const struct rte_eth_conf *dev_conf)
 {
 	struct hn_data *hv = dev->data->dev_private;
-	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
-	if (vf_dev)
-		ret = _hn_vf_configure(dev, vf_dev, dev_conf);
+	if (hv->vf_port != HN_INVALID_PORT)
+		ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
 	rte_spinlock_unlock(&hv->vf_lock);
 	return ret;
 }
@@ -312,7 +316,7 @@ const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
 	const uint32_t *ptypes = NULL;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
+	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
 		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
 	rte_spinlock_unlock(&hv->vf_lock);
@@ -327,7 +331,7 @@ int hn_vf_start(struct rte_eth_dev *dev)
 	int ret = 0;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
+	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_dev_start(vf_dev->data->port_id);
 	rte_spinlock_unlock(&hv->vf_lock);
@@ -340,7 +344,7 @@ void hn_vf_stop(struct rte_eth_dev *dev)
 	struct rte_eth_dev *vf_dev;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
+	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		rte_eth_dev_stop(vf_dev->data->port_id);
 	rte_spinlock_unlock(&hv->vf_lock);
@@ -352,12 +356,26 @@ void hn_vf_stop(struct rte_eth_dev *dev)
 		struct hn_data *hv = (dev)->data->dev_private;	\
 		struct rte_eth_dev *vf_dev;	\
 		rte_spinlock_lock(&hv->vf_lock);	\
-		vf_dev = hv->vf_dev;	\
+		vf_dev = hn_get_vf_dev(hv);	\
 		if (vf_dev)	\
 			func(vf_dev->data->port_id);	\
 		rte_spinlock_unlock(&hv->vf_lock);	\
 	}
 
+/* If VF is present, then cascade configuration down */
+#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)	\
+	{	\
+		struct hn_data *hv = (dev)->data->dev_private;	\
+		struct rte_eth_dev *vf_dev;	\
+		int ret = 0;	\
+		rte_spinlock_lock(&hv->vf_lock);	\
+		vf_dev = hn_get_vf_dev(hv);	\
+		if (vf_dev)	\
+			ret = func(vf_dev->data->port_id);	\
+		rte_spinlock_unlock(&hv->vf_lock);	\
+		return ret;	\
+	}
+
 void hn_vf_reset(struct rte_eth_dev *dev)
 {
 	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
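The new VF_ETHDEV_FUNC_RET_STATUS macro mirrors VF_ETHDEV_FUNC but forwards
the return code, matching the ethdev API change that made the stats-reset,
promiscuous and allmulticast calls return int. Expanded by hand for
hn_vf_stats_reset() (converted below), it reads roughly:

	int hn_vf_stats_reset(struct rte_eth_dev *dev)
	{
		struct hn_data *hv = dev->data->dev_private;
		struct rte_eth_dev *vf_dev;
		int ret = 0;

		/* Look up the VF under the lock, then delegate to it */
		rte_spinlock_lock(&hv->vf_lock);
		vf_dev = hn_get_vf_dev(hv);
		if (vf_dev)
			ret = rte_eth_stats_reset(vf_dev->data->port_id);
		rte_spinlock_unlock(&hv->vf_lock);
		return ret;
	}

Note that ret stays 0 when no VF is attached: an absent VF is not treated as
an error for these cascaded operations.
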
@@ -365,36 +383,45 @@ void hn_vf_reset(struct rte_eth_dev *dev)
 }
 
 void hn_vf_close(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_dev_close);
+	struct hn_data *hv = dev->data->dev_private;
+	uint16_t vf_port;
+
+	rte_spinlock_lock(&hv->vf_lock);
+	vf_port = hv->vf_port;
+	if (vf_port != HN_INVALID_PORT)
+		rte_eth_dev_close(vf_port);
+
+	hv->vf_port = HN_INVALID_PORT;
+	rte_spinlock_unlock(&hv->vf_lock);
 }
 
-void hn_vf_stats_reset(struct rte_eth_dev *dev)
+int hn_vf_stats_reset(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_stats_reset);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
 }
 
-void hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
+int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_enable);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
 }
 
-void hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
+int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_disable);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
 }
 
-void hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
+int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_enable);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
 }
 
-void hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
+int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
 {
-	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_disable);
+	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
 }
 
 int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
-		       struct ether_addr *mc_addr_set,
+		       struct rte_ether_addr *mc_addr_set,
 		       uint32_t nb_mc_addr)
 {
 	struct hn_data *hv = dev->data->dev_private;
@@ -402,7 +429,7 @@ int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
 	int ret = 0;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
+	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
 						   mc_addr_set, nb_mc_addr);
@@ -420,7 +447,7 @@ int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
 	int ret = 0;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
+	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
 					     queue_idx, nb_desc,
@@ -434,7 +461,7 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
 	struct rte_eth_dev *vf_dev;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
+	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
 		void *subq = vf_dev->data->tx_queues[queue_id];
 
@@ -455,7 +482,7 @@ int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
 	int ret = 0;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
+	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
 					     queue_idx, nb_desc,
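The queue-release hunks above and below stop at the sub-queue lookup because
the actual release call is unchanged context outside the hunk. For the Tx
side, the whole function after this patch plausibly reads as follows (a
sketch; only the hn_get_vf_dev() line is actually changed by the diff):

	void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
	{
		struct rte_eth_dev *vf_dev;

		rte_spinlock_lock(&hv->vf_lock);
		vf_dev = hn_get_vf_dev(hv);
		if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
			/* Release the VF's per-queue object, not the
			 * synthetic device's own queue state.
			 */
			void *subq = vf_dev->data->tx_queues[queue_id];

			(*vf_dev->dev_ops->tx_queue_release)(subq);
		}
		rte_spinlock_unlock(&hv->vf_lock);
	}
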
@@ -469,7 +496,7 @@ void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
 	struct rte_eth_dev *vf_dev;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
+	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
 		void *subq = vf_dev->data->rx_queues[queue_id];
 
@@ -486,7 +513,7 @@ int hn_vf_stats_get(struct rte_eth_dev *dev,
 	int ret = 0;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
+	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
 	rte_spinlock_unlock(&hv->vf_lock);
@@ -500,17 +527,19 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
 	struct hn_data *hv = dev->data->dev_private;
 	struct rte_eth_dev *vf_dev;
 	int i, count = 0;
-	char tmp[RTE_ETH_XSTATS_NAME_SIZE];
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
-	if (vf_dev && vf_dev->dev_ops->xstats_get_names)
-		count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n);
+	vf_dev = hn_get_vf_dev(hv);
+	if (vf_dev)
+		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
+						 names, n);
 	rte_spinlock_unlock(&hv->vf_lock);
 
 	/* add vf_ prefix to xstat names */
 	if (names) {
 		for (i = 0; i < count; i++) {
+			char tmp[RTE_ETH_XSTATS_NAME_SIZE];
+
 			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
 			strlcpy(names[i].name, tmp, sizeof(names[i].name));
 		}
@@ -521,29 +550,76 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
 
 int hn_vf_xstats_get(struct rte_eth_dev *dev,
 		     struct rte_eth_xstat *xstats,
+		     unsigned int offset,
 		     unsigned int n)
 {
 	struct hn_data *hv = dev->data->dev_private;
 	struct rte_eth_dev *vf_dev;
-	int count = 0;
+	int i, count = 0;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
-	if (vf_dev && vf_dev->dev_ops->xstats_get)
-		count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n);
+	vf_dev = hn_get_vf_dev(hv);
+	if (vf_dev)
+		count = rte_eth_xstats_get(vf_dev->data->port_id,
					   xstats + offset, n - offset);
 	rte_spinlock_unlock(&hv->vf_lock);
 
+	/* Offset id's for VF stats */
+	if (count > 0) {
+		for (i = 0; i < count; i++)
+			xstats[i + offset].id += offset;
+	}
+
 	return count;
 }
 
-void hn_vf_xstats_reset(struct rte_eth_dev *dev)
+int hn_vf_xstats_reset(struct rte_eth_dev *dev)
 {
 	struct hn_data *hv = dev->data->dev_private;
 	struct rte_eth_dev *vf_dev;
+	int ret;
 
 	rte_spinlock_lock(&hv->vf_lock);
-	vf_dev = hv->vf_dev;
-	if (vf_dev && vf_dev->dev_ops->xstats_reset)
-		vf_dev->dev_ops->xstats_reset(vf_dev);
+	vf_dev = hn_get_vf_dev(hv);
+	if (vf_dev)
+		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
+	else
+		ret = -EINVAL;
 	rte_spinlock_unlock(&hv->vf_lock);
+
+	return ret;
+}
+
+int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
+			  struct rte_eth_rss_conf *rss_conf)
+{
+	struct hn_data *hv = dev->data->dev_private;
+	struct rte_eth_dev *vf_dev;
+	int ret = 0;
+
+	rte_spinlock_lock(&hv->vf_lock);
+	vf_dev = hn_get_vf_dev(hv);
+	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
+		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
+	rte_spinlock_unlock(&hv->vf_lock);
+
+	return ret;
+}
+
+int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
+			   struct rte_eth_rss_reta_entry64 *reta_conf,
+			   uint16_t reta_size)
+{
+	struct hn_data *hv = dev->data->dev_private;
+	struct rte_eth_dev *vf_dev;
+	int ret = 0;
+
+	rte_spinlock_lock(&hv->vf_lock);
+	vf_dev = hn_get_vf_dev(hv);
+	if (vf_dev && vf_dev->dev_ops->reta_update)
+		ret = vf_dev->dev_ops->reta_update(vf_dev,
+						   reta_conf, reta_size);
+	rte_spinlock_unlock(&hv->vf_lock);
+
+	return ret;
+}
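On the xstats path, hn_vf_xstats_get() now takes an offset so the VF entries
can be appended after the synthetic device's own counters: the ids returned
by rte_eth_xstats_get() are zero-based for the VF, so each one is rebased by
offset to line up with the "vf_"-prefixed names from
hn_vf_xstats_get_names(). A hypothetical caller (the real one lives in
hn_ethdev.c and is not part of this diff; hn_dev_synthetic_xstats_get() is an
invented placeholder for the synthetic-path counters) would combine the two
sets like this:

	static int
	hn_dev_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *xstats, unsigned int n)
	{
		unsigned int nsyn;
		int vf_count;

		/* Synthetic counters occupy ids [0, nsyn) */
		nsyn = hn_dev_synthetic_xstats_get(dev, xstats, n); /* hypothetical */
		if (nsyn >= n)
			return nsyn;

		/* VF counters are appended at ids [nsyn, nsyn + vf_count) */
		vf_count = hn_vf_xstats_get(dev, xstats, nsyn, n);
		return nsyn + vf_count;
	}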