return &rte_eth_devices[vf_port];
}
-void hn_vf_info_get(struct hn_data *hv,
+int hn_vf_info_get(struct hn_data *hv,
struct rte_eth_dev_info *info);
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
int hn_vf_configure(struct rte_eth_dev *dev,
* use the default config of the VF
* and the minimum number of queues and buffer sizes.
*/
-static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
+static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
struct rte_eth_dev_info *info)
{
struct rte_eth_dev_info vf_info;
+ int ret;
- rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
+ ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
+ if (ret != 0)
+ return ret;
info->speed_capa = vf_info.speed_capa;
info->default_rxportconf = vf_info.default_rxportconf;
	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen,
info->max_rx_pktlen);
+
+ return 0;
}
-void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
+int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
struct rte_eth_dev *vf_dev;
+ int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
- hn_vf_info_merge(vf_dev, info);
+ ret = hn_vf_info_merge(vf_dev, info);
rte_spinlock_unlock(&hv->vf_lock);
+ return ret;
}
int hn_vf_link_update(struct rte_eth_dev *dev,