#include <errno.h>
#include <unistd.h>
#include <dirent.h>
+#include <fcntl.h>
#include <sys/types.h>
-#include <sys/fcntl.h>
#include <sys/uio.h>
#include <rte_ether.h>
/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
- const struct ether_addr *mac = dev->data->mac_addrs;
+ const struct rte_ether_addr *mac = dev->data->mac_addrs;
int i;
RTE_ETH_FOREACH_DEV(i) {
const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
- const struct ether_addr *vf_mac = vf_dev->data->mac_addrs;
+ const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;
if (vf_dev == dev)
continue;
- if (is_same_ether_addr(mac, vf_mac))
+ if (rte_is_same_ether_addr(mac, vf_mac))
return i;
}
return -ENOENT;
}

/*
 * Merge the info from the VF and the synthetic path;
 * use the default config of the VF
* and the minimum number of queues and buffer sizes.
*/
-static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
+static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
struct rte_eth_dev_info *info)
{
struct rte_eth_dev_info vf_info;
+ int ret;
- rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
+ ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
+ if (ret != 0)
+ return ret;
info->speed_capa = vf_info.speed_capa;
info->default_rxportconf = vf_info.default_rxportconf;
	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen,
info->max_rx_pktlen);
+
+ return 0;
}
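
/*
 * Merge VF device info into the synthetic device's info, when a VF
 * is attached; returns 0 or a negative error from the VF query.
 */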
-void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
+int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
struct rte_eth_dev *vf_dev;
+ int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
- hn_vf_info_merge(vf_dev, info);
+ ret = hn_vf_info_merge(vf_dev, info);
rte_spinlock_unlock(&hv->vf_lock);
+ return ret;
}
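
/* Query link state through the VF device, if one is attached */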
int hn_vf_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->link_update)
		ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}

/* If VF is present, then cascade request down to VF */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_spinlock_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_spinlock_unlock(&hv->vf_lock);		\
	}

+/* If VF is present, then cascade request down to VF and return status */
+#define VF_ETHDEV_FUNC_RET_STATUS(dev, func) \
+ { \
+ struct hn_data *hv = (dev)->data->dev_private; \
+ struct rte_eth_dev *vf_dev; \
+ int ret = 0; \
+ rte_spinlock_lock(&hv->vf_lock); \
+ vf_dev = hn_get_vf_dev(hv); \
+ if (vf_dev) \
+ ret = func(vf_dev->data->port_id); \
+ rte_spinlock_unlock(&hv->vf_lock); \
+ return ret; \
+ }
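+
+/*
+ * Note: the expansion contains a return statement, so the macro must
+ * form the entire body of an int-returning wrapper (see the
+ * promiscuous helpers below).
+ */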
+
void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}
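
/* Close the attached VF port, if any, and mark the VF slot invalid */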
void hn_vf_close(struct rte_eth_dev *dev)
{
- VF_ETHDEV_FUNC(dev, rte_eth_dev_close);
+ struct hn_data *hv = dev->data->dev_private;
+ uint16_t vf_port;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_port = hv->vf_port;
+ if (vf_port != HN_INVALID_PORT)
+ rte_eth_dev_close(vf_port);
+
+ hv->vf_port = HN_INVALID_PORT;
+ rte_spinlock_unlock(&hv->vf_lock);
}
void hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_stats_reset);
}

void hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_disable);
}
-void hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
+int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
- VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_enable);
+ VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
}
-void hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
+int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
- VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_disable);
+ VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
}
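
/* Forward the multicast address filter list to the VF, if present */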
int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
- struct ether_addr *mc_addr_set,
+ struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr)
{
struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
struct rte_eth_dev *vf_dev;
int i, count = 0;
- char tmp[RTE_ETH_XSTATS_NAME_SIZE];
rte_spinlock_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
- if (vf_dev && vf_dev->dev_ops->xstats_get_names)
- count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n);
+ if (vf_dev)
+ count = rte_eth_xstats_get_names(vf_dev->data->port_id,
+ names, n);
rte_spinlock_unlock(&hv->vf_lock);
/* add vf_ prefix to xstat names */
if (names) {
for (i = 0; i < count; i++) {
+ char tmp[RTE_ETH_XSTATS_NAME_SIZE];
+
snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}
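
/*
 * Fetch VF xstats into xstats[offset..]; the caller reserves the first
 * offset slots for the synthetic device's own counters.
 */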
int hn_vf_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats,
+ unsigned int offset,
unsigned int n)
{
struct hn_data *hv = dev->data->dev_private;
struct rte_eth_dev *vf_dev;
- int count = 0;
+ int i, count = 0;
rte_spinlock_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
- if (vf_dev && vf_dev->dev_ops->xstats_get)
- count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n);
+ if (vf_dev)
+ count = rte_eth_xstats_get(vf_dev->data->port_id,
+ xstats + offset, n - offset);
rte_spinlock_unlock(&hv->vf_lock);
+	/* Offset IDs so VF stats follow the synthetic device's stats */
+ if (count > 0) {
+ for (i = 0; i < count; i++)
+ xstats[i + offset].id += offset;
+ }
+
return count;
}
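
/* Reset xstats on the VF device, if one is attached */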
void hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
- if (vf_dev && vf_dev->dev_ops->xstats_reset)
- vf_dev->dev_ops->xstats_reset(vf_dev);
+ if (vf_dev)
+ rte_eth_xstats_reset(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
}
+
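+/* If VF is present, pass the RSS hash configuration down to it */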
+int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_dev *vf_dev;
+ int ret = 0;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hn_get_vf_dev(hv);
+ if (vf_dev && vf_dev->dev_ops->rss_hash_update)
+ ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
+ rte_spinlock_unlock(&hv->vf_lock);
+
+ return ret;
+}
+
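+/* If VF is present, pass the RETA (redirection table) update down to it */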
+int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_dev *vf_dev;
+ int ret = 0;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hn_get_vf_dev(hv);
+ if (vf_dev && vf_dev->dev_ops->reta_update)
+ ret = vf_dev->dev_ops->reta_update(vf_dev,
+ reta_conf, reta_size);
+	rte_spinlock_unlock(&hv->vf_lock);
+
+	return ret;
+}