PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
hv->vf_port = port_id;
- rte_smp_wmb();
-
return 0;
}
return port;
}
- rte_spinlock_lock(&hv->vf_lock);
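+	/* The caller (the VF association handler below) now holds
+	 * vf_lock for writing, so no locking is needed at this level.
+	 */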
err = hn_vf_attach(hv, port);
-
if (err == 0) {
dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
		hv->vf_intr = (struct rte_intr_handle) {
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
dev->intr_handle = &hv->vf_intr;
hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
}
- rte_spinlock_unlock(&hv->vf_lock);
return err;
}
static void hn_vf_remove(struct hn_data *hv)
{
+	uint16_t vf_port;
+
- rte_spinlock_lock(&hv->vf_lock);
-
if (!hn_vf_attached(hv)) {
PMD_DRV_LOG(ERR, "VF path not active");
} else {
+		/* Remember the port before it is torn down */
+		vf_port = hv->vf_port;
+
		/* Stop transmission over VF */
		hv->vf_port = HN_INVALID_PORT;
-		rte_smp_wmb();
		/* Give back ownership */
-		rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id);
+		rte_eth_dev_owner_unset(vf_port, hv->owner.id);
}
- rte_spinlock_unlock(&hv->vf_lock);
}
/* Handle VF association message from host */
vf_assoc->allocated ? "add to" : "remove from",
dev->data->port_id);
+ rte_rwlock_write_lock(&hv->vf_lock);
hv->vf_present = vf_assoc->allocated;
- if (dev->state != RTE_ETH_DEV_ATTACHED)
- return;
-
- if (vf_assoc->allocated)
- hn_vf_add(dev, hv);
- else
- hn_vf_remove(hv);
+ if (dev->state == RTE_ETH_DEV_ATTACHED) {
+ if (vf_assoc->allocated)
+ hn_vf_add(dev, hv);
+ else
+ hn_vf_remove(hv);
+ }
+ rte_rwlock_write_unlock(&hv->vf_lock);
}
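The shape of the new discipline, reduced to a self-contained sketch (struct vf_state and its two helpers are placeholder names, not the driver's): the association handler above is the sole writer and takes the lock exclusively, while the per-operation wrappers below only need a stable view of the VF port, so they can now proceed concurrently as readers instead of serializing on a spinlock.

#include <stdint.h>
#include <rte_rwlock.h>

struct vf_state {
	rte_rwlock_t lock;	/* was a spinlock before this change */
	uint16_t port;		/* UINT16_MAX while no VF is attached */
};

/* Control path (VF association message): exclusive writer */
static void vf_set_port(struct vf_state *vs, uint16_t port)
{
	rte_rwlock_write_lock(&vs->lock);
	vs->port = port;
	rte_rwlock_write_unlock(&vs->lock);
}

/* Query path: any number of readers may hold the lock at once */
static uint16_t vf_get_port(struct vf_state *vs)
{
	uint16_t port;

	rte_rwlock_read_lock(&vs->lock);
	port = vs->port;
	rte_rwlock_read_unlock(&vs->lock);
	return port;
}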
static void
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = hn_vf_info_merge(vf_dev, info);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
return ret;
}
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->link_update)
ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
return ret;
}
struct hn_data *hv = dev->data->dev_private;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
if (hv->vf_port != HN_INVALID_PORT)
ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
return ret;
}
struct rte_eth_dev *vf_dev;
const uint32_t *ptypes = NULL;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
return ptypes;
}
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_dev_start(vf_dev->data->port_id);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
return ret;
}
struct hn_data *hv = dev->data->dev_private;
struct rte_eth_dev *vf_dev;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
rte_eth_dev_stop(vf_dev->data->port_id);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
}
/* If VF is present, then cascade configuration down */
{ \
struct hn_data *hv = (dev)->data->dev_private; \
struct rte_eth_dev *vf_dev; \
- rte_spinlock_lock(&hv->vf_lock); \
+ rte_rwlock_read_lock(&hv->vf_lock); \
vf_dev = hn_get_vf_dev(hv); \
if (vf_dev) \
func(vf_dev->data->port_id); \
- rte_spinlock_unlock(&hv->vf_lock); \
+ rte_rwlock_read_unlock(&hv->vf_lock); \
}
/* If VF is present, then cascade configuration down */
struct hn_data *hv = (dev)->data->dev_private; \
struct rte_eth_dev *vf_dev; \
int ret = 0; \
- rte_spinlock_lock(&hv->vf_lock); \
+ rte_rwlock_read_lock(&hv->vf_lock); \
vf_dev = hn_get_vf_dev(hv); \
if (vf_dev) \
ret = func(vf_dev->data->port_id); \
- rte_spinlock_unlock(&hv->vf_lock); \
+ rte_rwlock_read_unlock(&hv->vf_lock); \
return ret; \
}
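With the two cascade macros in place, each remaining pass-through wrapper collapses to a single macro use. The macros' #define lines fall outside this excerpt, so VF_ETH_APPLY below is an assumed name for the non-returning variant; the intended shape is roughly:

void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETH_APPLY(dev, rte_eth_dev_reset);
}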
struct hn_data *hv = dev->data->dev_private;
uint16_t vf_port;
-	rte_spinlock_lock(&hv->vf_lock);
+	/* This path also clears hv->vf_port, so take the writer side */
+	rte_rwlock_write_lock(&hv->vf_lock);
	vf_port = hv->vf_port;
	if (vf_port != HN_INVALID_PORT)
		rte_eth_dev_close(vf_port);
	hv->vf_port = HN_INVALID_PORT;
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_write_unlock(&hv->vf_lock);
}
int hn_vf_stats_reset(struct rte_eth_dev *dev)
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
mc_addr_set, nb_mc_addr);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
return ret;
}
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
queue_idx, nb_desc,
socket_id, tx_conf);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
return ret;
}
{
struct rte_eth_dev *vf_dev;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
void *subq = vf_dev->data->tx_queues[queue_id];
(*vf_dev->dev_ops->tx_queue_release)(subq);
}
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
}
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
queue_idx, nb_desc,
socket_id, rx_conf, mp);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
return ret;
}
{
struct rte_eth_dev *vf_dev;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
void *subq = vf_dev->data->rx_queues[queue_id];
(*vf_dev->dev_ops->rx_queue_release)(subq);
}
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
}
int hn_vf_stats_get(struct rte_eth_dev *dev,
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
return ret;
}
struct rte_eth_dev *vf_dev;
int i, count = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
count = rte_eth_xstats_get_names(vf_dev->data->port_id,
names, n);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
/* add vf_ prefix to xstat names */
if (names) {
struct rte_eth_dev *vf_dev;
int i, count = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
count = rte_eth_xstats_get(vf_dev->data->port_id,
xstats + offset, n - offset);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
	/* Offset IDs for VF stats */
if (count > 0) {
struct rte_eth_dev *vf_dev;
int ret;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_xstats_reset(vf_dev->data->port_id);
else
ret = -EINVAL;
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
return ret;
}
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->rss_hash_update)
ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
return ret;
}
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->reta_update)
ret = vf_dev->dev_ops->reta_update(vf_dev,
reta_conf, reta_size);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
return ret;
}
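One piece this excerpt does not show: the lock's declaration and initialization must change in step with the call sites. A sketch of the matching hunks, assuming the lock lives in struct hn_data and is initialized on the device probe path as the spinlock was:

-	rte_spinlock_t vf_lock;
+	rte_rwlock_t vf_lock;

-	rte_spinlock_init(&hv->vf_lock);
+	rte_rwlock_init(&hv->vf_lock);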