With multiple channels, the primary channel may receive notification
that a VF has been added or removed while a secondary channel is in
the middle of a receive or transmit operation. Resolve this race by
converting the existing vf_lock to a reader/writer lock.

Users of the lock (tx/rx/stats) acquire it for read, and actions
like add/remove acquire it for write.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
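---
For reference, here is a minimal sketch of the locking pattern this
patch adopts. It is illustrative only (the function names are made
up, and vf_dev stands in for the hv->vf_port lookup); it uses the
public rte_rwlock API: concurrent data-path users take the lock
shared, while the rare add/remove path takes it exclusive.

	#include <rte_rwlock.h>

	static rte_rwlock_t vf_lock = RTE_RWLOCK_INITIALIZER;
	static void *vf_dev;	/* stands in for the VF port lookup */

	/* Data path (tx/rx/stats): shared access, runs on any channel */
	static void data_path_user(void)
	{
		rte_rwlock_read_lock(&vf_lock);
		if (vf_dev != NULL)
			; /* transmit or receive via the VF */
		rte_rwlock_read_unlock(&vf_lock);
	}

	/* Control path (VF add/remove): exclusive, blocks all readers */
	static void control_path_update(void *new_dev)
	{
		rte_rwlock_write_lock(&vf_lock);
		vf_dev = new_dev;	/* readers never see a half-done switch */
		rte_rwlock_write_unlock(&vf_lock);
	}

Readers do not block each other, so the hot tx/rx paths pay nothing
extra in the common case; only the rare VF add/remove must wait for
in-flight bursts to drain.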
hv->port_id = eth_dev->data->port_id;
hv->latency = HN_CHAN_LATENCY_NS;
hv->max_queues = 1;
- rte_spinlock_init(&hv->vf_lock);
+ rte_rwlock_init(&hv->vf_lock);
hv->vf_port = HN_INVALID_PORT;
err = hn_parse_args(eth_dev);
hn_process_events(hv, txq->queue_id, 0);
/* Transmit over VF if present and up */
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->data->dev_started) {
void *sub_q = vf_dev->data->tx_queues[queue_id];
- return (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
+ nb_tx = (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
+ rte_rwlock_read_unlock(&hv->vf_lock);
+ return nb_tx;
}
+ rte_rwlock_read_unlock(&hv->vf_lock);
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
struct rte_mbuf *m = tx_pkts[nb_tx];
(void **)rx_pkts, nb_pkts, NULL);
/* If VF is available, check that as well */
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->data->dev_started)
nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
rx_pkts + nb_rcv, nb_pkts - nb_rcv);
+ rte_rwlock_read_unlock(&hv->vf_lock);
struct hn_data {
struct rte_vmbus_device *vmbus;
struct hn_rx_queue *primary;
- rte_spinlock_t vf_lock;
+ rte_rwlock_t vf_lock;
uint16_t port_id;
uint16_t vf_port;
return hv->vf_port != HN_INVALID_PORT;
}
-/* Get VF device for existing netvsc device */
+/*
+ * Get VF device for existing netvsc device
+ * Assumes vf_lock is held.
+ */
static inline struct rte_eth_dev *
hn_get_vf_dev(const struct hn_data *hv)
{
uint16_t vf_port = hv->vf_port;
- /* make sure vf_port is loaded */
- rte_smp_rmb();
-
if (vf_port == HN_INVALID_PORT)
return NULL;
else
PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
hv->vf_port = port_id;
- rte_spinlock_lock(&hv->vf_lock);
err = hn_vf_attach(hv, port);
if (err == 0) {
dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
hv->vf_intr = (struct rte_intr_handle) {
dev->intr_handle = &hv->vf_intr;
hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
}
- rte_spinlock_unlock(&hv->vf_lock);
static void hn_vf_remove(struct hn_data *hv)
{
- rte_spinlock_lock(&hv->vf_lock);
-
if (!hn_vf_attached(hv)) {
PMD_DRV_LOG(ERR, "VF path not active");
} else {
/* Stop transmission over VF */
hv->vf_port = HN_INVALID_PORT;
/* Give back ownership */
rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id);
}
- rte_spinlock_unlock(&hv->vf_lock);
}
/* Handle VF association message from host */
vf_assoc->allocated ? "add to" : "remove from",
dev->data->port_id);
+ rte_rwlock_write_lock(&hv->vf_lock);
hv->vf_present = vf_assoc->allocated;
- if (dev->state != RTE_ETH_DEV_ATTACHED)
- return;
-
- if (vf_assoc->allocated)
- hn_vf_add(dev, hv);
- else
- hn_vf_remove(hv);
+ if (dev->state == RTE_ETH_DEV_ATTACHED) {
+ if (vf_assoc->allocated)
+ hn_vf_add(dev, hv);
+ else
+ hn_vf_remove(hv);
+ }
+ rte_rwlock_write_unlock(&hv->vf_lock);
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = hn_vf_info_merge(vf_dev, info);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->link_update)
ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
struct hn_data *hv = dev->data->dev_private;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
if (hv->vf_port != HN_INVALID_PORT)
ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
struct rte_eth_dev *vf_dev;
const uint32_t *ptypes = NULL;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_dev_start(vf_dev->data->port_id);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
struct hn_data *hv = dev->data->dev_private;
struct rte_eth_dev *vf_dev;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
rte_eth_dev_stop(vf_dev->data->port_id);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
}
/* If VF is present, then cascade configuration down */
{ \
struct hn_data *hv = (dev)->data->dev_private; \
struct rte_eth_dev *vf_dev; \
- rte_spinlock_lock(&hv->vf_lock); \
+ rte_rwlock_read_lock(&hv->vf_lock); \
vf_dev = hn_get_vf_dev(hv); \
if (vf_dev) \
func(vf_dev->data->port_id); \
- rte_spinlock_unlock(&hv->vf_lock); \
+ rte_rwlock_read_unlock(&hv->vf_lock); \
}
/* If VF is present, then cascade configuration down */
struct hn_data *hv = (dev)->data->dev_private; \
struct rte_eth_dev *vf_dev; \
int ret = 0; \
- rte_spinlock_lock(&hv->vf_lock); \
+ rte_rwlock_read_lock(&hv->vf_lock); \
vf_dev = hn_get_vf_dev(hv); \
if (vf_dev) \
ret = func(vf_dev->data->port_id); \
- rte_spinlock_unlock(&hv->vf_lock); \
+ rte_rwlock_read_unlock(&hv->vf_lock); \
struct hn_data *hv = dev->data->dev_private;
uint16_t vf_port;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_port = hv->vf_port;
if (vf_port != HN_INVALID_PORT)
rte_eth_dev_close(vf_port);
hv->vf_port = HN_INVALID_PORT;
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
}
int hn_vf_stats_reset(struct rte_eth_dev *dev)
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
mc_addr_set, nb_mc_addr);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
queue_idx, nb_desc,
socket_id, tx_conf);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
{
struct rte_eth_dev *vf_dev;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
void *subq = vf_dev->data->tx_queues[queue_id];
(*vf_dev->dev_ops->tx_queue_release)(subq);
}
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
}
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
queue_idx, nb_desc,
socket_id, rx_conf, mp);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
{
struct rte_eth_dev *vf_dev;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
void *subq = vf_dev->data->rx_queues[queue_id];
(*vf_dev->dev_ops->rx_queue_release)(subq);
}
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
}
int hn_vf_stats_get(struct rte_eth_dev *dev,
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
struct rte_eth_dev *vf_dev;
int i, count = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
count = rte_eth_xstats_get_names(vf_dev->data->port_id,
names, n);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
/* add vf_ prefix to xstat names */
if (names) {
struct rte_eth_dev *vf_dev;
int i, count = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
count = rte_eth_xstats_get(vf_dev->data->port_id,
xstats + offset, n - offset);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
/* Offset id's for VF stats */
if (count > 0) {
struct rte_eth_dev *vf_dev;
int ret;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_xstats_reset(vf_dev->data->port_id);
else
ret = -EINVAL;
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->rss_hash_update)
ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);
struct rte_eth_dev *vf_dev;
int ret = 0;
- rte_spinlock_lock(&hv->vf_lock);
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->reta_update)
ret = vf_dev->dev_ops->reta_update(vf_dev,
reta_conf, reta_size);
- rte_spinlock_unlock(&hv->vf_lock);
+ rte_rwlock_read_unlock(&hv->vf_lock);