struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
struct mlx5_vdpa_priv *priv =
mlx5_vdpa_find_priv_resource_by_vdev(vdev);
+ struct mlx5_vdpa_virtq *virtq;
int ret;
if (priv == NULL) {
DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
return -EINVAL;
}
if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
DRV_LOG(ERR, "Too big vring id: %d.", vring);
return -E2BIG;
}
- pthread_mutex_lock(&priv->vq_config_lock);
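+ /* Per-virtq lock: state changes on different queues may run concurrently. */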
+ virtq = &priv->virtqs[vring];
+ pthread_mutex_lock(&virtq->virtq_lock);
ret = mlx5_vdpa_virtq_enable(priv, vring, state);
- pthread_mutex_unlock(&priv->vq_config_lock);
+ pthread_mutex_unlock(&virtq->virtq_lock);
return ret;
}
ret |= mlx5_vdpa_lm_log(priv);
priv->state = MLX5_VDPA_STATE_IN_PROGRESS;
}
+ pthread_mutex_lock(&priv->steer_update_lock);
mlx5_vdpa_steer_unset(priv);
+ pthread_mutex_unlock(&priv->steer_update_lock);
mlx5_vdpa_virtqs_release(priv);
mlx5_vdpa_drain_cq(priv);
if (priv->lm_mr.addr)
mlx5_os_wrapped_mkey_destroy(&priv->lm_mr);
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
- /* The mutex may stay locked after event thread cancel - initiate it. */
- pthread_mutex_init(&priv->vq_config_lock, NULL);
DRV_LOG(INFO, "vDPA device %d was closed.", vid);
return ret;
}
static int
mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
{
+ struct mlx5_vdpa_virtq *virtq;
uint32_t index;
uint32_t i;
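+ /* Each virtq carries its own configuration lock; initialize all of them up front. */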
+ for (index = 0; index < priv->caps.max_num_virtio_queues * 2;
+ index++) {
+ virtq = &priv->virtqs[index];
+ pthread_mutex_init(&virtq->virtq_lock, NULL);
+ }
if (!priv->queues)
return 0;
for (index = 0; index < (priv->queues * 2); ++index) {
- struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
+ virtq = &priv->virtqs[index];
int ret = mlx5_vdpa_event_qp_prepare(priv, priv->queue_size,
- -1, &virtq->eqp);
+ -1, virtq);
if (ret) {
DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
priv->num_lag_ports = attr->num_lag_ports;
if (attr->num_lag_ports == 0)
priv->num_lag_ports = 1;
- pthread_mutex_init(&priv->vq_config_lock, NULL);
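+ /* db_lock serializes doorbell writes; steer_update_lock serializes steering reconfiguration. */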
+ rte_spinlock_init(&priv->db_lock);
+ pthread_mutex_init(&priv->steer_update_lock, NULL);
priv->cdev = cdev;
mlx5_vdpa_config_get(mkvlist, priv);
if (mlx5_vdpa_create_dev_resources(priv))
mlx5_vdpa_release_dev_resources(priv);
if (priv->vdev)
rte_vdpa_unregister_device(priv->vdev);
- pthread_mutex_destroy(&priv->vq_config_lock);
rte_free(priv);
}
bool stopped;
uint32_t configured:1;
uint32_t version;
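+ /* Protects this virtq's configuration and its event/kick handling. */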
+ pthread_mutex_t virtq_lock;
struct mlx5_vdpa_priv *priv;
struct mlx5_devx_obj *virtq;
struct mlx5_devx_obj *counters;
TAILQ_ENTRY(mlx5_vdpa_priv) next;
bool connected;
enum mlx5_dev_state state;
- pthread_mutex_t vq_config_lock;
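+ /* db_lock protects the doorbell register, steer_update_lock protects steering objects. */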
+ rte_spinlock_t db_lock;
+ pthread_mutex_t steer_update_lock;
uint64_t no_traffic_counter;
pthread_t timer_tid;
int event_mode;
* Number of descriptors.
* @param[in] callfd
* The guest notification file descriptor.
- * @param[in/out] eqp
- * Pointer to the event QP structure.
+ * @param[in/out] virtq
+ * Pointer to the virt-queue structure.
*
* @return
* 0 on success, -1 otherwise and rte_errno is set.
*/
-int mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
- int callfd, struct mlx5_vdpa_event_qp *eqp);
+int
+mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
+ int callfd, struct mlx5_vdpa_virtq *virtq);
/**
* Destroy an event QP and all its related resources.
static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
- int callfd, struct mlx5_vdpa_cq *cq)
+ int callfd, struct mlx5_vdpa_virtq *virtq)
{
struct mlx5_devx_cq_attr attr = {
.use_first_only = 1,
.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
};
+ struct mlx5_vdpa_cq *cq = &virtq->eqp.cq;
uint16_t event_nums[1] = {0};
int ret;
cq->log_desc_n = log_desc_n;
rte_spinlock_init(&cq->sl);
/* Subscribe CQ event to the event channel controlled by the driver. */
- ret = mlx5_os_devx_subscribe_devx_event(priv->eventc,
- cq->cq_obj.cq->obj,
- sizeof(event_nums), event_nums,
- (uint64_t)(uintptr_t)cq);
+ ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc,
+ cq->cq_obj.cq->obj,
+ sizeof(event_nums),
+ event_nums,
+ (uint64_t)(uintptr_t)virtq);
if (ret) {
DRV_LOG(ERR, "Failed to subscribe CQE event.");
rte_errno = errno;
static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
+ struct mlx5_vdpa_virtq *virtq;
struct mlx5_vdpa_cq *cq;
int i;
for (i = 0; i < priv->nr_virtqs; i++) {
+ virtq = &priv->virtqs[i];
+ pthread_mutex_lock(&virtq->virtq_lock);
cq = &priv->virtqs[i].eqp.cq;
if (cq->cq_obj.cq && !cq->armed)
mlx5_vdpa_cq_arm(priv, cq);
+ pthread_mutex_unlock(&virtq->virtq_lock);
}
}
static uint32_t
mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)
{
- int i;
+ struct mlx5_vdpa_virtq *virtq;
+ struct mlx5_vdpa_cq *cq;
uint32_t max = 0;
+ uint32_t comp;
+ int i;
for (i = 0; i < priv->nr_virtqs; i++) {
- struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;
- uint32_t comp = mlx5_vdpa_queue_complete(cq);
-
+ virtq = &priv->virtqs[i];
+ pthread_mutex_lock(&virtq->virtq_lock);
+ cq = &virtq->eqp.cq;
+ comp = mlx5_vdpa_queue_complete(cq);
+ pthread_mutex_unlock(&virtq->virtq_lock);
if (comp > max)
max = comp;
}
return max;
}
/* Wait on all CQs channel for completion event. */
-static struct mlx5_vdpa_cq *
+static struct mlx5_vdpa_virtq *
mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
sizeof(out.buf));
if (ret >= 0)
- return (struct mlx5_vdpa_cq *)(uintptr_t)out.event_resp.cookie;
+ return (struct mlx5_vdpa_virtq *)
+ (uintptr_t)out.event_resp.cookie;
DRV_LOG(INFO, "Got error in devx_get_event, ret = %d, errno = %d.",
ret, errno);
#endif
mlx5_vdpa_event_handle(void *arg)
{
struct mlx5_vdpa_priv *priv = arg;
- struct mlx5_vdpa_cq *cq;
+ struct mlx5_vdpa_virtq *virtq;
uint32_t max;
switch (priv->event_mode) {
case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
priv->timer_delay_us = priv->event_us;
while (1) {
- pthread_mutex_lock(&priv->vq_config_lock);
max = mlx5_vdpa_queues_complete(priv);
if (max == 0 && priv->no_traffic_counter++ >=
priv->no_traffic_max) {
priv->vdev->device->name);
mlx5_vdpa_arm_all_cqs(priv);
do {
- pthread_mutex_unlock
- (&priv->vq_config_lock);
- cq = mlx5_vdpa_event_wait(priv);
- pthread_mutex_lock
- (&priv->vq_config_lock);
- if (cq == NULL ||
- mlx5_vdpa_queue_complete(cq) > 0)
+ virtq = mlx5_vdpa_event_wait(priv);
+ if (virtq == NULL)
break;
+ pthread_mutex_lock(
+ &virtq->virtq_lock);
+ if (mlx5_vdpa_queue_complete(
+ &virtq->eqp.cq) > 0) {
+ pthread_mutex_unlock(
+ &virtq->virtq_lock);
+ break;
+ }
+ pthread_mutex_unlock(
+ &virtq->virtq_lock);
} while (1);
priv->timer_delay_us = priv->event_us;
priv->no_traffic_counter = 0;
} else if (max != 0) {
priv->no_traffic_counter = 0;
}
- pthread_mutex_unlock(&priv->vq_config_lock);
mlx5_vdpa_timer_sleep(priv, max);
}
return NULL;
case MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT:
do {
- cq = mlx5_vdpa_event_wait(priv);
- if (cq != NULL) {
- pthread_mutex_lock(&priv->vq_config_lock);
- if (mlx5_vdpa_queue_complete(cq) > 0)
- mlx5_vdpa_cq_arm(priv, cq);
- pthread_mutex_unlock(&priv->vq_config_lock);
+ virtq = mlx5_vdpa_event_wait(priv);
+ if (virtq != NULL) {
+ pthread_mutex_lock(&virtq->virtq_lock);
+ if (mlx5_vdpa_queue_complete(
+ &virtq->eqp.cq) > 0)
+ mlx5_vdpa_cq_arm(priv, &virtq->eqp.cq);
+ pthread_mutex_unlock(&virtq->virtq_lock);
}
} while (1);
return NULL;
struct mlx5_vdpa_virtq *virtq;
uint64_t sec;
- pthread_mutex_lock(&priv->vq_config_lock);
while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
sizeof(out.buf)) >=
(ssize_t)sizeof(out.event_resp.cookie)) {
continue;
}
virtq = &priv->virtqs[vq_index];
+ pthread_mutex_lock(&virtq->virtq_lock);
if (!virtq->enable || virtq->version != version)
- continue;
+ goto unlock;
if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
- continue;
+ goto unlock;
virtq->stopped = true;
/* Query error info. */
if (mlx5_vdpa_virtq_query(priv, vq_index))
for (i = 1; i < RTE_DIM(virtq->err_time); i++)
virtq->err_time[i - 1] = virtq->err_time[i];
virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
+unlock:
+ pthread_mutex_unlock(&virtq->virtq_lock);
}
- pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}
void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
+ struct mlx5_vdpa_virtq *virtq;
void *status;
+ int i;
if (priv->timer_tid) {
pthread_cancel(priv->timer_tid);
pthread_join(priv->timer_tid, &status);
+ /* The virtq locks may stay locked after the event thread is canceled, re-initialize them. */
+ for (i = 0; i < priv->nr_virtqs; i++) {
+ virtq = &priv->virtqs[i];
+ pthread_mutex_init(&virtq->virtq_lock, NULL);
+ }
}
priv->timer_tid = 0;
}
int
mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
- int callfd, struct mlx5_vdpa_event_qp *eqp)
+ int callfd, struct mlx5_vdpa_virtq *virtq)
{
+ struct mlx5_vdpa_event_qp *eqp = &virtq->eqp;
struct mlx5_devx_qp_attr attr = {0};
uint16_t log_desc_n = rte_log2_u32(desc_n);
uint32_t ret;
}
if (eqp->fw_qp)
mlx5_vdpa_event_qp_destroy(eqp);
- if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
+ if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, virtq) ||
+ !eqp->cq.cq_obj.cq)
return -1;
attr.pd = priv->cdev->pdn;
attr.ts_format =
attr.ts_format =
mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp),
- attr.num_of_receive_wqes *
- MLX5_WSEG_SIZE, &attr, SOCKET_ID_ANY);
+ attr.num_of_receive_wqes * MLX5_WSEG_SIZE,
+ &attr, SOCKET_ID_ANY);
if (ret) {
DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
goto error;
mlx5_vdpa_event_qp_destroy(eqp);
return -1;
}
+
virtq = &priv->virtqs[i];
if (!virtq->configured) {
DRV_LOG(DEBUG, "virtq %d is invalid for dirty bitmap enabling.", i);
- } else if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq,
+ } else {
+ struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
+
+ pthread_mutex_lock(&virtq->virtq_lock);
+ if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq,
&attr)) {
- DRV_LOG(ERR, "Failed to modify virtq %d for dirty bitmap enabling.", i);
- return -1;
+ pthread_mutex_unlock(&virtq->virtq_lock);
+ DRV_LOG(ERR,
+ "Failed to modify virtq %d for dirty bitmap enabling.",
+ i);
+ return -1;
+ }
+ pthread_mutex_unlock(&virtq->virtq_lock);
}
}
return 0;
virtq = &priv->virtqs[i];
if (!virtq->configured) {
DRV_LOG(DEBUG, "virtq %d is invalid for LM.", i);
- } else if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq,
- &attr)) {
- DRV_LOG(ERR, "Failed to modify virtq %d for LM.", i);
- goto err;
+ } else {
+ struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
+
+ pthread_mutex_lock(&virtq->virtq_lock);
+ if (mlx5_devx_cmd_modify_virtq(
+ priv->virtqs[i].virtq,
+ &attr)) {
+ pthread_mutex_unlock(&virtq->virtq_lock);
+ DRV_LOG(ERR,
+ "Failed to modify virtq %d for LM.", i);
+ goto err;
+ }
+ pthread_mutex_unlock(&virtq->virtq_lock);
}
}
return 0;
int
mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
{
+ struct mlx5_vdpa_virtq *virtq;
uint64_t features;
int ret = rte_vhost_get_negotiated_features(priv->vid, &features);
int i;
if (!RTE_VHOST_NEED_LOG(features))
return 0;
for (i = 0; i < priv->nr_virtqs; ++i) {
+ virtq = &priv->virtqs[i];
if (!priv->virtqs[i].virtq) {
DRV_LOG(DEBUG, "virtq %d is invalid for LM log.", i);
} else {
+ pthread_mutex_lock(&virtq->virtq_lock);
ret = mlx5_vdpa_virtq_stop(priv, i);
+ pthread_mutex_unlock(&virtq->virtq_lock);
if (ret) {
DRV_LOG(ERR, "Failed to stop virtq %d for LM "
"log.", i);
int
mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)
{
- int ret = mlx5_vdpa_rqt_prepare(priv);
+ int ret;
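+ /* Serialize steering updates against a concurrent unset on device close. */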
+ pthread_mutex_lock(&priv->steer_update_lock);
+ ret = mlx5_vdpa_rqt_prepare(priv);
if (ret == 0) {
mlx5_vdpa_steer_unset(priv);
} else if (ret < 0) {
+ pthread_mutex_unlock(&priv->steer_update_lock);
return ret;
} else if (!priv->steer.rss[0].flow) {
ret = mlx5_vdpa_rss_flows_create(priv);
if (ret) {
DRV_LOG(ERR, "Cannot create RSS flows.");
+ pthread_mutex_unlock(&priv->steer_update_lock);
return -1;
}
}
+ pthread_mutex_unlock(&priv->steer_update_lock);
return 0;
}
int nbytes;
int retry;
+ pthread_mutex_lock(&virtq->virtq_lock);
if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
+ pthread_mutex_unlock(&virtq->virtq_lock);
DRV_LOG(ERR, "device %d queue %d down, skip kick handling",
priv->vid, virtq->index);
return;
}
- if (rte_intr_fd_get(virtq->intr_handle) < 0)
+ if (rte_intr_fd_get(virtq->intr_handle) < 0) {
+ pthread_mutex_unlock(&virtq->virtq_lock);
return;
+ }
for (retry = 0; retry < 3; ++retry) {
nbytes = read(rte_intr_fd_get(virtq->intr_handle), &buf,
8);
}
break;
}
- if (nbytes < 0)
+ if (nbytes < 0) {
+ pthread_mutex_unlock(&virtq->virtq_lock);
return;
+ }
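+ /* The doorbell register is shared by all queues, protect the write. */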
+ rte_spinlock_lock(&priv->db_lock);
rte_write32(virtq->index, priv->virtq_db_addr);
+ rte_spinlock_unlock(&priv->db_lock);
+ pthread_mutex_unlock(&virtq->virtq_lock);
if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
DRV_LOG(ERR, "device %d queue %d down, skip kick handling",
priv->vid, virtq->index);
DRV_LOG(DEBUG, "Ring virtq %u doorbell.", virtq->index);
}
+/* Virtq must be locked before calling this function. */
+static void
+mlx5_vdpa_virtq_unregister_intr_handle(struct mlx5_vdpa_virtq *virtq)
+{
+ int ret = -EAGAIN;
+
+ if (!virtq->intr_handle)
+ return;
+ if (rte_intr_fd_get(virtq->intr_handle) >= 0) {
+ while (ret == -EAGAIN) {
+ ret = rte_intr_callback_unregister(virtq->intr_handle,
+ mlx5_vdpa_virtq_kick_handler, virtq);
+ if (ret == -EAGAIN) {
+ DRV_LOG(DEBUG, "Try again to unregister fd %d of virtq %hu interrupt",
+ rte_intr_fd_get(virtq->intr_handle),
+ virtq->index);
+ pthread_mutex_unlock(&virtq->virtq_lock);
+ usleep(MLX5_VDPA_INTR_RETRIES_USEC);
+ pthread_mutex_lock(&virtq->virtq_lock);
+ }
+ }
+ (void)rte_intr_fd_set(virtq->intr_handle, -1);
+ }
+ rte_intr_instance_free(virtq->intr_handle);
+ virtq->intr_handle = NULL;
+}
+
/* Release cached VQ resources. */
void
mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
+ pthread_mutex_lock(&virtq->virtq_lock);
virtq->configured = 0;
for (j = 0; j < RTE_DIM(virtq->umems); ++j) {
if (virtq->umems[j].obj) {
}
if (virtq->eqp.fw_qp)
mlx5_vdpa_event_qp_destroy(&virtq->eqp);
+ pthread_mutex_unlock(&virtq->virtq_lock);
}
}
+
static int
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
int ret = -EAGAIN;
- if (rte_intr_fd_get(virtq->intr_handle) >= 0) {
- while (ret == -EAGAIN) {
- ret = rte_intr_callback_unregister(virtq->intr_handle,
- mlx5_vdpa_virtq_kick_handler, virtq);
- if (ret == -EAGAIN) {
- DRV_LOG(DEBUG, "Try again to unregister fd %d of virtq %hu interrupt",
- rte_intr_fd_get(virtq->intr_handle),
- virtq->index);
- usleep(MLX5_VDPA_INTR_RETRIES_USEC);
- }
- }
- rte_intr_fd_set(virtq->intr_handle, -1);
- }
- rte_intr_instance_free(virtq->intr_handle);
+ mlx5_vdpa_virtq_unregister_intr_handle(virtq);
if (virtq->configured) {
ret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);
if (ret)
void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
{
+ struct mlx5_vdpa_virtq *virtq;
int i;
- for (i = 0; i < priv->nr_virtqs; i++)
- mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
+ for (i = 0; i < priv->nr_virtqs; i++) {
+ virtq = &priv->virtqs[i];
+ pthread_mutex_lock(&virtq->virtq_lock);
+ mlx5_vdpa_virtq_unset(virtq);
+ pthread_mutex_unlock(&virtq->virtq_lock);
+ }
priv->features = 0;
priv->nr_virtqs = 0;
}
MLX5_VIRTQ_EVENT_MODE_QP : MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
if (attr->event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
ret = mlx5_vdpa_event_qp_prepare(priv,
- vq->size, vq->callfd, &virtq->eqp);
+ vq->size, vq->callfd, virtq);
if (ret) {
DRV_LOG(ERR,
"Failed to create event QPs for virtq %d.",
}
claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
virtq->configured = 1;
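+ /* Protect the doorbell write, the register is shared with the kick handler. */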
+ rte_spinlock_lock(&priv->db_lock);
rte_write32(virtq->index, priv->virtq_db_addr);
+ rte_spinlock_unlock(&priv->db_lock);
/* Setup doorbell mapping. */
virtq->intr_handle =
rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
if (rte_intr_callback_register(virtq->intr_handle,
mlx5_vdpa_virtq_kick_handler,
virtq)) {
- rte_intr_fd_set(virtq->intr_handle, -1);
+ (void)rte_intr_fd_set(virtq->intr_handle, -1);
DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
index);
goto error;
uint32_t i;
uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
+ struct mlx5_vdpa_virtq *virtq;
if (ret || mlx5_vdpa_features_validate(priv)) {
DRV_LOG(ERR, "Failed to configure negotiated features.");
return -1;
}
priv->nr_virtqs = nr_vring;
- for (i = 0; i < nr_vring; i++)
- if (priv->virtqs[i].enable && mlx5_vdpa_virtq_setup(priv, i))
- goto error;
+ for (i = 0; i < nr_vring; i++) {
+ virtq = &priv->virtqs[i];
+ if (virtq->enable) {
+ pthread_mutex_lock(&virtq->virtq_lock);
+ if (mlx5_vdpa_virtq_setup(priv, i)) {
+ pthread_mutex_unlock(&virtq->virtq_lock);
+ goto error;
+ }
+ pthread_mutex_unlock(&virtq->virtq_lock);
+ }
+ }
return 0;
error:
mlx5_vdpa_virtqs_release(priv);