From: Matan Azrad
Date: Sun, 2 Feb 2020 16:03:49 +0000 (+0000)
Subject: vdpa/mlx5: support queue state operation
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=af72fdb546d5e8f7315fa25b7a47ab5450fe9bee;p=dpdk.git

vdpa/mlx5: support queue state operation

Add support for the set_vring_state operation. Using the DevX API, the
virtq state can be changed as described in the PRM:
enable - move the virtq to the ready state.
disable - move the virtq to the suspend state.

Signed-off-by: Matan Azrad
Acked-by: Viacheslav Ovsiienko
Acked-by: Maxime Coquelin
---
A caller-side sketch (illustrative only) is appended after the diff.

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 28b94a3814..3615681d7a 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -106,13 +106,34 @@ mlx5_vdpa_get_protocol_features(int did, uint64_t *features)
         return 0;
 }
 
+static int
+mlx5_vdpa_set_vring_state(int vid, int vring, int state)
+{
+        int did = rte_vhost_get_vdpa_device_id(vid);
+        struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+        struct mlx5_vdpa_virtq *virtq = NULL;
+
+        if (priv == NULL) {
+                DRV_LOG(ERR, "Invalid device id: %d.", did);
+                return -EINVAL;
+        }
+        SLIST_FOREACH(virtq, &priv->virtq_list, next)
+                if (virtq->index == vring)
+                        break;
+        if (!virtq) {
+                DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
+                return -EINVAL;
+        }
+        return mlx5_vdpa_virtq_enable(virtq, state);
+}
+
 static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
         .get_queue_num = mlx5_vdpa_get_queue_num,
         .get_features = mlx5_vdpa_get_vdpa_features,
         .get_protocol_features = mlx5_vdpa_get_protocol_features,
         .dev_conf = NULL,
         .dev_close = NULL,
-        .set_vring_state = NULL,
+        .set_vring_state = mlx5_vdpa_set_vring_state,
         .set_features = NULL,
         .migration_done = NULL,
         .get_vfio_group_fd = NULL,
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index d43d283fe2..34b4fab07f 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -70,8 +70,10 @@ struct mlx5_vdpa_query_mr {
 
 struct mlx5_vdpa_virtq {
         SLIST_ENTRY(mlx5_vdpa_virtq) next;
+        uint8_t enable;
         uint16_t index;
         uint16_t vq_size;
+        struct mlx5_vdpa_priv *priv;
         struct mlx5_devx_obj *virtq;
         struct mlx5_vdpa_event_qp eqp;
         struct {
@@ -212,6 +214,19 @@ void mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv);
  */
 int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);
 
+/**
+ * Enable/disable the virtq.
+ *
+ * @param[in] virtq
+ *   The vdpa driver private virtq structure.
+ * @param[in] enable
+ *   Nonzero to enable, 0 to disable.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable);
+
 /**
  * Unset steering and release all its related resources- stop traffic.
  *
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
index f365c10b1b..36017f1640 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
@@ -73,7 +73,7 @@ is_virtq_recvq(int virtq_index, int nr_vring)
 }
 
 #define MLX5_VDPA_DEFAULT_RQT_SIZE 512
-static int __rte_unused
+static int
 mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
 {
         struct mlx5_vdpa_virtq *virtq;
@@ -91,7 +91,8 @@ mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
                 return -ENOMEM;
         }
         SLIST_FOREACH(virtq, &priv->virtq_list, next) {
-                if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
+                if (is_virtq_recvq(virtq->index, priv->nr_virtqs) &&
+                    virtq->enable) {
                         attr->rq_list[i] = virtq->virtq->id;
                         i++;
                 }
@@ -116,6 +117,23 @@ mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
         return ret;
 }
 
+int
+mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable)
+{
+        struct mlx5_vdpa_priv *priv = virtq->priv;
+        int ret = 0;
+
+        if (virtq->enable == !!enable)
+                return 0;
+        virtq->enable = !!enable;
+        if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
+                ret = mlx5_vdpa_rqt_prepare(priv);
+                if (ret)
+                        virtq->enable = !enable;
+        }
+        return ret;
+}
+
 static int __rte_unused
 mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
 {
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index e27af28538..9967be3b53 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -15,13 +15,13 @@
 static int
 mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
 {
-        int i;
+        unsigned int i;
 
         if (virtq->virtq) {
                 claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
                 virtq->virtq = NULL;
         }
-        for (i = 0; i < 3; ++i) {
+        for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
                 if (virtq->umems[i].obj)
                         claim_zero(mlx5_glue->devx_umem_dereg
                                                  (virtq->umems[i].obj));
@@ -60,6 +60,19 @@ mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
         priv->features = 0;
 }
 
+static int
+mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
+{
+        struct mlx5_devx_virtq_attr attr = {
+                        .type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
+                        .state = state ? MLX5_VIRTQ_STATE_RDY :
+                                         MLX5_VIRTQ_STATE_SUSPEND,
+                        .queue_index = virtq->index,
+        };
+
+        return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
+}
+
 static uint64_t
 mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
 {
@@ -86,7 +99,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv,
         struct mlx5_devx_virtq_attr attr = {0};
         uint64_t gpa;
         int ret;
-        int i;
+        unsigned int i;
         uint16_t last_avail_idx;
         uint16_t last_used_idx;
 
@@ -125,7 +138,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv,
                           " need event QPs and event mechanism.", index);
         }
         /* Setup 3 UMEMs for each virtq. */
-        for (i = 0; i < 3; ++i) {
+        for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
                 virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
                                                        priv->caps.umems[i].b;
                 virtq->umems[i].buf = rte_zmalloc(__func__,
@@ -182,8 +195,12 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv,
         attr.tis_id = priv->tis->id;
         attr.queue_index = index;
         virtq->virtq = mlx5_devx_cmd_create_virtq(priv->ctx, &attr);
+        virtq->priv = priv;
         if (!virtq->virtq)
                 goto error;
+        if (mlx5_vdpa_virtq_modify(virtq, 1))
+                goto error;
+        virtq->enable = 1;
         return 0;
 error:
         mlx5_vdpa_virtq_unset(virtq);
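
Caller-side sketch (illustrative only, not part of the patch): the new
callback is normally reached from the vhost library when the frontend
enables or disables a ring (VHOST_USER_SET_VRING_ENABLE). The minimal
sketch below shows that dispatch through the generic vDPA layer. The
helper name toggle_queue() is hypothetical; rte_vhost_get_vdpa_device_id(),
rte_vdpa_get_device() and the set_vring_state hook of struct
rte_vdpa_dev_ops are existing APIs in this DPDK release.

/*
 * Illustrative sketch only. Shows how a ring state change is dispatched
 * into the driver's new set_vring_state callback; toggle_queue() is a
 * hypothetical helper, not a DPDK or driver function.
 */
#include <errno.h>
#include <rte_vdpa.h>
#include <rte_vhost.h>

static int
toggle_queue(int vid, int qid, int enable)
{
        /* Map the vhost device to its bound vDPA device. */
        int did = rte_vhost_get_vdpa_device_id(vid);
        struct rte_vdpa_device *vdpa_dev = rte_vdpa_get_device(did);

        if (vdpa_dev == NULL || vdpa_dev->ops->set_vring_state == NULL)
                return -ENODEV;
        /*
         * For mlx5 this lands in mlx5_vdpa_set_vring_state(), which looks
         * up the virtq by index and calls mlx5_vdpa_virtq_enable() to
         * update the enable flag and refresh the RQT for receive queues.
         */
        return vdpa_dev->ops->set_vring_state(vid, qid, enable);
}

In the regular flow this dispatch is done by the vhost library itself
while handling VHOST_USER_SET_VRING_ENABLE, so applications do not need
to call the ops table directly.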