vdpa/mlx5: recreate a virtq becoming enabled
 drivers/vdpa/mlx5/mlx5_vdpa.c
index 0991e5a..9f7353d 100644
@@ -116,20 +116,39 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
 {
        int did = rte_vhost_get_vdpa_device_id(vid);
        struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
-       struct mlx5_vdpa_virtq *virtq = NULL;
 
        if (priv == NULL) {
                DRV_LOG(ERR, "Invalid device id: %d.", did);
                return -EINVAL;
        }
-       SLIST_FOREACH(virtq, &priv->virtq_list, next)
-               if (virtq->index == vring)
-                       break;
-       if (!virtq) {
-               DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
-               return -EINVAL;
+       if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+               DRV_LOG(ERR, "Too big vring id: %d.", vring);
+               return -E2BIG;
        }
-       return mlx5_vdpa_virtq_enable(virtq, state);
+       return mlx5_vdpa_virtq_enable(priv, vring, state);
+}
+
+static int
+mlx5_vdpa_direct_db_prepare(struct mlx5_vdpa_priv *priv)
+{
+       int ret;
+
+       if (priv->direct_notifier) {
+               ret = rte_vhost_host_notifier_ctrl(priv->vid, false);
+               if (ret != 0) {
+                       DRV_LOG(INFO, "Direct HW notifier FD cannot be "
+                               "destroyed for device %d: %d.", priv->vid, ret);
+                       return -1;
+               }
+               priv->direct_notifier = 0;
+       }
+       ret = rte_vhost_host_notifier_ctrl(priv->vid, true);
+       if (ret != 0)
+               DRV_LOG(INFO, "Direct HW notifier FD cannot be configured for"
+                       " device %d: %d.", priv->vid, ret);
+       else
+               priv->direct_notifier = 1;
+       return 0;
 }
 
 static int
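The state callback no longer walks a per-device SLIST to find the ring: it only bounds-checks the ring index against the reported HCA capability (two rings per virtio queue) and passes the index straight to mlx5_vdpa_virtq_enable(), which now takes the device private structure. Given the patch title, the enable path is expected to drop and recreate the hardware queue object when a ring transitions to enabled. A minimal sketch of that idea, assuming a priv->virtqs[] array and helper names such as mlx5_vdpa_virtq_unset()/mlx5_vdpa_virtq_setup(); the real implementation lives in mlx5_vdpa_virtq.c and is not part of this hunk:

/* Hypothetical sketch only: helper names and fields are assumptions. */
int
mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];

	if (virtq->enable == !!enable)
		return 0;                     /* No state change requested. */
	if (virtq->enable)
		mlx5_vdpa_virtq_unset(virtq); /* Drop the stale queue object. */
	if (enable && mlx5_vdpa_virtq_setup(priv, index))
		return -1;                    /* Recreate it from scratch. */
	virtq->enable = !!enable;
	return 0;
}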
@@ -185,12 +204,13 @@ mlx5_vdpa_dev_close(int vid)
        if (priv->configured)
                ret |= mlx5_vdpa_lm_log(priv);
        mlx5_vdpa_cqe_event_unset(priv);
-       ret |= mlx5_vdpa_steer_unset(priv);
+       mlx5_vdpa_steer_unset(priv);
        mlx5_vdpa_virtqs_release(priv);
        mlx5_vdpa_event_qp_global_release(priv);
        mlx5_vdpa_mem_dereg(priv);
        priv->configured = 0;
        priv->vid = 0;
+       DRV_LOG(INFO, "vDPA device %d was closed.", vid);
        return ret;
 }
 
@@ -209,12 +229,48 @@ mlx5_vdpa_dev_config(int vid)
                return -1;
        }
        priv->vid = vid;
-       if (mlx5_vdpa_mem_register(priv) || mlx5_vdpa_virtqs_prepare(priv) ||
-           mlx5_vdpa_steer_setup(priv) || mlx5_vdpa_cqe_event_setup(priv)) {
+       if (mlx5_vdpa_mem_register(priv) || mlx5_vdpa_direct_db_prepare(priv) ||
+           mlx5_vdpa_virtqs_prepare(priv) || mlx5_vdpa_steer_setup(priv) ||
+           mlx5_vdpa_cqe_event_setup(priv)) {
                mlx5_vdpa_dev_close(vid);
                return -1;
        }
        priv->configured = 1;
+       DRV_LOG(INFO, "vDPA device %d was configured.", vid);
+       return 0;
+}
+
+static int
+mlx5_vdpa_get_device_fd(int vid)
+{
+       int did = rte_vhost_get_vdpa_device_id(vid);
+       struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+
+       if (priv == NULL) {
+               DRV_LOG(ERR, "Invalid device id: %d.", did);
+               return -EINVAL;
+       }
+       return priv->ctx->cmd_fd;
+}
+
+static int
+mlx5_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
+{
+       int did = rte_vhost_get_vdpa_device_id(vid);
+       struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+
+       RTE_SET_USED(qid);
+       if (priv == NULL) {
+               DRV_LOG(ERR, "Invalid device id: %d.", did);
+               return -EINVAL;
+       }
+       if (!priv->var) {
+               DRV_LOG(ERR, "VAR was not created for device %d, is the device"
+                       " configured?", did);
+               return -EINVAL;
+       }
+       *offset = priv->var->mmap_off;
+       *size = priv->var->length;
        return 0;
 }
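
With .get_vfio_device_fd and .get_notify_area now implemented, the vhost library can expose a hardware doorbell page to the guest: the fd is the DevX command file descriptor and the offset/size describe the VAR mapping created at device configuration time. A consumer-side sketch of how such a region would typically be mapped, assuming the fd, offset and size are exactly the values returned by these two callbacks (the vhost-user message plumbing is omitted):

#include <stdint.h>
#include <sys/types.h>
#include <sys/mman.h>

/* Map the doorbell (notify) area advertised by the vDPA driver. */
static void *
map_notify_area(int device_fd, uint64_t offset, uint64_t size)
{
	void *base = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			  device_fd, (off_t)offset);

	return base == MAP_FAILED ? NULL : base;
}

Writes into this mapping then ring the device doorbell directly, which is what the direct HW notifier enabled by mlx5_vdpa_direct_db_prepare() relies on.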
 
@@ -228,8 +284,8 @@ static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
        .set_features = mlx5_vdpa_features_set,
        .migration_done = NULL,
        .get_vfio_group_fd = NULL,
-       .get_vfio_device_fd = NULL,
-       .get_notify_area = NULL,
+       .get_vfio_device_fd = mlx5_vdpa_get_device_fd,
+       .get_notify_area = mlx5_vdpa_get_notify_area,
 };
 
 static struct ibv_device *
@@ -422,28 +478,28 @@ mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                rte_errno = ENODEV;
                return -rte_errno;
        }
-       priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv),
-                          RTE_CACHE_LINE_SIZE);
-       if (!priv) {
-               DRV_LOG(ERR, "Failed to allocate private memory.");
-               rte_errno = ENOMEM;
-               goto error;
-       }
        ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
        if (ret) {
                DRV_LOG(ERR, "Unable to read HCA capabilities.");
                rte_errno = ENOTSUP;
                goto error;
-       } else {
-               if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
-                       DRV_LOG(ERR, "Not enough capabilities to support vdpa,"
-                               " maybe old FW/OFED version?");
-                       rte_errno = ENOTSUP;
-                       goto error;
-               }
-               priv->caps = attr.vdpa;
-               priv->log_max_rqt_size = attr.log_max_rqt_size;
+       } else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
+               DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
+                       "old FW/OFED version?");
+               rte_errno = ENOTSUP;
+               goto error;
        }
+       priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
+                          sizeof(struct mlx5_vdpa_virtq) *
+                          attr.vdpa.max_num_virtio_queues * 2,
+                          RTE_CACHE_LINE_SIZE);
+       if (!priv) {
+               DRV_LOG(ERR, "Failed to allocate private memory.");
+               rte_errno = ENOMEM;
+               goto error;
+       }
+       priv->caps = attr.vdpa;
+       priv->log_max_rqt_size = attr.log_max_rqt_size;
        priv->ctx = ctx;
        priv->dev_addr.pci_addr = pci_dev->addr;
        priv->dev_addr.type = VDPA_ADDR_PCI;
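The private structure is now allocated only after the HCA capabilities are known, because its size depends on attr.vdpa.max_num_virtio_queues: the virtq contexts are carried inline, two per virtio queue pair, instead of in the SLIST that the next hunk stops initializing. A rough sketch of the assumed layout (field names illustrative, not copied from mlx5_vdpa.h):

struct mlx5_vdpa_priv {
	/* ... existing fields (ctx, vid, caps, var, ...) elided ... */
	uint16_t nr_virtqs;               /* illustrative counter */
	struct mlx5_vdpa_virtq virtqs[];  /* 2 * max_num_virtio_queues entries */
};

With this layout, mlx5_vdpa_set_vring_state() can address priv->virtqs[vring] directly after the bounds check above.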
@@ -459,7 +515,6 @@ mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                goto error;
        }
        SLIST_INIT(&priv->mr_list);
-       SLIST_INIT(&priv->virtq_list);
        pthread_mutex_lock(&priv_list_lock);
        TAILQ_INSERT_TAIL(&priv_list, priv, next);
        pthread_mutex_unlock(&priv_list_lock);
@@ -518,14 +573,6 @@ mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev)
 }
 
 static const struct rte_pci_id mlx5_vdpa_pci_id_map[] = {
-       {
-               RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
-                              PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
-       },
-       {
-               RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
-                              PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
-       },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                                PCI_DEVICE_ID_MELLANOX_CONNECTX6)