{
int did = rte_vhost_get_vdpa_device_id(vid);
struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
- struct mlx5_vdpa_virtq *virtq = NULL;
if (priv == NULL) {
DRV_LOG(ERR, "Invalid device id: %d.", did);
return -EINVAL;
}
- SLIST_FOREACH(virtq, &priv->virtq_list, next)
- if (virtq->index == vring)
- break;
- if (!virtq) {
- DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
- return -EINVAL;
+ if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+ DRV_LOG(ERR, "Too big vring id: %d.", vring);
+ return -E2BIG;
}
- return mlx5_vdpa_virtq_enable(virtq, state);
+ return mlx5_vdpa_virtq_enable(priv, vring, state);
+}
+
+/*
+ * Prepare the direct (HW) doorbell notifier for the device.
+ *
+ * If a notifier was previously installed, it is destroyed first; failure
+ * to destroy the old notifier is fatal and returns -1.  Failure to
+ * configure a new notifier is only logged at INFO level and 0 is still
+ * returned — NOTE(review): presumably the device then falls back to the
+ * SW notification relay path; confirm against the caller in dev_config.
+ *
+ * @param priv  Device private structure (assumes priv->vid is valid).
+ * @return 0 on success or non-fatal configure failure, -1 if a stale
+ *         notifier could not be destroyed.
+ */
+static int
+mlx5_vdpa_direct_db_prepare(struct mlx5_vdpa_priv *priv)
+{
+ int ret;
+
+ /* Tear down any stale notifier FD before installing a new one. */
+ if (priv->direct_notifier) {
+ ret = rte_vhost_host_notifier_ctrl(priv->vid, false);
+ if (ret != 0) {
+ DRV_LOG(INFO, "Direct HW notifier FD cannot be "
+ "destroyed for device %d: %d.", priv->vid, ret);
+ return -1;
+ }
+ priv->direct_notifier = 0;
+ }
+ ret = rte_vhost_host_notifier_ctrl(priv->vid, true);
+ if (ret != 0)
+ /* Non-fatal: log only, device keeps working without direct DB. */
+ DRV_LOG(INFO, "Direct HW notifier FD cannot be configured for"
+ " device %d: %d.", priv->vid, ret);
+ else
+ priv->direct_notifier = 1;
+ return 0;
+}
static int
if (priv->configured)
ret |= mlx5_vdpa_lm_log(priv);
mlx5_vdpa_cqe_event_unset(priv);
- ret |= mlx5_vdpa_steer_unset(priv);
+ mlx5_vdpa_steer_unset(priv);
mlx5_vdpa_virtqs_release(priv);
mlx5_vdpa_event_qp_global_release(priv);
mlx5_vdpa_mem_dereg(priv);
priv->configured = 0;
priv->vid = 0;
+ DRV_LOG(INFO, "vDPA device %d was closed.", vid);
return ret;
}
return -1;
}
priv->vid = vid;
- if (mlx5_vdpa_mem_register(priv) || mlx5_vdpa_virtqs_prepare(priv) ||
- mlx5_vdpa_steer_setup(priv) || mlx5_vdpa_cqe_event_setup(priv)) {
+ if (mlx5_vdpa_mem_register(priv) || mlx5_vdpa_direct_db_prepare(priv) ||
+ mlx5_vdpa_virtqs_prepare(priv) || mlx5_vdpa_steer_setup(priv) ||
+ mlx5_vdpa_cqe_event_setup(priv)) {
mlx5_vdpa_dev_close(vid);
return -1;
}
priv->configured = 1;
+ DRV_LOG(INFO, "vDPA device %d was configured.", vid);
+ return 0;
+}
+
+/*
+ * vDPA ops callback (.get_vfio_device_fd): return the device command FD
+ * used to mmap the doorbell area by the vhost library.
+ *
+ * @param vid  vhost device ID.
+ * @return the ibv context command FD on success, -EINVAL if no driver
+ *         private data is registered for this device ID.
+ */
+static int
+mlx5_vdpa_get_device_fd(int vid)
+{
+ int did = rte_vhost_get_vdpa_device_id(vid);
+ struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+
+ if (priv == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d.", did);
+ return -EINVAL;
+ }
+ return priv->ctx->cmd_fd;
+}
+
+/*
+ * vDPA ops callback (.get_notify_area): report the mmap offset and size
+ * of the doorbell (VAR) region for the given queue.
+ *
+ * The per-queue ID is deliberately unused: the whole VAR mapping is
+ * returned for every queue — NOTE(review): presumably each queue's
+ * doorbell lives at a fixed offset inside this single mapping; confirm
+ * with the VAR layout documentation.
+ *
+ * @param vid     vhost device ID.
+ * @param qid     queue ID (unused, see above).
+ * @param offset  Out: mmap offset of the VAR region.
+ * @param size    Out: length of the VAR region.
+ * @return 0 on success, -EINVAL on unknown device or when the VAR was
+ *         not yet allocated (device not configured).
+ */
+static int
+mlx5_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
+{
+ int did = rte_vhost_get_vdpa_device_id(vid);
+ struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+
+ RTE_SET_USED(qid);
+ if (priv == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d.", did);
+ return -EINVAL;
+ }
+ if (!priv->var) {
+ DRV_LOG(ERR, "VAR was not created for device %d, is the device"
+ " configured?.", did);
+ return -EINVAL;
+ }
+ *offset = priv->var->mmap_off;
+ *size = priv->var->length;
 return 0;
}
.set_features = mlx5_vdpa_features_set,
.migration_done = NULL,
.get_vfio_group_fd = NULL,
- .get_vfio_device_fd = NULL,
- .get_notify_area = NULL,
+ .get_vfio_device_fd = mlx5_vdpa_get_device_fd,
+ .get_notify_area = mlx5_vdpa_get_notify_area,
};
static struct ibv_device *
DRV_LOG(DEBUG, "Checking device \"%s\"..", ibv_list[n]->name);
if (mlx5_dev_to_pci_addr(ibv_list[n]->ibdev_path, &pci_addr))
continue;
- if (memcmp(addr, &pci_addr, sizeof(pci_addr)))
+ if (rte_pci_addr_cmp(addr, &pci_addr))
continue;
ibv_match = ibv_list[n];
break;
rte_errno = ENODEV;
return -rte_errno;
}
- priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv),
- RTE_CACHE_LINE_SIZE);
- if (!priv) {
- DRV_LOG(ERR, "Failed to allocate private memory.");
- rte_errno = ENOMEM;
- goto error;
- }
ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
if (ret) {
DRV_LOG(ERR, "Unable to read HCA capabilities.");
rte_errno = ENOTSUP;
goto error;
- } else {
- if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
- DRV_LOG(ERR, "Not enough capabilities to support vdpa,"
- " maybe old FW/OFED version?");
- rte_errno = ENOTSUP;
- goto error;
- }
- priv->caps = attr.vdpa;
- priv->log_max_rqt_size = attr.log_max_rqt_size;
+ } else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
+ DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
+ "old FW/OFED version?");
+ rte_errno = ENOTSUP;
+ goto error;
}
+ priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
+ sizeof(struct mlx5_vdpa_virtq) *
+ attr.vdpa.max_num_virtio_queues * 2,
+ RTE_CACHE_LINE_SIZE);
+ if (!priv) {
+ DRV_LOG(ERR, "Failed to allocate private memory.");
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ priv->caps = attr.vdpa;
+ priv->log_max_rqt_size = attr.log_max_rqt_size;
priv->ctx = ctx;
priv->dev_addr.pci_addr = pci_dev->addr;
- priv->dev_addr.type = PCI_ADDR;
+ priv->dev_addr.type = VDPA_ADDR_PCI;
+ priv->var = mlx5_glue->dv_alloc_var(ctx, 0);
+ if (!priv->var) {
+ DRV_LOG(ERR, "Failed to allocate VAR %u.\n", errno);
+ goto error;
+ }
priv->id = rte_vdpa_register_device(&priv->dev_addr, &mlx5_vdpa_ops);
if (priv->id < 0) {
DRV_LOG(ERR, "Failed to register vDPA device.");
goto error;
}
SLIST_INIT(&priv->mr_list);
- SLIST_INIT(&priv->virtq_list);
pthread_mutex_lock(&priv_list_lock);
TAILQ_INSERT_TAIL(&priv_list, priv, next);
pthread_mutex_unlock(&priv_list_lock);
return 0;
error:
- if (priv)
+ if (priv) {
+ if (priv->var)
+ mlx5_glue->dv_free_var(priv->var);
rte_free(priv);
+ }
if (ctx)
mlx5_glue->close_device(ctx);
return -rte_errno;
if (found) {
if (priv->configured)
mlx5_vdpa_dev_close(priv->vid);
+ if (priv->var) {
+ mlx5_glue->dv_free_var(priv->var);
+ priv->var = NULL;
+ }
mlx5_glue->close_device(priv->ctx);
rte_free(priv);
}
}
static const struct rte_pci_id mlx5_vdpa_pci_id_map[] = {
- {
- RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
- PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
- },
- {
- RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
- PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
- },
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
PCI_DEVICE_ID_MELLANOX_CONNECTX6)
RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
},
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
+ },
{
.vendor_id = 0
}