vdpa/mlx5: fix maximum number of virtqs

The driver took the max_num_virtio_queues device capability as a
number of queue pairs and multiplied it by 2 wherever a virtqueue
count was needed. The capability actually counts virtqueues, so the
reported queue number and all the vring bounds checks were twice too
big. Use the capability directly as the virtqueue count and report
half of it from get_queue_num(), which is defined in queue pairs.

Also apply the within-page part of the doorbell BAR offset when
mapping the doorbell, so the mapping works on 64K page systems, and
mask it out again before munmap() on release.
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 76fa5d4..fc17a4b 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -14,6 +14,7 @@
 #include <rte_errno.h>
 #include <rte_string_fns.h>
 #include <rte_bus_pci.h>
+#include <rte_eal_paging.h>
 
 #include <mlx5_glue.h>
 #include <mlx5_common.h>
@@ -84,7 +85,7 @@ mlx5_vdpa_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
                DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
                return -1;
        }
-       *queue_num = priv->caps.max_num_virtio_queues;
+       *queue_num = priv->caps.max_num_virtio_queues / 2;
        return 0;
 }
 
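For context, a minimal sketch (not part of the patch) of how an
application consumes this value through the generic vDPA API; the
helper name is hypothetical. rte_vdpa_get_queue_num() reports queue
pairs, which is why the capability, a virtqueue count, is now halved:

#include <stdint.h>
#include <stdio.h>
#include <rte_vdpa.h>

/* Hypothetical helper: print how many queues a vDPA device offers. */
static int
print_queue_num(struct rte_vdpa_device *vdev)
{
	uint32_t pairs = 0;

	if (rte_vdpa_get_queue_num(vdev, &pairs) != 0)
		return -1;
	/* Two virtqueues (RX + TX) back each queue pair. */
	printf("%u queue pairs, %u virtqueues\n", pairs, pairs * 2);
	return 0;
}
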
@@ -141,7 +142,7 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
                DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
                return -EINVAL;
        }
-       if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+       if (vring >= (int)priv->caps.max_num_virtio_queues) {
                DRV_LOG(ERR, "Too big vring id: %d.", vring);
                return -E2BIG;
        }
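The same bound guards the statistics paths below. A small sketch,
assuming the virtio-net convention that queue pair N owns virtqueues
2N (RX) and 2N + 1 (TX), of why the check no longer multiplies by 2;
all helper names are hypothetical:

#include <stdint.h>

/* A vring id is valid when it is below the total virtqueue count;
 * the capability already counts virtqueues, so no doubling needed. */
static inline uint32_t rx_vring(uint32_t pair) { return 2 * pair; }
static inline uint32_t tx_vring(uint32_t pair) { return 2 * pair + 1; }

static inline int
vring_id_valid(int vring, uint32_t max_num_virtio_queues)
{
	return vring >= 0 && vring < (int)max_num_virtio_queues;
}
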
@@ -388,7 +389,7 @@ mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
                DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
                return -ENODEV;
        }
-       if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+       if (qid >= (int)priv->caps.max_num_virtio_queues) {
                DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
                                vdev->device->name);
                return -E2BIG;
@@ -411,7 +412,7 @@ mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
                DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
                return -ENODEV;
        }
-       if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+       if (qid >= (int)priv->caps.max_num_virtio_queues) {
                DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
                                vdev->device->name);
                return -E2BIG;
@@ -560,6 +561,9 @@ mlx5_vdpa_create_dev_resources(struct mlx5_vdpa_priv *priv)
                rte_errno = errno;
                return -rte_errno;
        }
+       /* Add within page offset for 64K page system. */
+       priv->virtq_db_addr = (char *)priv->virtq_db_addr +
+               ((rte_mem_page_size() - 1) & priv->caps.doorbell_bar_offset);
        DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.",
                priv->virtq_db_addr);
        priv->td = mlx5_devx_cmd_create_td(ctx);
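The underlying technique: mmap() operates at page granularity, so on
a 64K page system a doorbell whose BAR offset is not page-aligned must
be reached by mapping the enclosing page and re-adding the offset's
low bits. A standalone sketch, with fd, bar_offset, and length as
hypothetical stand-ins for the driver's mapping parameters:

#include <stdint.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <rte_eal_paging.h>

/* Sketch: map a doorbell register that may sit within a page. */
static void *
map_doorbell(int fd, off_t bar_offset, size_t length)
{
	off_t pgmask = (off_t)rte_mem_page_size() - 1;
	/* mmap() accepts only page-aligned file offsets. */
	void *base = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED,
			  fd, bar_offset & ~pgmask);

	if (base == MAP_FAILED)
		return NULL;
	/* Re-apply the within-page part of the offset, as above. */
	return (char *)base + (bar_offset & pgmask);
}

/* Sketch: mask the within-page offset back out before munmap(),
 * mirroring the release path at the end of this patch. */
static void
unmap_doorbell(void *db_addr, size_t length)
{
	uintptr_t pgmask = (uintptr_t)rte_mem_page_size() - 1;

	munmap((void *)((uintptr_t)db_addr & ~pgmask), length);
}
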
@@ -624,7 +628,7 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
                DRV_LOG(DEBUG, "No capability to support virtq statistics.");
        priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
                           sizeof(struct mlx5_vdpa_virtq) *
-                          attr->vdpa.max_num_virtio_queues * 2,
+                          attr->vdpa.max_num_virtio_queues,
                           RTE_CACHE_LINE_SIZE);
        if (!priv) {
                DRV_LOG(ERR, "Failed to allocate private memory.");
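The allocation above packs the private structure and its per-virtq
array into a single rte_zmalloc() call, now sized by the virtqueue
count directly. A reduced sketch of the pattern, with hypothetical
structure names standing in for the driver's types:

#include <stdint.h>
#include <rte_common.h>
#include <rte_malloc.h>

/* Hypothetical, reduced shapes of the driver structures. */
struct virtq { void *counters; };
struct priv  { uint32_t nr_virtqs; struct virtq virtqs[]; };

static struct priv *
alloc_priv(uint32_t max_num_virtio_queues)
{
	/* One zeroed, cache-aligned block: header plus one trailing
	 * entry per virtqueue (no longer per queue pair times two). */
	return rte_zmalloc("priv", sizeof(struct priv) +
			   sizeof(struct virtq) * max_num_virtio_queues,
			   RTE_CACHE_LINE_SIZE);
}
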
@@ -685,7 +689,7 @@ mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)
        uint32_t i;
 
        mlx5_vdpa_dev_cache_clean(priv);
-       for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+       for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
                if (!priv->virtqs[i].counters)
                        continue;
                claim_zero(mlx5_devx_cmd_destroy(priv->virtqs[i].counters));
@@ -705,7 +709,9 @@ mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)
        if (priv->td)
                claim_zero(mlx5_devx_cmd_destroy(priv->td));
        if (priv->virtq_db_addr)
-               claim_zero(munmap(priv->virtq_db_addr, priv->var->length));
+               /* Mask out the within page offset for munmap. */
+               claim_zero(munmap((void *)((uintptr_t)priv->virtq_db_addr &
+                       ~(rte_mem_page_size() - 1)), priv->var->length));
        if (priv->var)
                mlx5_glue->dv_free_var(priv->var);
 }