vdpa/mlx5: workaround VAR offset within page
author Yajun Wu <yajunw@nvidia.com>
Wed, 15 Jun 2022 10:02:27 +0000 (13:02 +0300)
committer Maxime Coquelin <maxime.coquelin@redhat.com>
Fri, 17 Jun 2022 13:34:25 +0000 (15:34 +0200)
The vDPA driver first uses the kernel driver to allocate a doorbell (VAR) area
for each device, then uses var->mmap_off and var->length to mmap the uverbs
device file and obtain the doorbell userspace virtual address.

The current kernel driver provides var->mmap_off equal to the page start of the
VAR. This is fine on x86 servers with 4K pages, because the VAR physical
address is 4K aligned and therefore sits at the start of a 4K page.

But on aarch64 servers with 64K pages, the actual VAR physical address has an
offset within the page (it is not located at a 64K page start). The vDPA driver
therefore needs to add this within-page offset (caps.doorbell_bar_offset) to
the mapping to get the right VAR virtual address.
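
For illustration, a minimal standalone sketch of the arithmetic; the page
size, BAR offset and mmap base below are hypothetical values, not taken from
the driver:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical values for illustration: a 64K page and a doorbell (VAR)
 * whose BAR offset puts it 0x800 bytes into its page.
 */
#define PAGE_SIZE_64K    0x10000u
#define DOORBELL_BAR_OFF 0x20800u

int
main(void)
{
	/* mmap() of the uverbs file returns a page-aligned address, i.e.
	 * the start of the page containing the VAR, not the VAR itself.
	 */
	uintptr_t mmap_base = (uintptr_t)0x7f0000000000ull;

	/* Keep only the within-page part of the BAR offset ... */
	uintptr_t in_page_off = DOORBELL_BAR_OFF & (PAGE_SIZE_64K - 1);

	/* ... and add it to the mapping to reach the real doorbell. */
	uintptr_t db_addr = mmap_base + in_page_off;

	/* For munmap() the page-aligned base must be restored. */
	uintptr_t unmap_addr = db_addr & ~((uintptr_t)PAGE_SIZE_64K - 1);

	printf("doorbell address: 0x%" PRIxPTR "\n", db_addr);    /* ...0800 */
	printf("munmap address:   0x%" PRIxPTR "\n", unmap_addr); /* ...0000 */
	return 0;
}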

Fixes: 62c813706e4 ("vdpa/mlx5: map doorbell")
Cc: stable@dpdk.org
Signed-off-by: Yajun Wu <yajunw@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
drivers/vdpa/mlx5/mlx5_vdpa.c

index 76fa5d4299e969145bdcb01d8758fa3855a71a5d..8a33a0c9a1faa859a8dc8633ace095912070db2a 100644 (file)
@@ -14,6 +14,7 @@
 #include <rte_errno.h>
 #include <rte_string_fns.h>
 #include <rte_bus_pci.h>
+#include <rte_eal_paging.h>
 
 #include <mlx5_glue.h>
 #include <mlx5_common.h>
@@ -560,6 +561,9 @@ mlx5_vdpa_create_dev_resources(struct mlx5_vdpa_priv *priv)
                rte_errno = errno;
                return -rte_errno;
        }
+       /* Add within page offset for 64K page system. */
+       priv->virtq_db_addr = (char *)priv->virtq_db_addr +
+               ((rte_mem_page_size() - 1) & priv->caps.doorbell_bar_offset);
        DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.",
                priv->virtq_db_addr);
        priv->td = mlx5_devx_cmd_create_td(ctx);
@@ -705,7 +709,9 @@ mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)
        if (priv->td)
                claim_zero(mlx5_devx_cmd_destroy(priv->td));
        if (priv->virtq_db_addr)
-               claim_zero(munmap(priv->virtq_db_addr, priv->var->length));
+               /* Mask out the within page offset for munmap. */
+               claim_zero(munmap((void *)((uintptr_t)priv->virtq_db_addr &
+                       ~(rte_mem_page_size() - 1)), priv->var->length));
        if (priv->var)
                mlx5_glue->dv_free_var(priv->var);
 }
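
For context, a hedged sketch of the same map/unmap pattern outside the driver,
using plain POSIX calls; the helper names and parameters are hypothetical and
not part of the mlx5 code:

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical helper: map the doorbell area and step into the page by the
 * within-page part of the BAR offset, as the create path above does.
 */
void *
doorbell_map(int fd, off_t mmap_off, size_t length, uint64_t bar_off)
{
	size_t page_sz = (size_t)sysconf(_SC_PAGESIZE);
	void *base = mmap(NULL, length, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, mmap_off);

	if (base == MAP_FAILED)
		return NULL;
	return (char *)base + (bar_off & (page_sz - 1));
}

/* Hypothetical helper: mask the within-page offset out again so munmap()
 * gets the page-aligned address mmap() returned, as the release path does.
 */
void
doorbell_unmap(void *db_addr, size_t length)
{
	size_t page_sz = (size_t)sysconf(_SC_PAGESIZE);

	munmap((void *)((uintptr_t)db_addr & ~((uintptr_t)page_sz - 1)),
	       length);
}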