net/virtio-user: fix hugepage files enumeration
author Jianfeng Tan <jianfeng.tan@intel.com>
Thu, 26 Apr 2018 15:34:07 +0000 (15:34 +0000)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Mon, 14 May 2018 21:31:47 +0000 (22:31 +0100)
Since commit 2a04139f66b4 ("eal: add single file segments option"),
one hugepage file can contain multiple hugepages, which may be
mapped to different memory regions.

The original enumeration implementation cannot handle this situation.

This patch filters out the duplicated files and adjusts the sizes
after the enumeration.

Fixes: 6a84c37e3975 ("net/virtio-user: add vhost-user adapter layer")

Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Maxime Coquelin <maxime.coquelin@redhat.com>
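
For illustration, with --single-file-segments a /proc/self/maps excerpt can
look like the following (addresses, device and inode values are made up):
the same hugepage file backs two adjacent regions at different offsets.

    7fd940000000-7fd980000000 rw-s 00000000 00:0e 12345  /dev/hugepages/rtemap_0
    7fd980000000-7fd9c0000000 rw-s 40000000 00:0e 12345  /dev/hugepages/rtemap_0

The pre-fix enumeration would report rtemap_0 once per region; the fixed
code records it once and later takes the real size (0x80000000 here) from
stat().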
doc/guides/howto/virtio_user_for_container_networking.rst
drivers/net/virtio/virtio_user/vhost_user.c

diff --git a/doc/guides/howto/virtio_user_for_container_networking.rst b/doc/guides/howto/virtio_user_for_container_networking.rst
index aa68b53..476ce3a 100644
@@ -109,7 +109,8 @@ We have below limitations in this solution:
  * Cannot work with --no-huge option. Currently, DPDK uses anonymous mapping
    under this option which cannot be reopened to share with vhost backend.
  * Cannot work when there are more than VHOST_MEMORY_MAX_NREGIONS(8) hugepages.
-   In another word, do not use 2MB hugepage so far.
+   If you have more regions (especially when 2MB hugepages are used), the
+   --single-file-segments option can help reduce the number of shared files.
  * Applications should not use file name like HUGEFILE_FMT ("%smap_%d"). That
    will bring confusion when sharing hugepage files with backend by name.
  * Root privilege is a must. DPDK resolves physical addresses of hugepages
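
As a usage sketch (the core list, memory size and socket path below are
illustrative, not taken from this patch), --single-file-segments is passed
to the EAL like any other option:

    testpmd -l 0-1 -m 1024 --no-pci --single-file-segments \
        --vdev=virtio_user0,path=/var/run/usvhost \
        -- -i

With 2MB hugepages, grouping many pages per file this way can keep the
number of files shared with the vhost-user backend within
VHOST_MEMORY_MAX_NREGIONS.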
diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c
index a6df97a..573ef07 100644
@@ -138,12 +138,13 @@ struct hugepage_file_info {
 static int
 get_hugepage_file_info(struct hugepage_file_info huges[], int max)
 {
-       int idx;
+       int idx, k, exist;
        FILE *f;
        char buf[BUFSIZ], *tmp, *tail;
        char *str_underline, *str_start;
        int huge_index;
        uint64_t v_start, v_end;
+       struct stat stats;
 
        f = fopen("/proc/self/maps", "r");
        if (!f) {
@@ -183,16 +184,39 @@ get_hugepage_file_info(struct hugepage_file_info huges[], int max)
                if (sscanf(str_start, "map_%d", &huge_index) != 1)
                        continue;
 
+               /* skip a duplicated file, i.e., one mapped to multiple regions */
+               for (k = 0, exist = -1; k < idx; ++k) {
+                       if (!strcmp(huges[k].path, tmp)) {
+                               exist = k;
+                               break;
+                       }
+               }
+               if (exist >= 0)
+                       continue;
+
                if (idx >= max) {
                        PMD_DRV_LOG(ERR, "Exceed maximum of %d", max);
                        goto error;
                }
+
                huges[idx].addr = v_start;
-               huges[idx].size = v_end - v_start;
+               huges[idx].size = v_end - v_start; /* To be corrected later */
                snprintf(huges[idx].path, PATH_MAX, "%s", tmp);
                idx++;
        }
 
+       /* correct the sizes for files that back multiple regions */
+       for (k = 0; k < idx; ++k) {
+               if (stat(huges[k].path, &stats) < 0) {
+                       PMD_DRV_LOG(ERR, "Failed to stat %s, %s\n",
+                                   huges[k].path, strerror(errno));
+                       continue;
+               }
+               huges[k].size = stats.st_size;
+               PMD_DRV_LOG(INFO, "file %s, size %zx\n",
+                           huges[k].path, huges[k].size);
+       }
+
        fclose(f);
        return idx;
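
For reference, here is a self-contained sketch of the same enumeration
approach (hypothetical names hp_file and enumerate_hugepage_files; this is
not the driver code itself): scan /proc/self/maps for DPDK-style "map_N"
hugepage files, record each file only once, then correct the sizes via
stat().

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <inttypes.h>
    #include <sys/stat.h>

    #define HP_PATH_MAX 4096

    struct hp_file {
        uint64_t addr;            /* start of the first region seen */
        uint64_t size;            /* full file size, taken from stat() */
        char path[HP_PATH_MAX];
    };

    static int
    enumerate_hugepage_files(struct hp_file files[], int max)
    {
        FILE *f = fopen("/proc/self/maps", "r");
        char buf[BUFSIZ];
        int n = 0, k;

        if (!f)
            return -1;

        while (fgets(buf, sizeof(buf), f)) {
            uint64_t start, end;
            char path[HP_PATH_MAX];
            const char *m;
            int page_idx, dup = 0;

            /* maps line: "start-end perms offset dev inode path" */
            if (sscanf(buf, "%" SCNx64 "-%" SCNx64 " %*s %*s %*s %*s %4095s",
                       &start, &end, path) != 3)
                continue;

            /* keep only hugepage files named like "...map_N" */
            m = strstr(path, "map_");
            if (m == NULL || sscanf(m, "map_%d", &page_idx) != 1)
                continue;

            /* one file can back several regions: record it only once */
            for (k = 0; k < n; k++) {
                if (strcmp(files[k].path, path) == 0) {
                    dup = 1;
                    break;
                }
            }
            if (dup)
                continue;

            if (n >= max) {
                fclose(f);
                return -1;
            }
            files[n].addr = start;
            files[n].size = end - start;  /* corrected below */
            snprintf(files[n].path, sizeof(files[n].path), "%s", path);
            n++;
        }
        fclose(f);

        /* a file backing several regions is larger than any single
         * region, so take the authoritative size from the file itself */
        for (k = 0; k < n; k++) {
            struct stat st;

            if (stat(files[k].path, &st) == 0)
                files[k].size = (uint64_t)st.st_size;
        }
        return n;
    }

    int main(void)
    {
        struct hp_file files[8];
        int i, n = enumerate_hugepage_files(files, 8);

        for (i = 0; i < n; i++)
            printf("%s: addr 0x%" PRIx64 ", size 0x%" PRIx64 "\n",
                   files[i].path, files[i].addr, files[i].size);
        return n < 0;
    }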