After the commit
2a04139f66b4 ("eal: add single file segments option"),
one hugepage file could contain multiple hugepages which are further
mapped to different memory regions.
Original enumeration implementation cannot handle this situation.
This patch filters out the duplicated files and adjusts the size after
the enumeration.
Fixes: 6a84c37e3975 ("net/virtio-user: add vhost-user adapter layer")
Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Maxime Coquelin <maxime.coquelin@redhat.com>
* Cannot work with --no-huge option. Currently, DPDK uses anonymous mapping
under this option which cannot be reopened to share with vhost backend.
* Cannot work when there are more than VHOST_MEMORY_MAX_NREGIONS(8) hugepages.
- In another word, do not use 2MB hugepage so far.
+ If you have more regions (especially when 2MB hugepages are used), the option,
+ --single-file-segments, can help to reduce the number of shared files.
* Applications should not use file name like HUGEFILE_FMT ("%smap_%d"). That
will bring confusion when sharing hugepage files with backend by name.
* Root privilege is a must. DPDK resolves physical addresses of hugepages
static int
get_hugepage_file_info(struct hugepage_file_info huges[], int max)
{
FILE *f;
char buf[BUFSIZ], *tmp, *tail;
char *str_underline, *str_start;
int huge_index;
uint64_t v_start, v_end;
f = fopen("/proc/self/maps", "r");
if (!f) {
if (sscanf(str_start, "map_%d", &huge_index) != 1)
continue;
+ /* skip duplicated file which is mapped to different regions */
+ for (k = 0, exist = -1; k < idx; ++k) {
+ if (!strcmp(huges[k].path, tmp)) {
+ exist = k;
+ break;
+ }
+ }
+ if (exist >= 0)
+ continue;
+
if (idx >= max) {
PMD_DRV_LOG(ERR, "Exceed maximum of %d", max);
goto error;
}
huges[idx].addr = v_start;
- huges[idx].size = v_end - v_start;
+ huges[idx].size = v_end - v_start; /* To be corrected later */
snprintf(huges[idx].path, PATH_MAX, "%s", tmp);
idx++;
}
+ /* correct the size for files who have many regions */
+ for (k = 0; k < idx; ++k) {
+ if (stat(huges[k].path, &stats) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to stat %s, %s\n",
+ huges[k].path, strerror(errno));
+ continue;
+ }
+ huges[k].size = stats.st_size;
+ PMD_DRV_LOG(INFO, "file %s, size %zx\n",
+ huges[k].path, huges[k].size);
+ }
+