mem: add function to check if memory is contiguous
[dpdk.git] lib/librte_eal/common/eal_common_memalloc.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#include <rte_lcore.h>
#include <rte_fbarray.h>
#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_eal_memconfig.h>

#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"

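/*
 * Check whether the VA range [start, start + len) is IOVA-contiguous.
 * The range is expected to lie within the given memseg list. Returns
 * true if every underlying page maps to consecutive IOVA addresses.
 */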
bool
eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
		size_t len)
{
	void *end, *aligned_start, *aligned_end;
	size_t pgsz = (size_t)msl->page_sz;
	const struct rte_memseg *ms;

	/* in IOVA as VA mode, IOVA addresses match VA addresses, so the
	 * range is contiguous by definition */
	if (rte_eal_iova_mode() == RTE_IOVA_VA)
		return true;

	/* for legacy memory, it's always contiguous */
	if (internal_config.legacy_mem)
		return true;

	end = RTE_PTR_ADD(start, len);

	/* for nohuge, we check pagemap, otherwise check memseg */
	if (!rte_eal_has_hugepages()) {
		rte_iova_t cur, expected;

		aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
		aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);

		/* if start and end are on the same page, bail out early */
		if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
			return true;

		/* record the first page's IOVA, then scan from the second page */
		cur = rte_mem_virt2iova(aligned_start);
		expected = cur + pgsz;
		aligned_start = RTE_PTR_ADD(aligned_start, pgsz);

		while (aligned_start < aligned_end) {
			cur = rte_mem_virt2iova(aligned_start);
			if (cur != expected)
				return false;
			aligned_start = RTE_PTR_ADD(aligned_start, pgsz);
			expected += pgsz;
		}
	} else {
		int start_seg, end_seg, cur_seg;
		rte_iova_t cur, expected;

		aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
		aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);

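		/* each page in a memseg list is backed by exactly one
		 * memseg, so a page's segment index is its offset from
		 * the list's base VA divided by the page size */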
		start_seg = RTE_PTR_DIFF(aligned_start, msl->base_va) /
				pgsz;
		end_seg = RTE_PTR_DIFF(aligned_end, msl->base_va) /
				pgsz;

		/* if start and end are on the same page, bail out early */
		if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
			return true;

		/* record the first segment's IOVA, then scan from the second */
		ms = rte_fbarray_get(&msl->memseg_arr, start_seg);
		cur = ms->iova;
		expected = cur + pgsz;

		/* if we can't access IOVA addresses, assume non-contiguous */
		if (cur == RTE_BAD_IOVA)
			return false;

		for (cur_seg = start_seg + 1; cur_seg < end_seg;
				cur_seg++, expected += pgsz) {
			ms = rte_fbarray_get(&msl->memseg_arr, cur_seg);

			if (ms->iova != expected)
				return false;
		}
	}
	return true;
}
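
For reference, a minimal caller sketch (hypothetical, not part of this commit): it assumes the public rte_mem_virt2memseg_list() lookup from rte_memory.h to locate the memseg list owning an address before checking contiguity.

/* hypothetical wrapper, for illustration only */
static bool
va_range_is_iova_contig(void *addr, size_t len)
{
	const struct rte_memseg_list *msl;

	/* find the memseg list this address belongs to */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl == NULL)
		return false; /* not EAL-managed memory */

	return eal_memalloc_is_contig(msl, addr, len);
}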