1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
9 #include <rte_common.h>
10 #include <rte_hexdump.h>
11 #include <rte_pause.h>
/* Upper bound for the power-of-two/alignment test loops below.
 * The expansion must be parenthesized so it composes correctly inside
 * larger expressions: without parentheses, `MAX_NUM / 2` (used by the
 * RTE_ALIGN_MUL_* loops) parses as `1 << (20 / 2)` == 1 << 10, because
 * division binds tighter than the shift operator. */
#define MAX_NUM (1 << 20)
/* Fragment of the FAIL() reporting macro — its opening "#define FAIL(x)"
 * line and its closing lines are not visible in this chunk. */
18 {printf(x "() test failed!\n");\
21 /* this is really a sanity check */
/* test_macros(): sanity-checks the basic rte_common.h helper macros
 * (RTE_SWAP, RTE_PTR_ADD/SUB/DIFF, RTE_MAX/MIN, RTE_STR). The return-type
 * line and several body lines are missing from this chunk — fragment. */
23 test_macros(int __rte_unused unused_parm)
25 #define SMALLER 0x1000U
26 #define BIGGER 0x2000U
/* NOTE(review): unparenthesized expansion — BIGGER - SMALLER is safe only
 * while PTR_DIFF is not placed next to higher-precedence operators. */
27 #define PTR_DIFF BIGGER - SMALLER
28 #define FAIL_MACRO(x)\
29 {printf(#x "() test failed!\n");\
33 unsigned int smaller = SMALLER, bigger = BIGGER;
37 RTE_SWAP(smaller, bigger);
/* NOTE(review): with &&, failure is reported only when BOTH values are
 * wrong after the swap; a strict check would use ||. Confirm intent. */
38 if (smaller != BIGGER && bigger != SMALLER)
40 if ((uintptr_t)RTE_PTR_ADD(SMALLER, PTR_DIFF) != BIGGER)
41 FAIL_MACRO(RTE_PTR_ADD);
42 if ((uintptr_t)RTE_PTR_SUB(BIGGER, PTR_DIFF) != SMALLER)
43 FAIL_MACRO(RTE_PTR_SUB);
44 if (RTE_PTR_DIFF(BIGGER, SMALLER) != PTR_DIFF)
45 FAIL_MACRO(RTE_PTR_DIFF);
46 if (RTE_MAX(SMALLER, BIGGER) != BIGGER)
48 if (RTE_MIN(SMALLER, BIGGER) != SMALLER)
/* RTE_STR stringizes its argument; result is compared to "test". */
51 if (strncmp(RTE_STR(test), "test", sizeof("test")))
/* Fragment: bit-scan-forward tests. The enclosing function header and
 * the declarations of pos/shift/val32/val64 are not visible here. */
62 /* safe versions should be able to handle 0 */
63 if (rte_bsf32_safe(0, &pos) != 0)
64 FAIL("rte_bsf32_safe");
65 if (rte_bsf64_safe(0, &pos) != 0)
66 FAIL("rte_bsf64_safe");
/* Walk a single set bit through the word and check bsf locates it.
 * NOTE(review): the bound is `shift < 63`, so bit 63 of the 64-bit
 * value is never exercised — confirm whether that is intentional. */
68 for (shift = 0; shift < 63; shift++) {
72 val64 = 1ULL << shift;
73 if ((uint32_t)rte_bsf64(val64) != shift)
75 if (rte_bsf64_safe(val64, &pos) != 1)
76 FAIL("rte_bsf64_safe");
78 FAIL("rte_bsf64_safe");
/* 32-bit variants (guarding lines that limit shift to <32 are missing
 * from this chunk). */
84 if ((uint32_t)rte_bsf32(val32) != shift)
86 if (rte_bsf32_safe(val32, &pos) != 1)
87 FAIL("rte_bsf32_safe");
89 FAIL("rte_bsf32_safe");
/* Fragment: smoke-test the dump helpers — they only print to stdout,
 * so this just exercises them on a small local buffer. */
98 char memdump[] = "memdump_test";
100 rte_memdump(stdout, "test", memdump, sizeof(memdump));
101 rte_hexdump(stdout, "test", memdump, sizeof(memdump));
/* Failure-report macros for the alignment tests below. Both are
 * truncated in this chunk (their closing lines, ending with a
 * backslash continuation, are not visible). FAIL_ALIGN prints two
 * unsigned ints; FAIL_ALIGN64 prints two uint64_t values via PRIu64. */
111 #define FAIL_ALIGN(x, i, p)\
112 {printf(x "() test failed: %u %u\n", i, p);\
114 #define FAIL_ALIGN64(x, j, q)\
115 {printf(x "() test failed: %"PRIu64" %"PRIu64"\n", j, q);\
/* ERROR_FLOOR(res, i, pow): evaluates non-zero when `res` is NOT the
 * correct floor alignment of `i` to multiple `pow` — i.e. res must be
 * a multiple of pow AND lie in the same pow-sized block as i.
 * Every argument and the whole expansion are parenthesized so the
 * macro composes safely inside larger expressions (e.g. negation). */
#define ERROR_FLOOR(res, i, pow) \
	(((res) % (pow)) ||                 /* check if not aligned */ \
	(((res) / (pow)) != ((i) / (pow)))) /* check if correct alignment */
/* ERROR_CEIL(res, i, pow): evaluates non-zero when `res` is NOT the
 * correct ceiling alignment of `i` to multiple `pow`.
 * Fixes: the ternary branches referenced the caller-local variable
 * `val` instead of the macro parameter `res` — every call site happened
 * to pass `val` as `res`, which masked the bug but broke the macro for
 * any other argument. Arguments and the whole expansion are also
 * parenthesized so the macro composes safely. */
#define ERROR_CEIL(res, i, pow) \
	(((res) % (pow)) ||                   /* check if not aligned */ \
	(((i) % (pow)) == 0 ?                 /* check if ceiling is invoked */ \
	 ((res) / (pow)) != ((i) / (pow)) :   /* if aligned */ \
	 ((res) / (pow)) != ((i) / (pow)) + 1)) /* if not aligned, hence +1 */
/* Fragment of the alignment test: the function header, loop-variable
 * declarations, and several interior/closing lines are not visible in
 * this chunk. Verifies the rte_align32/64 pow2 helpers and the
 * RTE_ALIGN_* macro family against plain divide/modulo arithmetic. */
/* rte_align32pow2: p tracks the next power of two >= i (the lines that
 * advance p are missing from this chunk). */
129 for (i = 1, p = 1; i <= MAX_NUM; i ++) {
130 if (rte_align32pow2(i) != p)
131 FAIL_ALIGN("rte_align32pow2", i, p);
/* rte_align32prevpow2: p tracks the previous power of two <= i. */
136 for (i = 1, p = 1; i <= MAX_NUM; i++) {
137 if (rte_align32prevpow2(i) != p)
138 FAIL_ALIGN("rte_align32prevpow2", i, p);
139 if (rte_is_power_of_2(i + 1))
/* 64-bit variants of the same two checks. */
143 for (j = 1, q = 1; j <= MAX_NUM ; j++) {
144 if (rte_align64pow2(j) != q)
145 FAIL_ALIGN64("rte_align64pow2", j, q);
150 for (j = 1, q = 1; j <= MAX_NUM ; j++) {
151 if (rte_align64prevpow2(j) != q)
152 FAIL_ALIGN64("rte_align64prevpow2", j, q);
153 if (rte_is_power_of_2(j + 1))
/* For every power of two p, align every i in [1, MAX_NUM] down and up,
 * validating with the ERROR_FLOOR/ERROR_CEIL oracles. */
157 for (p = 2; p <= MAX_NUM; p <<= 1) {
159 if (!rte_is_power_of_2(p))
160 FAIL("rte_is_power_of_2");
162 for (i = 1; i <= MAX_NUM; i++) {
164 if (RTE_ALIGN_FLOOR((uintptr_t)i, p) % p)
165 FAIL_ALIGN("RTE_ALIGN_FLOOR", i, p);
167 val = RTE_PTR_ALIGN_FLOOR((uintptr_t) i, p);
168 if (ERROR_FLOOR(val, i, p))
169 FAIL_ALIGN("RTE_PTR_ALIGN_FLOOR", i, p);
171 val = RTE_ALIGN_FLOOR(i, p);
172 if (ERROR_FLOOR(val, i, p))
173 FAIL_ALIGN("RTE_ALIGN_FLOOR", i, p);
/* RTE_PTR_ALIGN / RTE_ALIGN are checked against the ceiling oracle. */
176 val = RTE_PTR_ALIGN((uintptr_t) i, p);
177 if (ERROR_CEIL(val, i, p))
178 FAIL_ALIGN("RTE_PTR_ALIGN", i, p);
180 val = RTE_ALIGN(i, p);
181 if (ERROR_CEIL(val, i, p))
182 FAIL_ALIGN("RTE_ALIGN", i, p);
184 val = RTE_ALIGN_CEIL(i, p);
185 if (ERROR_CEIL(val, i, p))
186 FAIL_ALIGN("RTE_ALIGN_CEIL", i, p);
188 val = RTE_PTR_ALIGN_CEIL((uintptr_t)i, p);
189 if (ERROR_CEIL(val, i, p))
190 FAIL_ALIGN("RTE_PTR_ALIGN_CEIL", i, p);
192 /* by this point we know that val is aligned to p */
193 if (!rte_is_aligned((void*)(uintptr_t) val, p))
194 FAIL("rte_is_aligned");
/* Multiple-based (non-power-of-two) alignment helpers.
 * NOTE(review): if MAX_NUM is defined without parentheses, the bound
 * `MAX_NUM / 2` mis-parses as `1 << (20 / 2)` — verify the definition. */
198 for (p = 1; p <= MAX_NUM / 2; p++) {
199 for (i = 1; i <= MAX_NUM / 2; i++) {
200 val = RTE_ALIGN_MUL_CEIL(i, p);
201 if (val % p != 0 || val < i)
202 FAIL_ALIGN("RTE_ALIGN_MUL_CEIL", i, p);
203 val = RTE_ALIGN_MUL_FLOOR(i, p);
204 if (val % p != 0 || val > i)
205 FAIL_ALIGN("RTE_ALIGN_MUL_FLOOR", i, p);
/* NEAR must agree with either CEIL or FLOOR. Note the bitwise `&`:
 * correct here because both operands are 0/1 comparison results, but
 * `&&` would be the conventional spelling. */
206 val = RTE_ALIGN_MUL_NEAR(i, p);
207 if (val % p != 0 || ((val != RTE_ALIGN_MUL_CEIL(i, p))
208 & (val != RTE_ALIGN_MUL_FLOOR(i, p))))
209 FAIL_ALIGN("RTE_ALIGN_MUL_NEAR", i, p);
/* Fragment of the log2 test: the function header, i64 declaration and
 * the early-return/closing lines are not visible in this chunk.
 * Compares rte_log2_u32/u64 against ceil(log2(x)) computed via libm. */
219 uint32_t i, base, compare;
220 const uint32_t max = 0x10000;
221 const uint32_t step = 1;
/* Input 0 is special-cased: both helpers are expected to return 0. */
223 compare = rte_log2_u32(0);
225 printf("Wrong rte_log2_u32(0) val %x, expected 0\n", compare);
229 compare = rte_log2_u64(0);
231 printf("Wrong rte_log2_u64(0) val %x, expected 0\n", compare);
235 for (i = 1; i < max; i = i + step) {
238 /* extend range for 64-bit */
239 i64 = (uint64_t)i << 32;
/* NOTE(review): ceilf() (float) applied to the double result of
 * log2() — confirm precision is sufficient near exact powers of two. */
240 base = (uint32_t)ceilf(log2(i64));
241 compare = rte_log2_u64(i64);
242 if (base != compare) {
243 printf("Wrong rte_log2_u64(%" PRIx64 ") val %x, expected %x\n",
248 base = (uint32_t)ceilf(log2((uint32_t)i));
249 compare = rte_log2_u32((uint32_t)i);
250 if (base != compare) {
251 printf("Wrong rte_log2_u32(%x) val %x, expected %x\n",
/* The 64-bit helper must agree with the 32-bit result on small inputs. */
255 compare = rte_log2_u64((uint64_t)i);
256 if (base != compare) {
257 printf("Wrong rte_log2_u64(%x) val %x, expected %x\n",
/* Fragment of the fls ("find last set") test: verifies rte_fls_u32/u64
 * against a table of expected results. The function header, the struct
 * field list and the test-vector entries are not visible in this chunk.
 * Per the zero case below, a return of 0 means "no bits set". */
268 struct fls_test_vector {
275 const struct fls_test_vector test[] = {
282 for (i = 0; i < RTE_DIM(test); i++) {
286 rc = rte_fls_u32(arg);
287 expected = test[i].rc;
288 if (rc != expected) {
289 printf("Wrong rte_fls_u32(0x%x) rc=%d, expected=%d\n",
/* The 64-bit variant must agree with the 32-bit table on 32-bit args. */
295 rc = rte_fls_u64(arg);
296 expected = test[i].rc;
297 if (rc != expected) {
298 printf("Wrong rte_fls_u64(0x%x) rc=%d, expected=%d\n",
302 /* 64-bit version shifted by 32 bits */
303 arg64 = (uint64_t)test[i].arg << 32;
304 rc = rte_fls_u64(arg64);
305 /* don't shift zero */
306 expected = test[i].rc == 0 ? 0 : test[i].rc + 32;
307 if (rc != expected) {
308 printf("Wrong rte_fls_u64(0x%" PRIx64 ") rc=%d, expected=%d\n",
309 arg64, rc, expected);
/* Fragment of the test_common() driver: ORs sub-test results into a
 * single pass/fail value (only the test_macros call is visible here). */
322 ret |= test_macros(0);
/* Register the suite with the DPDK test harness as "common_autotest". */
331 REGISTER_TEST_COMMAND(common_autotest, test_common);