/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>

#include "eal_filesystem.h"
#include "eal_private.h"

#include "rte_fbarray.h"

#define MASK_SHIFT 6ULL
#define MASK_ALIGN (1ULL << MASK_SHIFT)
#define MASK_LEN_TO_IDX(x) ((x) >> MASK_SHIFT)
#define MASK_LEN_TO_MOD(x) ((x) - RTE_ALIGN_FLOOR(x, MASK_ALIGN))
#define MASK_GET_IDX(idx, mod) (((idx) << MASK_SHIFT) + (mod))

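/*
 * Example (illustrative): for element index 70, MASK_LEN_TO_IDX(70) == 1 and
 * MASK_LEN_TO_MOD(70) == 6, i.e. bit 6 of the second 64-bit mask word;
 * MASK_GET_IDX(1, 6) maps back to 70.
 */
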
/*
 * This is a mask that is always stored at the end of array, to provide fast
 * way of finding free/used spots without looping through each element.
 */
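struct used_mask {
	int n_masks;
	uint64_t data[];
};
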
static size_t
calc_mask_size(int len)
{
	/* mask must be multiple of MASK_ALIGN, even though length of array
	 * itself may not be aligned on that boundary.
	 */
	len = RTE_ALIGN_CEIL(len, MASK_ALIGN);
	return sizeof(struct used_mask) +
			sizeof(uint64_t) * MASK_LEN_TO_IDX(len);
}

static size_t
calc_data_size(size_t page_sz, int elt_sz, int len)
{
	/* cast up front so the multiply happens in size_t, not int */
	size_t data_sz = (size_t)elt_sz * len;
	size_t msk_sz = calc_mask_size(len);
	return RTE_ALIGN_CEIL(data_sz + msk_sz, page_sz);
}

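/*
 * Example (illustrative): with page_sz == 4096, elt_sz == 32 and len == 100,
 * the data takes 3200 bytes; the mask header plus two 64-bit mask words
 * (100 entries round up to 128 bits) adds 24 bytes on common LP64 ABIs, and
 * the 3224-byte total is rounded up to a single 4096-byte page.
 */
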
static struct used_mask *
get_used_mask(void *data, int elt_sz, int len)
{
	/* the mask is stored directly past the last array element */
	return (struct used_mask *) RTE_PTR_ADD(data, (size_t)elt_sz * len);
}

static int
resize_and_map(int fd, void *addr, size_t len)
{
	void *map_addr;

	if (ftruncate(fd, len)) {
		RTE_LOG(ERR, EAL, "Cannot truncate file: %s\n",
				strerror(errno));
		/* pass errno up the chain */
		rte_errno = errno;
		return -1;
	}

	map_addr = mmap(addr, len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_FIXED, fd, 0);
	if (map_addr != addr) {
		RTE_LOG(ERR, EAL, "mmap() failed: %s\n", strerror(errno));
		/* pass errno up the chain */
		rte_errno = errno;
		return -1;
	}
	return 0;
}

static int
find_next_n(const struct rte_fbarray *arr, int start, int n, bool used)
{
	const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
			arr->len);
	int msk_idx, lookahead_idx, first, first_mod;
	int last, last_mod;
	uint64_t ignore_msk, last_msk;

	/*
	 * mask only has granularity of MASK_ALIGN, but start may not be aligned
	 * on that boundary, so construct a special mask to exclude anything we
	 * don't want to see to avoid confusing ctz.
	 */
	first = MASK_LEN_TO_IDX(start);
	first_mod = MASK_LEN_TO_MOD(start);
	ignore_msk = ~((1ULL << first_mod) - 1);

	/* array length may not be aligned, so calculate ignore mask for last
	 * mask index.
	 */
	last = MASK_LEN_TO_IDX(arr->len);
	last_mod = MASK_LEN_TO_MOD(arr->len);
	last_msk = ~(-(1ULL) << last_mod);

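	/*
	 * Illustration (hypothetical values): for start == 70, first == 1 and
	 * first_mod == 6, so ignore_msk masks out bits 0-5 of the first
	 * scanned mask word. For arr->len == 100, last == 1 and
	 * last_mod == 36, so last_msk keeps only the low 36 bits of the final
	 * word, ensuring bits past the end of the array are never treated as
	 * valid entries.
	 */
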
	for (msk_idx = first; msk_idx < msk->n_masks; msk_idx++) {
		uint64_t cur_msk, lookahead_msk;
		int run_start, clz, left;
		bool found = false;
		/*
		 * The process of getting n consecutive bits for arbitrary n is
		 * a bit involved, but here it is in a nutshell:
		 *
		 *  1. let n be the number of consecutive bits we're looking for
		 *  2. check if n can fit in one mask, and if so, do n-1
		 *     rshift-ands to see if there is an appropriate run inside
		 *     our current mask
		 *    2a. if we found a run, bail out early
		 *    2b. if we didn't find a run, proceed
		 *  3. invert the mask and count leading zeroes (that is, count
		 *     how many consecutive set bits we had starting from the
		 *     end of current mask) as k
		 *    3a. if k is 0, continue to next mask
		 *    3b. if k is not 0, we have a potential run at the end of
		 *        the current mask
		 *  4. to satisfy our requirements, next mask must have n-k
		 *     consecutive set bits right at the start, so we will do
		 *     (n-k-1) rshift-ands and check if first bit is set.
		 *
		 * Step 4 will need to be repeated if (n-k) > MASK_ALIGN until
		 * we either run out of masks, lose the run, or find what we
		 * were looking for.
		 */
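		/*
		 * Worked example of the rshift-and trick (hypothetical
		 * values): to find n == 3 consecutive set bits in
		 * 0b01110110, do two rshift-ands:
		 *   step 1: 0b01110110 & (0b01110110 >> 1) == 0b00110010
		 *   step 2: 0b00110010 & (0b00110010 >> 1) == 0b00010000
		 * the result is non-zero, and ctz on it gives 4: the start
		 * of the 3-bit run (bits 4-6) in the original mask.
		 */
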
		cur_msk = msk->data[msk_idx];

		/* if we're looking for free spaces, invert the mask */
		if (!used)
			cur_msk = ~cur_msk;

		/* combine current ignore mask with last index ignore mask */
		if (msk_idx == last)
			ignore_msk |= last_msk;

		/* if we have an ignore mask, ignore once */
		if (ignore_msk) {
			cur_msk &= ignore_msk;
			ignore_msk = 0;
		}
		/* if n can fit within a single mask, do a search */
		if (n <= MASK_ALIGN) {
			uint64_t tmp_msk = cur_msk;
			int s_idx;

			for (s_idx = 0; s_idx < n - 1; s_idx++)
				tmp_msk &= tmp_msk >> 1ULL;

			/* we found what we were looking for */
			if (tmp_msk != 0) {
				run_start = __builtin_ctzll(tmp_msk);
				return MASK_GET_IDX(msk_idx, run_start);
			}
		}
		/*
		 * we didn't find our run within the mask, or n > MASK_ALIGN,
		 * so we're going for plan B.
		 */

		/* count leading zeroes on inverted mask */
		if (~cur_msk == 0)
			clz = sizeof(cur_msk) * 8;
		else
			clz = __builtin_clzll(~cur_msk);

		/* if there aren't any runs at the end either, just continue */
		if (clz == 0)
			continue;

		/* we have a partial run at the end, so try looking ahead */
		run_start = MASK_ALIGN - clz;
		left = n - clz;
		for (lookahead_idx = msk_idx + 1; lookahead_idx < msk->n_masks;
				lookahead_idx++) {
			int s_idx, need;

			lookahead_msk = msk->data[lookahead_idx];

			/* if we're looking for free space, invert the mask */
			if (!used)
				lookahead_msk = ~lookahead_msk;

			/* figure out how many consecutive bits we need here */
			need = RTE_MIN(left, MASK_ALIGN);

			for (s_idx = 0; s_idx < need - 1; s_idx++)
				lookahead_msk &= lookahead_msk >> 1ULL;

			/* if first bit is not set, we've lost the run */
			if ((lookahead_msk & 1) == 0) {
				/*
				 * we've scanned this far, so we know there are
				 * no runs in the space we've lookahead-scanned
				 * as well, so skip that on next iteration.
				 */
				ignore_msk = ~((1ULL << need) - 1);
				msk_idx = lookahead_idx;
				break;
			}

			left -= need;

			/* check if we've found what we were looking for */
			if (left == 0) {
				found = true;
				break;
			}
		}

		/* we didn't find anything, so continue */
		if (!found)
			continue;

		return MASK_GET_IDX(msk_idx, run_start);
	}
	/* we didn't find anything; rte_errno takes positive errno values */
	rte_errno = used ? ENOENT : ENOSPC;
	return -1;
}

static int
find_next(const struct rte_fbarray *arr, int start, bool used)
{
	const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
			arr->len);
	int idx, first, first_mod;
	int last, last_mod;
	uint64_t ignore_msk, last_msk;

	/*
	 * mask only has granularity of MASK_ALIGN, but start may not be aligned
	 * on that boundary, so construct a special mask to exclude anything we
	 * don't want to see to avoid confusing ctz.
	 */
	first = MASK_LEN_TO_IDX(start);
	first_mod = MASK_LEN_TO_MOD(start);
	ignore_msk = ~((1ULL << first_mod) - 1ULL);

	/* array length may not be aligned, so calculate ignore mask for last
	 * mask index.
	 */
	last = MASK_LEN_TO_IDX(arr->len);
	last_mod = MASK_LEN_TO_MOD(arr->len);
	last_msk = ~(-(1ULL) << last_mod);

	for (idx = first; idx < msk->n_masks; idx++) {
		uint64_t cur = msk->data[idx];
		int found;

		/* if we're looking for free entries, invert mask */
		if (!used)
			cur = ~cur;

		/* if this is the last mask, ignore everything after last bit */
		if (idx == last)
			cur &= last_msk;

		/* ignore everything before start on first iteration */
		if (idx == first)
			cur &= ignore_msk;

		/* check if we have any entries */
		if (cur == 0)
			continue;

		/*
		 * find first set bit - that will correspond to whatever it is
		 * that we're looking for.
		 */
		found = __builtin_ctzll(cur);
		return MASK_GET_IDX(idx, found);
	}
	/* we didn't find anything */
	rte_errno = used ? ENOENT : ENOSPC;
	return -1;
}

static int
find_contig(const struct rte_fbarray *arr, int start, bool used)
{
	const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
			arr->len);
	int idx, first, first_mod;
	int last, last_mod;
	uint64_t last_msk;
	int need_len, result = 0;

	/* array length may not be aligned, so calculate ignore mask for last
	 * mask index.
	 */
	last = MASK_LEN_TO_IDX(arr->len);
	last_mod = MASK_LEN_TO_MOD(arr->len);
	last_msk = ~(-(1ULL) << last_mod);

	first = MASK_LEN_TO_IDX(start);
	first_mod = MASK_LEN_TO_MOD(start);
	for (idx = first; idx < msk->n_masks; idx++, result += need_len) {
		uint64_t cur = msk->data[idx];
		int run_len;

		need_len = MASK_ALIGN;

		/* if we're looking for free entries, invert mask */
		if (!used)
			cur = ~cur;

		/* if this is last mask, ignore everything after last bit */
		if (idx == last)
			cur &= last_msk;

		/* ignore everything before start on first iteration */
		if (idx == first) {
			cur >>= first_mod;
			/* at the start, we don't need the full mask len */
			need_len -= first_mod;
		}

		/* we will be looking for zeroes, so invert the mask */
		cur = ~cur;

		/* if mask is zero, we have a complete run */
		if (cur == 0)
			continue;

		/* see if current run ends before mask end */
		run_len = __builtin_ctzll(cur);

		/* add however many zeroes we've had in the last run and quit */
		if (run_len < need_len) {
			result += run_len;
			break;
		}
	}

	return result;
}

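/*
 * Illustration (hypothetical): scanning for used entries from start == 0
 * with arr->len == 128 and first mask word 0xff: the word is inverted to
 * ~0xff, whose ctz is 8; since 8 < 64, the scan stops and reports a run of
 * 8 contiguous used entries.
 */
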
static int
set_used(struct rte_fbarray *arr, int idx, bool used)
{
	struct used_mask *msk;
	uint64_t msk_bit;
	int msk_idx;
	bool already_used;
	int ret = 0;

	/* validate before dereferencing arr */
	if (arr == NULL || idx < 0 || idx >= arr->len) {
		rte_errno = EINVAL;
		return -1;
	}

	msk = get_used_mask(arr->data, arr->elt_sz, arr->len);
	msk_bit = 1ULL << MASK_LEN_TO_MOD(idx);
	msk_idx = MASK_LEN_TO_IDX(idx);

	/* prevent array from changing under us */
	rte_rwlock_write_lock(&arr->rwlock);

	already_used = (msk->data[msk_idx] & msk_bit) != 0;

	/* nothing to be done */
	if (used == already_used)
		goto out;

	if (used) {
		msk->data[msk_idx] |= msk_bit;
		arr->count++;
	} else {
		msk->data[msk_idx] &= ~msk_bit;
		arr->count--;
	}
out:
	rte_rwlock_write_unlock(&arr->rwlock);

	return ret;
}

static int
fully_validate(const char *name, unsigned int elt_sz, unsigned int len)
{
	if (name == NULL || elt_sz == 0 || len == 0) {
		rte_errno = EINVAL;
		return -1;
	}

	if (strnlen(name, RTE_FBARRAY_NAME_LEN) == RTE_FBARRAY_NAME_LEN) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}

	return 0;
}

int __rte_experimental
rte_fbarray_init(struct rte_fbarray *arr, const char *name, int len, int elt_sz)
{
	size_t page_sz, mmap_len;
	char path[PATH_MAX];
	struct used_mask *msk;
	void *data = NULL;
	int fd = -1;

	if (arr == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	if (fully_validate(name, elt_sz, len))
		return -1;

	page_sz = sysconf(_SC_PAGESIZE);

	/* calculate our memory limits */
	mmap_len = calc_data_size(page_sz, elt_sz, len);

	data = eal_get_virtual_area(NULL, &mmap_len, page_sz, 0, 0);
	if (data == NULL)
		goto fail;

	eal_get_fbarray_path(path, sizeof(path), name);

	/*
	 * Each fbarray is unique to process namespace, i.e. the filename
	 * depends on process prefix. Try to take out a lock and see if we
	 * succeed. If we don't, someone else is using it already.
	 */
	fd = open(path, O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		RTE_LOG(DEBUG, EAL, "%s(): couldn't open %s: %s\n", __func__,
				path, strerror(errno));
		rte_errno = errno;
		goto fail;
	} else if (flock(fd, LOCK_EX | LOCK_NB)) {
		RTE_LOG(DEBUG, EAL, "%s(): couldn't lock %s: %s\n", __func__,
				path, strerror(errno));
		rte_errno = EBUSY;
		goto fail;
	}

	/* take out a non-exclusive lock, so that other processes could still
	 * attach to it, but no other process could reinitialize it.
	 */
	if (flock(fd, LOCK_SH | LOCK_NB)) {
		rte_errno = errno;
		goto fail;
	}

	if (resize_and_map(fd, data, mmap_len))
		goto fail;

	/* we've mmap'ed the file, we can now close the fd */
	close(fd);

	/* initialize the data */
	memset(data, 0, mmap_len);

	/* populate data structure */
	snprintf(arr->name, sizeof(arr->name), "%s", name);
	arr->data = data;
	arr->len = len;
	arr->elt_sz = elt_sz;
	arr->count = 0;

	msk = get_used_mask(data, elt_sz, len);
	msk->n_masks = MASK_LEN_TO_IDX(RTE_ALIGN_CEIL(len, MASK_ALIGN));

	rte_rwlock_init(&arr->rwlock);

	return 0;
fail:
	if (data)
		munmap(data, mmap_len);
	if (fd >= 0)
		close(fd);
	return -1;
}

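/*
 * Example usage (illustrative, error handling omitted):
 *
 *	struct rte_fbarray arr;
 *
 *	if (rte_fbarray_init(&arr, "example", 64, sizeof(uint64_t)) == 0) {
 *		int idx = rte_fbarray_find_next_free(&arr, 0);
 *		uint64_t *elt = rte_fbarray_get(&arr, idx);
 *		*elt = 42;
 *		rte_fbarray_set_used(&arr, idx);
 *	}
 */
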
int __rte_experimental
rte_fbarray_attach(struct rte_fbarray *arr)
{
	size_t page_sz, mmap_len;
	char path[PATH_MAX];
	void *data = NULL;
	int fd = -1;

	if (arr == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	/*
	 * we don't need to synchronize attach as the two values we need
	 * (element size and array length) are constant for the lifetime of
	 * the array, so the parts we care about will not race.
	 */
	if (fully_validate(arr->name, arr->elt_sz, arr->len))
		return -1;

	page_sz = sysconf(_SC_PAGESIZE);

	mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->len);

	data = eal_get_virtual_area(arr->data, &mmap_len, page_sz, 0, 0);
	if (data == NULL)
		goto fail;

	eal_get_fbarray_path(path, sizeof(path), arr->name);

	fd = open(path, O_RDWR);
	if (fd < 0) {
		rte_errno = errno;
		goto fail;
	}

	/* lock the file, to let others know we're using it */
	if (flock(fd, LOCK_SH | LOCK_NB)) {
		rte_errno = errno;
		goto fail;
	}

	if (resize_and_map(fd, data, mmap_len))
		goto fail;

	close(fd);

	return 0;
fail:
	if (data)
		munmap(data, mmap_len);
	if (fd >= 0)
		close(fd);
	return -1;
}

int __rte_experimental
rte_fbarray_detach(struct rte_fbarray *arr)
{
	if (arr == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	/*
	 * we don't need to synchronize detach as the two values we need
	 * (element size and total capacity) are constant for the lifetime of
	 * the array, so the parts we care about will not race. if the user is
	 * detaching while doing something else in the same process, we can't
	 * really do anything about it, things will blow up either way.
	 */
	size_t page_sz = sysconf(_SC_PAGESIZE);

	/* this may already be unmapped (e.g. repeated call from a previously
	 * failed destroy()), but this is on the user, we can't (easily) know
	 * if this is still mapped.
	 */
	munmap(arr->data, calc_data_size(page_sz, arr->elt_sz, arr->len));

	return 0;
}

int __rte_experimental
rte_fbarray_destroy(struct rte_fbarray *arr)
{
	int fd, ret;
	char path[PATH_MAX];

	ret = rte_fbarray_detach(arr);
	if (ret)
		return ret;

	/* try deleting the file */
	eal_get_fbarray_path(path, sizeof(path), arr->name);

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		rte_errno = errno;
		return -1;
	}
	if (flock(fd, LOCK_EX | LOCK_NB)) {
		RTE_LOG(DEBUG, EAL, "Cannot destroy fbarray - another process is using it\n");
		rte_errno = EBUSY;
		ret = -1;
	} else {
		ret = 0;
		unlink(path);
		memset(arr, 0, sizeof(*arr));
	}
	close(fd);

	return ret;
}

void * __rte_experimental
rte_fbarray_get(const struct rte_fbarray *arr, int idx)
{
	if (arr == NULL || idx < 0 || idx >= arr->len) {
		rte_errno = EINVAL;
		return NULL;
	}

	return RTE_PTR_ADD(arr->data, (size_t)idx * arr->elt_sz);
}

int __rte_experimental
rte_fbarray_set_used(struct rte_fbarray *arr, int idx)
{
	return set_used(arr, idx, true);
}

int __rte_experimental
rte_fbarray_set_free(struct rte_fbarray *arr, int idx)
{
	return set_used(arr, idx, false);
}

int __rte_experimental
rte_fbarray_is_used(struct rte_fbarray *arr, int idx)
{
	struct used_mask *msk;
	int msk_idx;
	uint64_t msk_bit;
	int ret;

	if (arr == NULL || idx < 0 || idx >= arr->len) {
		rte_errno = EINVAL;
		return -1;
	}

	/* prevent array from changing under us */
	rte_rwlock_read_lock(&arr->rwlock);

	msk = get_used_mask(arr->data, arr->elt_sz, arr->len);
	msk_idx = MASK_LEN_TO_IDX(idx);
	msk_bit = 1ULL << MASK_LEN_TO_MOD(idx);

	ret = (msk->data[msk_idx] & msk_bit) != 0;

	rte_rwlock_read_unlock(&arr->rwlock);

	return ret;
}

int __rte_experimental
rte_fbarray_find_next_free(struct rte_fbarray *arr, int start)
{
	int ret = -1;

	if (arr == NULL || start < 0 || start >= arr->len) {
		rte_errno = EINVAL;
		return -1;
	}

	/* prevent array from changing under us */
	rte_rwlock_read_lock(&arr->rwlock);

	if (arr->len == arr->count) {
		rte_errno = ENOSPC;
		goto out;
	}

	ret = find_next(arr, start, false);
out:
	rte_rwlock_read_unlock(&arr->rwlock);
	return ret;
}

int __rte_experimental
rte_fbarray_find_next_used(struct rte_fbarray *arr, int start)
{
	int ret = -1;

	if (arr == NULL || start < 0 || start >= arr->len) {
		rte_errno = EINVAL;
		return -1;
	}

	/* prevent array from changing under us */
	rte_rwlock_read_lock(&arr->rwlock);

	if (arr->count == 0) {
		rte_errno = ENOENT;
		goto out;
	}

	ret = find_next(arr, start, true);
out:
	rte_rwlock_read_unlock(&arr->rwlock);
	return ret;
}

int __rte_experimental
rte_fbarray_find_next_n_free(struct rte_fbarray *arr, int start, int n)
{
	int ret = -1;

	if (arr == NULL || start < 0 || start >= arr->len ||
			n < 0 || n > arr->len) {
		rte_errno = EINVAL;
		return -1;
	}

	/* prevent array from changing under us */
	rte_rwlock_read_lock(&arr->rwlock);

	if (arr->len == arr->count || arr->len - arr->count < n) {
		rte_errno = ENOSPC;
		goto out;
	}

	ret = find_next_n(arr, start, n, false);
out:
	rte_rwlock_read_unlock(&arr->rwlock);
	return ret;
}

int __rte_experimental
rte_fbarray_find_next_n_used(struct rte_fbarray *arr, int start, int n)
{
	int ret = -1;

	if (arr == NULL || start < 0 || start >= arr->len ||
			n < 0 || n > arr->len) {
		rte_errno = EINVAL;
		return -1;
	}

	/* prevent array from changing under us */
	rte_rwlock_read_lock(&arr->rwlock);

	if (arr->count < n) {
		rte_errno = ENOENT;
		goto out;
	}

	ret = find_next_n(arr, start, n, true);
out:
	rte_rwlock_read_unlock(&arr->rwlock);
	return ret;
}

int __rte_experimental
rte_fbarray_find_contig_free(struct rte_fbarray *arr, int start)
{
	int ret = -1;

	if (arr == NULL || start < 0 || start >= arr->len) {
		rte_errno = EINVAL;
		return -1;
	}

	/* prevent array from changing under us */
	rte_rwlock_read_lock(&arr->rwlock);

	if (arr->len == arr->count) {
		rte_errno = ENOSPC;
		goto out;
	}

	if (arr->count == 0) {
		ret = arr->len - start;
		goto out;
	}

	ret = find_contig(arr, start, false);
out:
	rte_rwlock_read_unlock(&arr->rwlock);
	return ret;
}

int __rte_experimental
rte_fbarray_find_contig_used(struct rte_fbarray *arr, int start)
{
	int ret;

	if (arr == NULL || start < 0 || start >= arr->len) {
		rte_errno = EINVAL;
		return -1;
	}

	/* prevent array from changing under us */
	rte_rwlock_read_lock(&arr->rwlock);

	ret = find_contig(arr, start, true);

	rte_rwlock_read_unlock(&arr->rwlock);
	return ret;
}

int __rte_experimental
rte_fbarray_find_idx(const struct rte_fbarray *arr, const void *elt)
{
	void *end;
	int ret;

	/*
	 * no need to synchronize as it doesn't matter if underlying data
	 * changes - we're doing pointer arithmetic here.
	 */
	if (arr == NULL || elt == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	end = RTE_PTR_ADD(arr->data, arr->elt_sz * arr->len);
	if (elt < arr->data || elt >= end) {
		rte_errno = EINVAL;
		return -1;
	}

	ret = RTE_PTR_DIFF(elt, arr->data) / arr->elt_sz;

	return ret;
}

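/*
 * Example (illustrative): with elt_sz == 32, an element pointer at
 * arr->data + 96 yields RTE_PTR_DIFF(elt, arr->data) / 32 == 3, i.e.
 * the element at index 3.
 */
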
void __rte_experimental
rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f)
{
	struct used_mask *msk;
	int i;

	if (arr == NULL || f == NULL) {
		rte_errno = EINVAL;
		return;
	}

	if (fully_validate(arr->name, arr->elt_sz, arr->len)) {
		fprintf(f, "Invalid file-backed array\n");
		return;
	}

	/* prevent array from changing under us */
	rte_rwlock_read_lock(&arr->rwlock);

	fprintf(f, "File-backed array: %s\n", arr->name);
	fprintf(f, "size: %i occupied: %i elt_sz: %i\n",
			arr->len, arr->count, arr->elt_sz);

	msk = get_used_mask(arr->data, arr->elt_sz, arr->len);

	for (i = 0; i < msk->n_masks; i++)
		fprintf(f, "msk idx %i: 0x%016" PRIx64 "\n", i, msk->data[i]);

	rte_rwlock_read_unlock(&arr->rwlock);
}