/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 6WIND S.A.
 */
#include <sys/queue.h>
#include <stdint.h>
#include <limits.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_tailq.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

#define RTE_MBUF_DYN_MZNAME "rte_mbuf_dyn"
21 struct mbuf_dynfield_elt {
22 struct rte_mbuf_dynfield params;
25 TAILQ_HEAD(mbuf_dynfield_list, rte_tailq_entry);
27 static struct rte_tailq_elem mbuf_dynfield_tailq = {
28 .name = "RTE_MBUF_DYNFIELD",
30 EAL_REGISTER_TAILQ(mbuf_dynfield_tailq);
32 struct mbuf_dynflag_elt {
33 struct rte_mbuf_dynflag params;
36 TAILQ_HEAD(mbuf_dynflag_list, rte_tailq_entry);
38 static struct rte_tailq_elem mbuf_dynflag_tailq = {
39 .name = "RTE_MBUF_DYNFLAG",
41 EAL_REGISTER_TAILQ(mbuf_dynflag_tailq);
45 * For each mbuf byte, free_space[i] != 0 if space is free.
46 * The value is the size of the biggest aligned element that
47 * can fit in the zone.
49 uint8_t free_space[sizeof(struct rte_mbuf)];
50 /** Bitfield of available flags. */
53 static struct mbuf_dyn_shm *shm;
55 /* Set the value of free_space[] according to the size and alignment of
56 * the free areas. This helps to select the best place when reserving a
57 * dynamic field. Assume tailq is locked.
62 size_t off, align, size, i;
64 /* first, erase previous info */
65 for (i = 0; i < sizeof(struct rte_mbuf); i++) {
66 if (shm->free_space[i])
67 shm->free_space[i] = 1;
71 while (off < sizeof(struct rte_mbuf)) {
72 /* get the size of the free zone */
73 for (size = 0; (off + size) < sizeof(struct rte_mbuf) &&
74 shm->free_space[off + size]; size++)
81 /* get the alignment of biggest object that can fit in
82 * the zone at this offset.
85 (off % (align << 1)) == 0 && (align << 1) <= size;
89 /* save it in free_space[] */
90 for (i = off; i < off + align; i++)
91 shm->free_space[i] = RTE_MAX(align, shm->free_space[i]);
/* Mark the area occupied by a mbuf field as available in the shm. */
#define mark_free(field)						\
	memset(&shm->free_space[offsetof(struct rte_mbuf, field)],	\
		1, sizeof(((struct rte_mbuf *)0)->field))
102 /* Allocate and initialize the shared memory. Assume tailq is locked */
104 init_shared_mem(void)
106 const struct rte_memzone *mz;
109 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
110 mz = rte_memzone_reserve_aligned(RTE_MBUF_DYN_MZNAME,
111 sizeof(struct mbuf_dyn_shm),
113 RTE_CACHE_LINE_SIZE);
115 mz = rte_memzone_lookup(RTE_MBUF_DYN_MZNAME);
122 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
123 /* init free_space, keep it sync'd with
124 * rte_mbuf_dynfield_copy().
126 memset(shm, 0, sizeof(*shm));
127 mark_free(dynfield1);
129 /* init free_flags */
130 for (mask = PKT_FIRST_FREE; mask <= PKT_LAST_FREE; mask <<= 1)
131 shm->free_flags |= mask;
139 /* check if this offset can be used */
141 check_offset(size_t offset, size_t size, size_t align)
145 if ((offset & (align - 1)) != 0)
147 if (offset + size > sizeof(struct rte_mbuf))
150 for (i = 0; i < size; i++) {
151 if (!shm->free_space[i + offset])
158 /* assume tailq is locked */
159 static struct mbuf_dynfield_elt *
160 __mbuf_dynfield_lookup(const char *name)
162 struct mbuf_dynfield_list *mbuf_dynfield_list;
163 struct mbuf_dynfield_elt *mbuf_dynfield;
164 struct rte_tailq_entry *te;
166 mbuf_dynfield_list = RTE_TAILQ_CAST(
167 mbuf_dynfield_tailq.head, mbuf_dynfield_list);
169 TAILQ_FOREACH(te, mbuf_dynfield_list, next) {
170 mbuf_dynfield = (struct mbuf_dynfield_elt *)te->data;
171 if (strcmp(name, mbuf_dynfield->params.name) == 0)
180 return mbuf_dynfield;
184 rte_mbuf_dynfield_lookup(const char *name, struct rte_mbuf_dynfield *params)
186 struct mbuf_dynfield_elt *mbuf_dynfield;
193 rte_mcfg_tailq_read_lock();
194 mbuf_dynfield = __mbuf_dynfield_lookup(name);
195 rte_mcfg_tailq_read_unlock();
197 if (mbuf_dynfield == NULL) {
203 memcpy(params, &mbuf_dynfield->params, sizeof(*params));
205 return mbuf_dynfield->offset;
208 static int mbuf_dynfield_cmp(const struct rte_mbuf_dynfield *params1,
209 const struct rte_mbuf_dynfield *params2)
211 if (strcmp(params1->name, params2->name))
213 if (params1->size != params2->size)
215 if (params1->align != params2->align)
217 if (params1->flags != params2->flags)
222 /* assume tailq is locked */
224 __rte_mbuf_dynfield_register_offset(const struct rte_mbuf_dynfield *params,
227 struct mbuf_dynfield_list *mbuf_dynfield_list;
228 struct mbuf_dynfield_elt *mbuf_dynfield = NULL;
229 struct rte_tailq_entry *te = NULL;
230 unsigned int best_zone = UINT_MAX;
234 if (shm == NULL && init_shared_mem() < 0)
237 mbuf_dynfield = __mbuf_dynfield_lookup(params->name);
238 if (mbuf_dynfield != NULL) {
239 if (req != SIZE_MAX && req != mbuf_dynfield->offset) {
243 if (mbuf_dynfield_cmp(params, &mbuf_dynfield->params) < 0) {
247 return mbuf_dynfield->offset;
250 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
255 if (req == SIZE_MAX) {
256 /* Find the best place to put this field: we search the
257 * lowest value of shm->free_space[offset]: the zones
258 * containing room for larger fields are kept for later.
261 offset < sizeof(struct rte_mbuf);
263 if (check_offset(offset, params->size,
264 params->align) == 0 &&
265 shm->free_space[offset] < best_zone) {
266 best_zone = shm->free_space[offset];
270 if (req == SIZE_MAX) {
275 if (check_offset(req, params->size, params->align) < 0) {
282 mbuf_dynfield_list = RTE_TAILQ_CAST(
283 mbuf_dynfield_tailq.head, mbuf_dynfield_list);
285 te = rte_zmalloc("MBUF_DYNFIELD_TAILQ_ENTRY", sizeof(*te), 0);
291 mbuf_dynfield = rte_zmalloc("mbuf_dynfield", sizeof(*mbuf_dynfield), 0);
292 if (mbuf_dynfield == NULL) {
298 ret = strlcpy(mbuf_dynfield->params.name, params->name,
299 sizeof(mbuf_dynfield->params.name));
300 if (ret < 0 || ret >= (int)sizeof(mbuf_dynfield->params.name)) {
301 rte_errno = ENAMETOOLONG;
302 rte_free(mbuf_dynfield);
306 memcpy(&mbuf_dynfield->params, params, sizeof(mbuf_dynfield->params));
307 mbuf_dynfield->offset = offset;
308 te->data = mbuf_dynfield;
310 TAILQ_INSERT_TAIL(mbuf_dynfield_list, te, next);
312 for (i = offset; i < offset + params->size; i++)
313 shm->free_space[i] = 0;
316 RTE_LOG(DEBUG, MBUF, "Registered dynamic field %s (sz=%zu, al=%zu, fl=0x%x) -> %zd\n",
317 params->name, params->size, params->align, params->flags,
324 rte_mbuf_dynfield_register_offset(const struct rte_mbuf_dynfield *params,
329 if (params->size >= sizeof(struct rte_mbuf)) {
333 if (!rte_is_power_of_2(params->align)) {
337 if (params->flags != 0) {
342 rte_mcfg_tailq_write_lock();
343 ret = __rte_mbuf_dynfield_register_offset(params, req);
344 rte_mcfg_tailq_write_unlock();
/* Register a dynamic field at any free offset (auto-placement). */
int
rte_mbuf_dynfield_register(const struct rte_mbuf_dynfield *params)
{
	return rte_mbuf_dynfield_register_offset(params, SIZE_MAX);
}
355 /* assume tailq is locked */
356 static struct mbuf_dynflag_elt *
357 __mbuf_dynflag_lookup(const char *name)
359 struct mbuf_dynflag_list *mbuf_dynflag_list;
360 struct mbuf_dynflag_elt *mbuf_dynflag;
361 struct rte_tailq_entry *te;
363 mbuf_dynflag_list = RTE_TAILQ_CAST(
364 mbuf_dynflag_tailq.head, mbuf_dynflag_list);
366 TAILQ_FOREACH(te, mbuf_dynflag_list, next) {
367 mbuf_dynflag = (struct mbuf_dynflag_elt *)te->data;
368 if (strncmp(name, mbuf_dynflag->params.name,
369 RTE_MBUF_DYN_NAMESIZE) == 0)
382 rte_mbuf_dynflag_lookup(const char *name,
383 struct rte_mbuf_dynflag *params)
385 struct mbuf_dynflag_elt *mbuf_dynflag;
392 rte_mcfg_tailq_read_lock();
393 mbuf_dynflag = __mbuf_dynflag_lookup(name);
394 rte_mcfg_tailq_read_unlock();
396 if (mbuf_dynflag == NULL) {
402 memcpy(params, &mbuf_dynflag->params, sizeof(*params));
404 return mbuf_dynflag->bitnum;
407 static int mbuf_dynflag_cmp(const struct rte_mbuf_dynflag *params1,
408 const struct rte_mbuf_dynflag *params2)
410 if (strcmp(params1->name, params2->name))
412 if (params1->flags != params2->flags)
417 /* assume tailq is locked */
419 __rte_mbuf_dynflag_register_bitnum(const struct rte_mbuf_dynflag *params,
422 struct mbuf_dynflag_list *mbuf_dynflag_list;
423 struct mbuf_dynflag_elt *mbuf_dynflag = NULL;
424 struct rte_tailq_entry *te = NULL;
428 if (shm == NULL && init_shared_mem() < 0)
431 mbuf_dynflag = __mbuf_dynflag_lookup(params->name);
432 if (mbuf_dynflag != NULL) {
433 if (req != UINT_MAX && req != mbuf_dynflag->bitnum) {
437 if (mbuf_dynflag_cmp(params, &mbuf_dynflag->params) < 0) {
441 return mbuf_dynflag->bitnum;
444 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
449 if (req == UINT_MAX) {
450 if (shm->free_flags == 0) {
454 bitnum = rte_bsf64(shm->free_flags);
456 if ((shm->free_flags & (1ULL << req)) == 0) {
463 mbuf_dynflag_list = RTE_TAILQ_CAST(
464 mbuf_dynflag_tailq.head, mbuf_dynflag_list);
466 te = rte_zmalloc("MBUF_DYNFLAG_TAILQ_ENTRY", sizeof(*te), 0);
472 mbuf_dynflag = rte_zmalloc("mbuf_dynflag", sizeof(*mbuf_dynflag), 0);
473 if (mbuf_dynflag == NULL) {
479 ret = strlcpy(mbuf_dynflag->params.name, params->name,
480 sizeof(mbuf_dynflag->params.name));
481 if (ret < 0 || ret >= (int)sizeof(mbuf_dynflag->params.name)) {
482 rte_free(mbuf_dynflag);
484 rte_errno = ENAMETOOLONG;
487 mbuf_dynflag->bitnum = bitnum;
488 te->data = mbuf_dynflag;
490 TAILQ_INSERT_TAIL(mbuf_dynflag_list, te, next);
492 shm->free_flags &= ~(1ULL << bitnum);
494 RTE_LOG(DEBUG, MBUF, "Registered dynamic flag %s (fl=0x%x) -> %u\n",
495 params->name, params->flags, bitnum);
501 rte_mbuf_dynflag_register_bitnum(const struct rte_mbuf_dynflag *params,
506 if (req >= RTE_SIZEOF_FIELD(struct rte_mbuf, ol_flags) * CHAR_BIT &&
512 rte_mcfg_tailq_write_lock();
513 ret = __rte_mbuf_dynflag_register_bitnum(params, req);
514 rte_mcfg_tailq_write_unlock();
/* Register a dynamic flag at any free bit (auto-placement). */
int
rte_mbuf_dynflag_register(const struct rte_mbuf_dynflag *params)
{
	return rte_mbuf_dynflag_register_bitnum(params, UINT_MAX);
}
525 void rte_mbuf_dyn_dump(FILE *out)
527 struct mbuf_dynfield_list *mbuf_dynfield_list;
528 struct mbuf_dynfield_elt *dynfield;
529 struct mbuf_dynflag_list *mbuf_dynflag_list;
530 struct mbuf_dynflag_elt *dynflag;
531 struct rte_tailq_entry *te;
534 rte_mcfg_tailq_write_lock();
536 fprintf(out, "Reserved fields:\n");
537 mbuf_dynfield_list = RTE_TAILQ_CAST(
538 mbuf_dynfield_tailq.head, mbuf_dynfield_list);
539 TAILQ_FOREACH(te, mbuf_dynfield_list, next) {
540 dynfield = (struct mbuf_dynfield_elt *)te->data;
541 fprintf(out, " name=%s offset=%zd size=%zd align=%zd flags=%x\n",
542 dynfield->params.name, dynfield->offset,
543 dynfield->params.size, dynfield->params.align,
544 dynfield->params.flags);
546 fprintf(out, "Reserved flags:\n");
547 mbuf_dynflag_list = RTE_TAILQ_CAST(
548 mbuf_dynflag_tailq.head, mbuf_dynflag_list);
549 TAILQ_FOREACH(te, mbuf_dynflag_list, next) {
550 dynflag = (struct mbuf_dynflag_elt *)te->data;
551 fprintf(out, " name=%s bitnum=%u flags=%x\n",
552 dynflag->params.name, dynflag->bitnum,
553 dynflag->params.flags);
555 fprintf(out, "Free space in mbuf (0 = occupied, value = free zone alignment):\n");
556 for (i = 0; i < sizeof(struct rte_mbuf); i++) {
558 fprintf(out, " %4.4zx: ", i);
559 fprintf(out, "%2.2x%s", shm->free_space[i],
560 (i % 8 != 7) ? " " : "\n");
562 fprintf(out, "Free bit in mbuf->ol_flags (0 = occupied, 1 = free):\n");
563 for (i = 0; i < sizeof(uint64_t) * CHAR_BIT; i++) {
565 fprintf(out, " %4.4zx: ", i);
566 fprintf(out, "%1.1x%s", (shm->free_flags & (1ULL << i)) ? 1 : 0,
567 (i % 8 != 7) ? " " : "\n");
570 rte_mcfg_tailq_write_unlock();