1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
12 #include <sys/queue.h>
14 #include <rte_common.h>
15 #include <rte_errno.h>
16 #include <rte_debug.h>
18 #include <rte_memory.h>
19 #include <rte_memcpy.h>
20 #include <rte_launch.h>
22 #include <rte_per_lcore.h>
23 #include <rte_lcore.h>
24 #include <rte_branch_prediction.h>
26 #include <rte_mempool.h>
28 #include <rte_random.h>
29 #include <rte_cycles.h>
30 #include <rte_malloc.h>
31 #include <rte_ether.h>
34 #include <rte_mbuf_dyn.h>
38 #define MEMPOOL_CACHE_SIZE 32
39 #define MBUF_DATA_SIZE 2048
41 #define MBUF_TEST_DATA_LEN 1464
42 #define MBUF_TEST_DATA_LEN2 50
43 #define MBUF_TEST_DATA_LEN3 256
44 #define MBUF_TEST_HDR1_LEN 20
45 #define MBUF_TEST_HDR2_LEN 30
46 #define MBUF_TEST_ALL_HDRS_LEN (MBUF_TEST_HDR1_LEN+MBUF_TEST_HDR2_LEN)
47 #define MBUF_TEST_SEG_SIZE 64
48 #define MBUF_TEST_BURST 8
49 #define EXT_BUF_TEST_DATA_LEN 1024
50 #define MBUF_MAX_SEG 16
51 #define MBUF_NO_HEADER 0
53 #define MBUF_NEG_TEST_READ 2
54 #define VAL_NAME(flag) { flag, #flag }
56 /* chain length in bulk test */
59 /* size of private data for mbuf in pktmbuf_pool2 */
60 #define MBUF2_PRIV_SIZE 128
62 #define REFCNT_MAX_ITER 64
63 #define REFCNT_MAX_TIMEOUT 10
64 #define REFCNT_MAX_REF (RTE_MAX_LCORE)
65 #define REFCNT_MBUF_NUM 64
66 #define REFCNT_RING_SIZE (REFCNT_MBUF_NUM * REFCNT_MAX_REF)
68 #define MAGIC_DATA 0x42424242
70 #define MAKE_STRING(x) # x
72 #ifdef RTE_MBUF_REFCNT_ATOMIC
74 static volatile uint32_t refcnt_stop_workers;
75 static unsigned refcnt_lcore[RTE_MAX_LCORE];
83 * #. Allocate a mbuf pool.
85 * - The pool contains NB_MBUF elements, where each mbuf is MBUF_SIZE
88 * #. Test multiple allocations of mbufs from this pool.
90 * - Allocate NB_MBUF and store pointers in a table.
91 * - If an allocation fails, return an error.
92 * - Free all these mbufs.
93 * - Repeat the same test to check that mbufs were freed correctly.
95 * #. Test data manipulation in pktmbuf.
98 * - Append data using rte_pktmbuf_append().
99 * - Test for error in rte_pktmbuf_append() when len is too large.
100 * - Trim data at the end of mbuf using rte_pktmbuf_trim().
101 * - Test for error in rte_pktmbuf_trim() when len is too large.
102 * - Prepend a header using rte_pktmbuf_prepend().
103 * - Test for error in rte_pktmbuf_prepend() when len is too large.
104 * - Remove data at the beginning of mbuf using rte_pktmbuf_adj().
105 * - Test for error in rte_pktmbuf_adj() when len is too large.
106 * - Check that appended data is not corrupt.
108 * - Between all these tests, check data_len and pkt_len, and
109 * that the mbuf is contiguous.
110 * - Repeat the test to check that allocation operations
111 * reinitialize the mbuf correctly.
113 * #. Test packet cloning
114 * - Clone a mbuf and verify the data
115 * - Clone the cloned mbuf and verify the data
116 * - Attach a mbuf to another that does not have the same priv_size.
/*
 * Print a test-failure message including the current source line, then
 * jump to the local "fail" label of the calling function.
 * NOTE(review): the macro's continuation lines (the "goto fail;" and the
 * closing "} while (0)") are missing from this excerpt — confirm against
 * the full file before editing.
 */
119 #define GOTO_FAIL(str, ...) do { \
120 printf("mbuf test FAILED (l.%d): <" str ">\n", \
121 __LINE__, ##__VA_ARGS__); \
126 * test data manipulation in mbuf with non-ascii data
/*
 * Exercise basic pktmbuf data handling with non-ASCII content:
 * allocate one mbuf, append MBUF_TEST_DATA_LEN bytes, fill them with
 * 0xff (non-printable), then dump the mbuf to stdout.
 * NOTE(review): several original lines are missing from this excerpt
 * (storage class/return type, opening brace, NULL checks guarding the
 * GOTO_FAIL calls, the epilogue and "fail:" label) — TODO confirm.
 */
129 test_pktmbuf_with_non_ascii_data(struct rte_mempool *pktmbuf_pool)
131 struct rte_mbuf *m = NULL;
134 m = rte_pktmbuf_alloc(pktmbuf_pool);
/* presumably guarded by an "if (m == NULL)" on a missing line */
136 GOTO_FAIL("Cannot allocate mbuf");
/* a freshly allocated pktmbuf must have zero packet length */
137 if (rte_pktmbuf_pkt_len(m) != 0)
138 GOTO_FAIL("Bad length");
140 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
142 GOTO_FAIL("Cannot append data");
143 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
144 GOTO_FAIL("Bad pkt length");
145 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
146 GOTO_FAIL("Bad data length");
/* fill the appended region with a non-ASCII byte pattern */
147 memset(data, 0xff, rte_pktmbuf_pkt_len(m));
148 if (!rte_pktmbuf_is_contiguous(m))
149 GOTO_FAIL("Buffer should be continuous");
150 rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN);
164 * test data manipulation in mbuf
/*
 * Exercise the core single-mbuf pktmbuf API on one allocation:
 * append/trim at the tail, prepend/adj at the head, including the
 * failure paths (requests larger than tailroom/headroom/data_len),
 * and verify pkt_len/data_len and contiguity after each step.
 * NOTE(review): many original lines are missing from this excerpt
 * (NULL checks, braces, the data-verification read inside the final
 * loop, epilogue and "fail:" label) — TODO confirm against full file.
 */
167 test_one_pktmbuf(struct rte_mempool *pktmbuf_pool)
169 struct rte_mbuf *m = NULL;
170 char *data, *data2, *hdr;
173 printf("Test pktmbuf API\n");
177 m = rte_pktmbuf_alloc(pktmbuf_pool);
179 GOTO_FAIL("Cannot allocate mbuf");
180 if (rte_pktmbuf_pkt_len(m) != 0)
181 GOTO_FAIL("Bad length");
183 rte_pktmbuf_dump(stdout, m, 0);
187 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
189 GOTO_FAIL("Cannot append data");
190 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
191 GOTO_FAIL("Bad pkt length");
192 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
193 GOTO_FAIL("Bad data length");
/* 0x66 fill pattern is re-checked byte-by-byte at the end of the test */
194 memset(data, 0x66, rte_pktmbuf_pkt_len(m));
195 if (!rte_pktmbuf_is_contiguous(m))
196 GOTO_FAIL("Buffer should be continuous");
197 rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN);
/* dump length larger than the data — dump must clamp, not crash */
198 rte_pktmbuf_dump(stdout, m, 2*MBUF_TEST_DATA_LEN);
200 /* this append should fail */
202 data2 = rte_pktmbuf_append(m, (uint16_t)(rte_pktmbuf_tailroom(m) + 1));
204 GOTO_FAIL("Append should not succeed");
206 /* append some more data */
208 data2 = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
210 GOTO_FAIL("Cannot append data");
211 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
212 GOTO_FAIL("Bad pkt length");
213 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
214 GOTO_FAIL("Bad data length");
215 if (!rte_pktmbuf_is_contiguous(m))
216 GOTO_FAIL("Buffer should be continuous");
218 /* trim data at the end of mbuf */
220 if (rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN2) < 0)
221 GOTO_FAIL("Cannot trim data");
222 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
223 GOTO_FAIL("Bad pkt length");
224 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
225 GOTO_FAIL("Bad data length");
226 if (!rte_pktmbuf_is_contiguous(m))
227 GOTO_FAIL("Buffer should be continuous");
229 /* this trim should fail */
231 if (rte_pktmbuf_trim(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) == 0)
232 GOTO_FAIL("trim should not succeed");
234 /* prepend one header */
236 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN);
238 GOTO_FAIL("Cannot prepend");
/* prepend must move the data pointer back by exactly the header size */
239 if (data - hdr != MBUF_TEST_HDR1_LEN)
240 GOTO_FAIL("Prepend failed");
241 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
242 GOTO_FAIL("Bad pkt length");
243 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
244 GOTO_FAIL("Bad data length");
245 if (!rte_pktmbuf_is_contiguous(m))
246 GOTO_FAIL("Buffer should be continuous");
247 memset(hdr, 0x55, MBUF_TEST_HDR1_LEN);
249 /* prepend another header */
251 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR2_LEN);
253 GOTO_FAIL("Cannot prepend");
254 if (data - hdr != MBUF_TEST_ALL_HDRS_LEN)
255 GOTO_FAIL("Prepend failed");
256 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
257 GOTO_FAIL("Bad pkt length");
258 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
259 GOTO_FAIL("Bad data length");
260 if (!rte_pktmbuf_is_contiguous(m))
261 GOTO_FAIL("Buffer should be continuous");
262 memset(hdr, 0x55, MBUF_TEST_HDR2_LEN);
/* run the sanity checker both as a header mbuf and as a segment */
264 rte_mbuf_sanity_check(m, 1);
265 rte_mbuf_sanity_check(m, 0);
266 rte_pktmbuf_dump(stdout, m, 0);
268 /* this prepend should fail */
270 hdr = rte_pktmbuf_prepend(m, (uint16_t)(rte_pktmbuf_headroom(m) + 1));
272 GOTO_FAIL("prepend should not succeed");
274 /* remove data at beginning of mbuf (adj) */
276 if (data != rte_pktmbuf_adj(m, MBUF_TEST_ALL_HDRS_LEN))
277 GOTO_FAIL("rte_pktmbuf_adj failed");
278 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
279 GOTO_FAIL("Bad pkt length");
280 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
281 GOTO_FAIL("Bad data length");
282 if (!rte_pktmbuf_is_contiguous(m))
283 GOTO_FAIL("Buffer should be continuous");
285 /* this adj should fail */
287 if (rte_pktmbuf_adj(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) != NULL)
288 GOTO_FAIL("rte_pktmbuf_adj should not succeed");
292 if (!rte_pktmbuf_is_contiguous(m))
293 GOTO_FAIL("Buffer should be continuous");
/* verify the 0x66 fill survived the trim/prepend/adj round-trips;
 * the byte comparison itself is on a missing line of this excerpt */
295 for (i=0; i<MBUF_TEST_DATA_LEN; i++) {
297 GOTO_FAIL("Data corrupted at offset %u", i);
/*
 * Return the relevant reference count for clone tests: for mbufs with a
 * pinned external buffer the shared-info external refcnt is the one that
 * tracks attachments, otherwise the mbuf's own refcnt is used.
 */
313 testclone_refcnt_read(struct rte_mbuf *m)
315 return RTE_MBUF_HAS_PINNED_EXTBUF(m) ?
316 rte_mbuf_ext_refcnt_read(m->shinfo) :
317 rte_mbuf_refcnt_read(m);
/*
 * Test rte_pktmbuf_clone(): clone a single mbuf, a two-segment chain,
 * and a clone of a clone, verifying the MAGIC_DATA payload seen through
 * each clone and the expected reference counts on the original segments.
 * NOTE(review): NULL checks, the MAGIC_DATA stores, frees of the
 * original, the final "return 0" and the "fail:" label are on lines
 * missing from this excerpt — TODO confirm against the full file.
 */
321 testclone_testupdate_testdetach(struct rte_mempool *pktmbuf_pool,
322 struct rte_mempool *clone_pool)
324 struct rte_mbuf *m = NULL;
325 struct rte_mbuf *clone = NULL;
326 struct rte_mbuf *clone2 = NULL;
/* unaligned type: the payload offset need not be 4-byte aligned */
327 unaligned_uint32_t *data;
330 m = rte_pktmbuf_alloc(pktmbuf_pool);
332 GOTO_FAIL("ooops not allocating mbuf");
334 if (rte_pktmbuf_pkt_len(m) != 0)
335 GOTO_FAIL("Bad length");
337 rte_pktmbuf_append(m, sizeof(uint32_t));
338 data = rte_pktmbuf_mtod(m, unaligned_uint32_t *);
341 /* clone the allocated mbuf */
342 clone = rte_pktmbuf_clone(m, clone_pool);
344 GOTO_FAIL("cannot clone data\n");
/* a clone shares the data buffer, so it must see MAGIC_DATA */
346 data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *);
347 if (*data != MAGIC_DATA)
348 GOTO_FAIL("invalid data in clone\n");
/* original + one clone => refcnt 2 */
350 if (testclone_refcnt_read(m) != 2)
351 GOTO_FAIL("invalid refcnt in m\n");
354 rte_pktmbuf_free(clone);
357 /* same test with a chained mbuf */
358 m->next = rte_pktmbuf_alloc(pktmbuf_pool);
360 GOTO_FAIL("Next Pkt Null\n");
363 rte_pktmbuf_append(m->next, sizeof(uint32_t));
/* keep pkt_len of the head consistent with the two-segment chain */
364 m->pkt_len = 2 * sizeof(uint32_t);
366 data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
369 clone = rte_pktmbuf_clone(m, clone_pool);
371 GOTO_FAIL("cannot clone data\n");
373 data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *);
374 if (*data != MAGIC_DATA)
375 GOTO_FAIL("invalid data in clone\n");
377 data = rte_pktmbuf_mtod(clone->next, unaligned_uint32_t *);
378 if (*data != MAGIC_DATA)
379 GOTO_FAIL("invalid data in clone->next\n");
/* cloning a chain must bump the refcnt of every segment */
381 if (testclone_refcnt_read(m) != 2)
382 GOTO_FAIL("invalid refcnt in m\n");
384 if (testclone_refcnt_read(m->next) != 2)
385 GOTO_FAIL("invalid refcnt in m->next\n");
387 /* try to clone the clone */
389 clone2 = rte_pktmbuf_clone(clone, clone_pool);
391 GOTO_FAIL("cannot clone the clone\n");
393 data = rte_pktmbuf_mtod(clone2, unaligned_uint32_t *);
394 if (*data != MAGIC_DATA)
395 GOTO_FAIL("invalid data in clone2\n");
397 data = rte_pktmbuf_mtod(clone2->next, unaligned_uint32_t *);
398 if (*data != MAGIC_DATA)
399 GOTO_FAIL("invalid data in clone2->next\n");
/* two live clones of the same data => refcnt 3 on each segment */
401 if (testclone_refcnt_read(m) != 3)
402 GOTO_FAIL("invalid refcnt in m\n");
404 if (testclone_refcnt_read(m->next) != 3)
405 GOTO_FAIL("invalid refcnt in m->next\n");
409 rte_pktmbuf_free(clone);
410 rte_pktmbuf_free(clone2);
415 printf("%s ok\n", __func__);
/* failure path: release whatever was allocated before the error */
422 rte_pktmbuf_free(clone);
424 rte_pktmbuf_free(clone2);
/*
 * Test rte_pktmbuf_copy(): deep-copy a simple mbuf, a cloned mbuf
 * (the copy must not itself be a clone), and a two-segment chain
 * (the copy must be flattened into one segment), plus copies with a
 * non-zero offset and with a truncating length.
 * NOTE(review): NULL checks, the MAGIC_DATA stores, frees of the
 * original, the success return and the "fail:" label are on lines
 * missing from this excerpt — TODO confirm against the full file.
 */
429 test_pktmbuf_copy(struct rte_mempool *pktmbuf_pool,
430 struct rte_mempool *clone_pool)
432 struct rte_mbuf *m = NULL;
433 struct rte_mbuf *copy = NULL;
434 struct rte_mbuf *copy2 = NULL;
435 struct rte_mbuf *clone = NULL;
436 unaligned_uint32_t *data;
439 m = rte_pktmbuf_alloc(pktmbuf_pool);
441 GOTO_FAIL("ooops not allocating mbuf");
443 if (rte_pktmbuf_pkt_len(m) != 0)
444 GOTO_FAIL("Bad length");
446 rte_pktmbuf_append(m, sizeof(uint32_t));
447 data = rte_pktmbuf_mtod(m, unaligned_uint32_t *);
450 /* copy the allocated mbuf */
/* offset 0, UINT32_MAX length => copy the whole packet */
451 copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX);
453 GOTO_FAIL("cannot copy data\n");
455 if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t))
456 GOTO_FAIL("copy length incorrect\n");
458 if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t))
459 GOTO_FAIL("copy data length incorrect\n");
461 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
462 if (*data != MAGIC_DATA)
463 GOTO_FAIL("invalid data in copy\n");
466 rte_pktmbuf_free(copy);
469 /* same test with a cloned mbuf */
470 clone = rte_pktmbuf_clone(m, clone_pool);
472 GOTO_FAIL("cannot clone data\n");
/* pinned-extbuf pools produce EXTBUF attachments instead of
 * indirect clones — accept either form as a valid clone */
474 if ((!RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
475 !RTE_MBUF_CLONED(clone)) ||
476 (RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
477 !RTE_MBUF_HAS_EXTBUF(clone)))
478 GOTO_FAIL("clone did not give a cloned mbuf\n");
480 copy = rte_pktmbuf_copy(clone, pktmbuf_pool, 0, UINT32_MAX);
482 GOTO_FAIL("cannot copy cloned mbuf\n");
/* a copy is always a self-contained deep copy, never indirect */
484 if (RTE_MBUF_CLONED(copy))
485 GOTO_FAIL("copy of clone is cloned?\n");
487 if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t))
488 GOTO_FAIL("copy clone length incorrect\n");
490 if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t))
491 GOTO_FAIL("copy clone data length incorrect\n");
493 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
494 if (*data != MAGIC_DATA)
495 GOTO_FAIL("invalid data in clone copy\n");
496 rte_pktmbuf_free(clone);
497 rte_pktmbuf_free(copy);
502 /* same test with a chained mbuf */
503 m->next = rte_pktmbuf_alloc(pktmbuf_pool);
505 GOTO_FAIL("Next Pkt Null\n");
508 rte_pktmbuf_append(m->next, sizeof(uint32_t));
509 m->pkt_len = 2 * sizeof(uint32_t);
/* second segment carries a distinct marker to detect ordering */
510 data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
511 *data = MAGIC_DATA + 1;
513 copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX);
515 GOTO_FAIL("cannot copy data\n");
517 if (rte_pktmbuf_pkt_len(copy) != 2 * sizeof(uint32_t))
518 GOTO_FAIL("chain copy length incorrect\n");
/* data_len == pkt_len: the chain must be coalesced into one segment */
520 if (rte_pktmbuf_data_len(copy) != 2 * sizeof(uint32_t))
521 GOTO_FAIL("chain copy data length incorrect\n");
523 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
524 if (data[0] != MAGIC_DATA || data[1] != MAGIC_DATA + 1)
525 GOTO_FAIL("invalid data in copy\n");
/* copy2 is still NULL here; free(NULL) is a no-op for pktmbuf —
 * NOTE(review): presumably kept for symmetry, confirm intent */
527 rte_pktmbuf_free(copy2);
529 /* test offset copy */
/* skip the first word: only the second segment's marker remains */
530 copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool,
531 sizeof(uint32_t), UINT32_MAX);
533 GOTO_FAIL("cannot copy the copy\n");
535 if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t))
536 GOTO_FAIL("copy with offset, length incorrect\n");
538 if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t))
539 GOTO_FAIL("copy with offset, data length incorrect\n");
541 data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *);
542 if (data[0] != MAGIC_DATA + 1)
543 GOTO_FAIL("copy with offset, invalid data\n");
545 rte_pktmbuf_free(copy2);
547 /* test truncation copy */
/* length capped to one word: only the first marker is copied */
548 copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool,
549 0, sizeof(uint32_t));
551 GOTO_FAIL("cannot copy the copy\n");
553 if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t))
554 GOTO_FAIL("copy with truncate, length incorrect\n");
556 if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t))
557 GOTO_FAIL("copy with truncate, data length incorrect\n");
559 data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *);
560 if (data[0] != MAGIC_DATA)
561 GOTO_FAIL("copy with truncate, invalid data\n");
565 rte_pktmbuf_free(copy);
566 rte_pktmbuf_free(copy2);
571 printf("%s ok\n", __func__);
/* failure path: release everything allocated before the error */
578 rte_pktmbuf_free(copy);
580 rte_pktmbuf_free(copy2);
/*
 * Test rte_pktmbuf_attach()/detach() across pools with different
 * data-room and private-area sizes: attach mbufs from a zero-data-room
 * pool (pktmbuf_pool2, MBUF2_PRIV_SIZE private bytes) to a normal mbuf,
 * verify data pointers, headroom and refcnts, then detach and verify
 * the original data pointers are restored.
 * NOTE(review): NULL checks after each alloc, the final free of m,
 * the success return and the "fail:" label are on lines missing from
 * this excerpt — TODO confirm against the full file.
 */
585 test_attach_from_different_pool(struct rte_mempool *pktmbuf_pool,
586 struct rte_mempool *pktmbuf_pool2)
588 struct rte_mbuf *m = NULL;
589 struct rte_mbuf *clone = NULL;
590 struct rte_mbuf *clone2 = NULL;
591 char *data, *c_data, *c_data2;
594 m = rte_pktmbuf_alloc(pktmbuf_pool);
596 GOTO_FAIL("cannot allocate mbuf");
598 if (rte_pktmbuf_pkt_len(m) != 0)
599 GOTO_FAIL("Bad length");
601 data = rte_pktmbuf_mtod(m, char *);
603 /* allocate a new mbuf from the second pool, and attach it to the first
605 clone = rte_pktmbuf_alloc(pktmbuf_pool2);
607 GOTO_FAIL("cannot allocate mbuf from second pool\n");
609 /* check data room size and priv size, and erase priv */
610 if (rte_pktmbuf_data_room_size(clone->pool) != 0)
611 GOTO_FAIL("data room size should be 0\n");
612 if (rte_pktmbuf_priv_size(clone->pool) != MBUF2_PRIV_SIZE)
/* NOTE(review): message says "data room size" but this checks the
 * priv size — looks like a copy-paste in the message text */
613 GOTO_FAIL("data room size should be %d\n", MBUF2_PRIV_SIZE);
/* private area sits immediately after the mbuf header */
614 memset(clone + 1, 0, MBUF2_PRIV_SIZE);
616 /* save data pointer to compare it after detach() */
617 c_data = rte_pktmbuf_mtod(clone, char *);
618 if (c_data != (char *)clone + sizeof(*clone) + MBUF2_PRIV_SIZE)
619 GOTO_FAIL("bad data pointer in clone");
/* zero data room => no headroom before attach */
620 if (rte_pktmbuf_headroom(clone) != 0)
621 GOTO_FAIL("bad headroom in clone");
623 rte_pktmbuf_attach(clone, m);
/* after attach the clone must point into m's buffer */
625 if (rte_pktmbuf_mtod(clone, char *) != data)
626 GOTO_FAIL("clone was not attached properly\n");
627 if (rte_pktmbuf_headroom(clone) != RTE_PKTMBUF_HEADROOM)
628 GOTO_FAIL("bad headroom in clone after attach");
629 if (rte_mbuf_refcnt_read(m) != 2)
630 GOTO_FAIL("invalid refcnt in m\n");
632 /* allocate a new mbuf from the second pool, and attach it to the first
634 clone2 = rte_pktmbuf_alloc(pktmbuf_pool2);
636 GOTO_FAIL("cannot allocate clone2 from second pool\n");
638 /* check data room size and priv size, and erase priv */
639 if (rte_pktmbuf_data_room_size(clone2->pool) != 0)
640 GOTO_FAIL("data room size should be 0\n");
641 if (rte_pktmbuf_priv_size(clone2->pool) != MBUF2_PRIV_SIZE)
642 GOTO_FAIL("data room size should be %d\n", MBUF2_PRIV_SIZE);
643 memset(clone2 + 1, 0, MBUF2_PRIV_SIZE);
645 /* save data pointer to compare it after detach() */
646 c_data2 = rte_pktmbuf_mtod(clone2, char *);
647 if (c_data2 != (char *)clone2 + sizeof(*clone2) + MBUF2_PRIV_SIZE)
648 GOTO_FAIL("bad data pointer in clone2");
649 if (rte_pktmbuf_headroom(clone2) != 0)
650 GOTO_FAIL("bad headroom in clone2");
/* attaching to an indirect mbuf must re-attach to the underlying m */
652 rte_pktmbuf_attach(clone2, clone);
654 if (rte_pktmbuf_mtod(clone2, char *) != data)
655 GOTO_FAIL("clone2 was not attached properly\n");
656 if (rte_pktmbuf_headroom(clone2) != RTE_PKTMBUF_HEADROOM)
657 GOTO_FAIL("bad headroom in clone2 after attach");
658 if (rte_mbuf_refcnt_read(m) != 3)
659 GOTO_FAIL("invalid refcnt in m\n");
661 /* detach the clones */
662 rte_pktmbuf_detach(clone);
/* detach restores the clone's own (embedded) data pointer */
663 if (c_data != rte_pktmbuf_mtod(clone, char *))
664 GOTO_FAIL("clone was not detached properly\n");
665 if (rte_mbuf_refcnt_read(m) != 2)
666 GOTO_FAIL("invalid refcnt in m\n");
668 rte_pktmbuf_detach(clone2);
669 if (c_data2 != rte_pktmbuf_mtod(clone2, char *))
670 GOTO_FAIL("clone2 was not detached properly\n");
671 if (rte_mbuf_refcnt_read(m) != 1)
672 GOTO_FAIL("invalid refcnt in m\n");
674 /* free the clones and the initial mbuf */
675 rte_pktmbuf_free(clone2);
676 rte_pktmbuf_free(clone);
678 printf("%s ok\n", __func__);
/* failure path: release the clones allocated before the error */
685 rte_pktmbuf_free(clone);
687 rte_pktmbuf_free(clone2);
692 * test allocation and free of mbufs
/*
 * Exhaust the mempool by allocating NB_MBUF mbufs, verify that further
 * alloc/clone attempts fail while the pool is empty, then free them all.
 * NOTE(review): NULL checks, braces, the error-path bookkeeping and the
 * return are on lines missing from this excerpt — TODO confirm.
 */
695 test_pktmbuf_pool(struct rte_mempool *pktmbuf_pool)
698 struct rte_mbuf *m[NB_MBUF];
/* presumably initializes m[] to NULL on the missing body line */
701 for (i=0; i<NB_MBUF; i++)
704 /* alloc NB_MBUF mbufs */
705 for (i=0; i<NB_MBUF; i++) {
706 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
708 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
712 struct rte_mbuf *extra = NULL;
/* pool is now empty — this alloc is expected to fail */
713 extra = rte_pktmbuf_alloc(pktmbuf_pool);
715 printf("Error pool not empty");
/* cloning also needs a fresh mbuf, so it must fail too */
718 extra = rte_pktmbuf_clone(m[0], pktmbuf_pool);
720 printf("Error pool not empty");
724 for (i=0; i<NB_MBUF; i++) {
726 rte_pktmbuf_free(m[i]);
733 * test bulk allocation and bulk free of mbufs
/*
 * Test rte_pktmbuf_alloc_bulk()/rte_pktmbuf_free_bulk() against two
 * dedicated cache-less pools: single bulk alloc + staged bulk free,
 * staged bulk alloc + single bulk free, bulk free of one long chain,
 * and bulk free of multiple chains whose segments span both pools.
 * NOTE(review): error-path "goto" lines, loop braces and the final
 * return are on lines missing from this excerpt — TODO confirm.
 */
736 test_pktmbuf_pool_bulk(void)
738 struct rte_mempool *pool = NULL;
739 struct rte_mempool *pool2 = NULL;
742 struct rte_mbuf *mbufs[NB_MBUF];
745 /* We cannot use the preallocated mbuf pools because their caches
746 * prevent us from bulk allocating all objects in them.
747 * So we create our own mbuf pools without caches.
749 printf("Create mbuf pools for bulk allocation.\n");
/* cache size 0 so every object is visible to avail/empty/full checks */
750 pool = rte_pktmbuf_pool_create("test_pktmbuf_bulk",
751 NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
753 printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n",
757 pool2 = rte_pktmbuf_pool_create("test_pktmbuf_bulk2",
758 NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
760 printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n",
765 /* Preconditions: Mempools must be full. */
766 if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) {
767 printf("Test precondition failed: mempools not full\n");
770 if (!(rte_mempool_avail_count(pool) == NB_MBUF &&
771 rte_mempool_avail_count(pool2) == NB_MBUF)) {
772 printf("Test precondition failed: mempools: %u+%u != %u+%u",
773 rte_mempool_avail_count(pool),
774 rte_mempool_avail_count(pool2),
779 printf("Test single bulk alloc, followed by multiple bulk free.\n");
781 /* Bulk allocate all mbufs in the pool, in one go. */
782 ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF);
784 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
787 /* Test that they have been removed from the pool. */
788 if (!rte_mempool_empty(pool)) {
789 printf("mempool not empty\n");
792 /* Bulk free all mbufs, in four steps. */
793 RTE_BUILD_BUG_ON(NB_MBUF % 4 != 0);
794 for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) {
795 rte_pktmbuf_free_bulk(&mbufs[i], NB_MBUF / 4);
796 /* Test that they have been returned to the pool. */
797 if (rte_mempool_avail_count(pool) != i + NB_MBUF / 4) {
798 printf("mempool avail count incorrect\n");
803 printf("Test multiple bulk alloc, followed by single bulk free.\n");
805 /* Bulk allocate all mbufs in the pool, in four steps. */
806 for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) {
807 ret = rte_pktmbuf_alloc_bulk(pool, &mbufs[i], NB_MBUF / 4);
809 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
813 /* Test that they have been removed from the pool. */
814 if (!rte_mempool_empty(pool)) {
815 printf("mempool not empty\n");
818 /* Bulk free all mbufs, in one go. */
819 rte_pktmbuf_free_bulk(mbufs, NB_MBUF);
820 /* Test that they have been returned to the pool. */
821 if (!rte_mempool_full(pool)) {
822 printf("mempool not full\n");
826 printf("Test bulk free of single long chain.\n");
828 /* Bulk allocate all mbufs in the pool, in one go. */
829 ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF);
831 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
834 /* Create a long mbuf chain. */
835 for (i = 1; i < NB_MBUF; i++) {
836 ret = rte_pktmbuf_chain(mbufs[0], mbufs[i]);
838 printf("rte_pktmbuf_chain() failed: %d\n", ret);
843 /* Free the mbuf chain containing all the mbufs. */
/* count of 1: freeing the head must release every chained segment */
844 rte_pktmbuf_free_bulk(mbufs, 1);
845 /* Test that they have been returned to the pool. */
846 if (!rte_mempool_full(pool)) {
847 printf("mempool not full\n");
851 printf("Test bulk free of multiple chains using multiple pools.\n");
853 /* Create mbuf chains containing mbufs from different pools. */
854 RTE_BUILD_BUG_ON(CHAIN_LEN % 2 != 0);
855 RTE_BUILD_BUG_ON(NB_MBUF % (CHAIN_LEN / 2) != 0);
856 for (i = 0; i < NB_MBUF * 2; i++) {
/* alternate the source pool in groups of four segments */
857 m = rte_pktmbuf_alloc((i & 4) ? pool2 : pool);
859 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
862 if ((i % CHAIN_LEN) == 0)
863 mbufs[i / CHAIN_LEN] = m;
865 rte_pktmbuf_chain(mbufs[i / CHAIN_LEN], m);
867 /* Test that both pools have been emptied. */
868 if (!(rte_mempool_empty(pool) && rte_mempool_empty(pool2))) {
869 printf("mempools not empty\n");
872 /* Free one mbuf chain. */
873 rte_pktmbuf_free_bulk(mbufs, 1);
874 /* Test that the segments have been returned to the pools. */
875 if (!(rte_mempool_avail_count(pool) == CHAIN_LEN / 2 &&
876 rte_mempool_avail_count(pool2) == CHAIN_LEN / 2)) {
877 printf("all segments of first mbuf have not been returned\n");
880 /* Free the remaining mbuf chains. */
881 rte_pktmbuf_free_bulk(&mbufs[1], NB_MBUF * 2 / CHAIN_LEN - 1);
882 /* Test that they have been returned to the pools. */
883 if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) {
884 printf("mempools not full\n");
/* common exit: destroy both private pools regardless of outcome */
895 printf("Free mbuf pools for bulk allocation.\n");
896 rte_mempool_free(pool);
897 rte_mempool_free(pool2);
902 * test that the pointer to the data on a packet mbuf is set properly
/*
 * Verify that rte_pktmbuf_alloc() resets data_off: allocate all mbufs,
 * shift data_off by 64 on each, free them, then re-allocate and check
 * data_off is back to RTE_PKTMBUF_HEADROOM.
 * NOTE(review): NULL checks, braces and the return are on lines
 * missing from this excerpt — TODO confirm against the full file.
 */
905 test_pktmbuf_pool_ptr(struct rte_mempool *pktmbuf_pool)
908 struct rte_mbuf *m[NB_MBUF];
911 for (i=0; i<NB_MBUF; i++)
914 /* alloc NB_MBUF mbufs */
915 for (i=0; i<NB_MBUF; i++) {
916 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
918 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
/* deliberately dirty data_off; the next alloc must reset it */
922 m[i]->data_off += 64;
926 for (i=0; i<NB_MBUF; i++) {
928 rte_pktmbuf_free(m[i]);
931 for (i=0; i<NB_MBUF; i++)
934 /* alloc NB_MBUF mbufs */
935 for (i=0; i<NB_MBUF; i++) {
936 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
938 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
942 if (m[i]->data_off != RTE_PKTMBUF_HEADROOM) {
943 printf("invalid data_off\n");
949 for (i=0; i<NB_MBUF; i++) {
951 rte_pktmbuf_free(m[i]);
/*
 * Allocate NB_MBUF mbufs and free them segment by segment with
 * rte_pktmbuf_free_seg().
 * NOTE(review): the chain-walking logic between mb/mt and the return
 * are on lines missing from this excerpt — TODO confirm.
 */
958 test_pktmbuf_free_segment(struct rte_mempool *pktmbuf_pool)
961 struct rte_mbuf *m[NB_MBUF];
964 for (i=0; i<NB_MBUF; i++)
967 /* alloc NB_MBUF mbufs */
968 for (i=0; i<NB_MBUF; i++) {
969 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
971 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
977 for (i=0; i<NB_MBUF; i++) {
979 struct rte_mbuf *mb, *mt;
985 rte_pktmbuf_free_seg(mt);
994 * Stress test for rte_mbuf atomic refcnt.
995 * Implies that RTE_MBUF_REFCNT_ATOMIC is defined.
996 * For more efficiency, recommended to run with RTE_LIBRTE_MBUF_DEBUG defined.
999 #ifdef RTE_MBUF_REFCNT_ATOMIC
/*
 * Worker-lcore loop for the atomic-refcnt stress test: dequeue mbufs
 * from the shared ring and free them (each free drops one reference)
 * until the main lcore raises refcnt_stop_workers; record the number
 * of frees in refcnt_lcore[] for the final accounting.
 * NOTE(review): declaration of mp, the loop braces/counter increment
 * and the return are on lines missing from this excerpt — TODO confirm.
 */
1002 test_refcnt_worker(void *arg)
1004 unsigned lcore, free;
1006 struct rte_ring *refcnt_mbuf_ring = arg;
1008 lcore = rte_lcore_id();
1009 printf("%s started at lcore %u\n", __func__, lcore);
/* volatile flag written by test_refcnt_main() on the main lcore */
1012 while (refcnt_stop_workers == 0) {
1013 if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) {
1015 rte_pktmbuf_free(mp);
1019 refcnt_lcore[lcore] += free;
1020 printf("%s finished at lcore %u, "
1021 "number of freed mbufs: %u\n",
1022 __func__, lcore, free);
/*
 * One iteration of the refcnt stress test run on the main lcore:
 * drain the pool, give each mbuf a random reference count, enqueue it
 * once per reference for the workers to free, then wait until the ring
 * is drained and every mbuf has returned to the mempool. Panics (fails
 * the whole test run) on allocation shortfall or timeout.
 * NOTE(review): declarations of m/ref, the enqueue-accounting of tref
 * and the wait/sleep lines are missing from this excerpt — TODO confirm.
 */
1027 test_refcnt_iter(unsigned int lcore, unsigned int iter,
1028 struct rte_mempool *refcnt_pool,
1029 struct rte_ring *refcnt_mbuf_ring)
1032 unsigned i, n, tref, wn;
1037 /* For each mbuf in the pool:
1039 * - increment it's reference up to N+1,
1040 * - enqueue it N times into the ring for worker cores to free.
1042 for (i = 0, n = rte_mempool_avail_count(refcnt_pool);
1043 i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
/* at least one reference per mbuf */
1045 ref = RTE_MAX(rte_rand() % REFCNT_MAX_REF, 1UL);
/* odd refs: bump all at once; (even refs presumably bump one at a
 * time in the else branch below) — exercises both update patterns */
1047 if ((ref & 1) != 0) {
1048 rte_pktmbuf_refcnt_update(m, ref);
1050 rte_ring_enqueue(refcnt_mbuf_ring, m);
1052 while (ref-- != 0) {
1053 rte_pktmbuf_refcnt_update(m, 1);
1054 rte_ring_enqueue(refcnt_mbuf_ring, m);
/* drop the allocation reference; workers hold the rest */
1057 rte_pktmbuf_free(m);
1061 rte_panic("(lcore=%u, iter=%u): was able to allocate only "
1062 "%u from %u mbufs\n", lcore, iter, i, n);
1064 /* wait till worker lcores will consume all mbufs */
1065 while (!rte_ring_empty(refcnt_mbuf_ring))
1068 /* check that all mbufs are back into mempool by now */
1069 for (wn = 0; wn != REFCNT_MAX_TIMEOUT; wn++) {
1070 if ((i = rte_mempool_avail_count(refcnt_pool)) == n) {
1071 refcnt_lcore[lcore] += tref;
1072 printf("%s(lcore=%u, iter=%u) completed, "
1073 "%u references processed\n",
1074 __func__, lcore, iter, tref);
/* timed out: not all mbufs returned — abort the test run */
1080 rte_panic("(lcore=%u, iter=%u): after %us only "
1081 "%u of %u mbufs left free\n", lcore, iter, wn, i, n);
/*
 * Main-lcore driver of the refcnt stress test: run REFCNT_MAX_ITER
 * iterations of test_refcnt_iter(), then signal the workers to stop.
 * NOTE(review): declarations of i/lcore and the return are on lines
 * missing from this excerpt — TODO confirm against the full file.
 */
1085 test_refcnt_main(struct rte_mempool *refcnt_pool,
1086 struct rte_ring *refcnt_mbuf_ring)
1090 lcore = rte_lcore_id();
1091 printf("%s started at lcore %u\n", __func__, lcore);
1093 for (i = 0; i != REFCNT_MAX_ITER; i++)
1094 test_refcnt_iter(lcore, i, refcnt_pool, refcnt_mbuf_ring);
/* tell test_refcnt_worker() loops on the other lcores to exit */
1096 refcnt_stop_workers = 1;
1099 printf("%s finished at lcore %u\n", __func__, lcore);
/*
 * Entry point of the refcnt stress test (compiled only with
 * RTE_MBUF_REFCNT_ATOMIC): create a dedicated pool and ring, launch
 * test_refcnt_worker() on all worker lcores and test_refcnt_main() on
 * this one, then verify that the workers freed exactly as many
 * references as the main lcore handed out. Requires >= 2 lcores.
 * NOTE(review): error-path gotos, the ring-creation flags argument and
 * the returns are on lines missing from this excerpt — TODO confirm.
 */
1106 test_refcnt_mbuf(void)
1108 #ifdef RTE_MBUF_REFCNT_ATOMIC
1109 unsigned int main_lcore, worker, tref;
1111 struct rte_mempool *refcnt_pool = NULL;
1112 struct rte_ring *refcnt_mbuf_ring = NULL;
1114 if (rte_lcore_count() < 2) {
1115 printf("Not enough cores for test_refcnt_mbuf, expecting at least 2\n");
1116 return TEST_SKIPPED;
1119 printf("starting %s, at %u lcores\n", __func__, rte_lcore_count());
1121 /* create refcnt pool & ring if they don't exist */
1123 refcnt_pool = rte_pktmbuf_pool_create(MAKE_STRING(refcnt_pool),
1124 REFCNT_MBUF_NUM, 0, 0, 0,
1126 if (refcnt_pool == NULL) {
1127 printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
/* ring must hold the worst case: every mbuf at max references */
1132 refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
1133 rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
1135 if (refcnt_mbuf_ring == NULL) {
1136 printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
1141 refcnt_stop_workers = 0;
1142 memset(refcnt_lcore, 0, sizeof (refcnt_lcore));
1144 rte_eal_mp_remote_launch(test_refcnt_worker, refcnt_mbuf_ring, SKIP_MAIN);
1146 test_refcnt_main(refcnt_pool, refcnt_mbuf_ring);
1148 rte_eal_mp_wait_lcore();
1150 /* check that we processed all references */
1152 main_lcore = rte_get_main_lcore();
1154 RTE_LCORE_FOREACH_WORKER(worker)
1155 tref += refcnt_lcore[worker];
/* workers' total frees must equal the main lcore's reference count */
1157 if (tref != refcnt_lcore[main_lcore])
1158 rte_panic("referenced mbufs: %u, freed mbufs: %u\n",
1159 tref, refcnt_lcore[main_lcore]);
1161 rte_mempool_dump(stdout, refcnt_pool);
1162 rte_ring_dump(stdout, refcnt_mbuf_ring);
/* common cleanup for both success and error paths */
1167 rte_mempool_free(refcnt_pool);
1168 rte_ring_free(refcnt_mbuf_ring);
1176 #include <sys/resource.h>
1177 #include <sys/time.h>
1178 #include <sys/wait.h>
1180 /* use fork() to test mbuf errors panic */
/*
 * Run rte_mbuf_sanity_check(buf, 1) inside a fork()ed child so an
 * expected rte_panic() kills only the child; the parent inspects the
 * child's exit status (on lines missing from this excerpt) to tell
 * "panicked as expected" apart from "returned normally".
 * NOTE(review): the fork() call, rlimit declaration, waitpid logic and
 * return values are on missing lines — TODO confirm against full file.
 */
1182 verify_mbuf_check_panics(struct rte_mbuf *buf)
1192 /* No need to generate a coredump when panicking. */
1193 rl.rlim_cur = rl.rlim_max = 0;
1194 setrlimit(RLIMIT_CORE, &rl);
1195 rte_mbuf_sanity_check(buf, 1); /* should panic */
1196 exit(0); /* return normally if it doesn't panic */
1197 } else if (pid < 0) {
1198 printf("Fork Failed\n");
/*
 * Verify rte_mbuf_sanity_check() panics on each corrupt-mbuf condition:
 * NULL mbuf, bad pool, zero buf_iova, NULL buf_addr, refcnt 0 and
 * refcnt UINT16_MAX — using verify_mbuf_check_panics() to contain the
 * expected panic in a child process. A good mbuf must NOT panic.
 * NOTE(review): the "badbuf = *buf" re-initialisation before each case,
 * the corrupting assignments for the pool/refcnt cases, and the returns
 * are on lines missing from this excerpt — TODO confirm.
 */
1209 test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool)
1211 struct rte_mbuf *buf;
1212 struct rte_mbuf badbuf;
1214 printf("Checking rte_mbuf_sanity_check for failure conditions\n");
1216 /* get a good mbuf to use to make copies */
1217 buf = rte_pktmbuf_alloc(pktmbuf_pool);
1221 printf("Checking good mbuf initially\n");
/* the good mbuf must not panic (checker returns -1 = "no panic") */
1222 if (verify_mbuf_check_panics(buf) != -1)
1225 printf("Now checking for error conditions\n");
1227 if (verify_mbuf_check_panics(NULL)) {
1228 printf("Error with NULL mbuf test\n");
1234 if (verify_mbuf_check_panics(&badbuf)) {
1235 printf("Error with bad-pool mbuf test\n");
1240 badbuf.buf_iova = 0;
1241 if (verify_mbuf_check_panics(&badbuf)) {
1242 printf("Error with bad-physaddr mbuf test\n");
1247 badbuf.buf_addr = NULL;
1248 if (verify_mbuf_check_panics(&badbuf)) {
1249 printf("Error with bad-addr mbuf test\n");
/* refcnt = 0 case; the assignment is on a missing line */
1255 if (verify_mbuf_check_panics(&badbuf)) {
1256 printf("Error with bad-refcnt(0) mbuf test\n");
1261 badbuf.refcnt = UINT16_MAX;
1262 if (verify_mbuf_check_panics(&badbuf)) {
1263 printf("Error with bad-refcnt(MAX) mbuf test\n");
/*
 * Build a chained mbuf of pkt_len bytes split over nb_segs segments,
 * fill it with a position-derived byte pattern, linearize it with
 * rte_pktmbuf_linearize(), and verify the result is contiguous with
 * the data intact and in order.
 * NOTE(review): declarations (seg_len, remain, data_len, i, data),
 * the remain-accounting, NULL checks and returns are on lines missing
 * from this excerpt — TODO confirm against the full file.
 */
1271 test_mbuf_linearize(struct rte_mempool *pktmbuf_pool, int pkt_len,
1275 struct rte_mbuf *m = NULL, *mbuf = NULL;
1283 printf("Packet size must be 1 or more (is %d)\n", pkt_len);
1288 printf("Number of segments must be 1 or more (is %d)\n",
1293 seg_len = pkt_len / nb_segs;
1299 /* Create chained mbuf_src and fill it generated data */
1300 for (seg = 0; remain > 0; seg++) {
1302 m = rte_pktmbuf_alloc(pktmbuf_pool);
1304 printf("Cannot create segment for source mbuf");
1308 /* Make sure if tailroom is zeroed */
1309 memset(rte_pktmbuf_mtod(m, uint8_t *), 0,
1310 rte_pktmbuf_tailroom(m));
/* cap the last segment's payload to whatever length remains */
1313 if (data_len > seg_len)
1316 data = (uint8_t *)rte_pktmbuf_append(m, data_len);
1318 printf("Cannot append %d bytes to the mbuf\n",
/* pattern derives from the absolute byte position, so ordering
 * errors after linearization are detectable */
1323 for (i = 0; i < data_len; i++)
1324 data[i] = (seg * seg_len + i) % 0x0ff;
1329 rte_pktmbuf_chain(mbuf, m);
1334 /* Create destination buffer to store coalesced data */
1335 if (rte_pktmbuf_linearize(mbuf)) {
1336 printf("Mbuf linearization failed\n");
1340 if (!rte_pktmbuf_is_contiguous(mbuf)) {
1341 printf("Source buffer should be contiguous after "
1346 data = rte_pktmbuf_mtod(mbuf, uint8_t *);
1348 for (i = 0; i < pkt_len; i++)
1349 if (data[i] != (i % 0x0ff)) {
1350 printf("Incorrect data in linearized mbuf\n");
1354 rte_pktmbuf_free(mbuf);
/* failure path: free whatever part of the chain was built */
1359 rte_pktmbuf_free(mbuf);
/*
 * Drive test_mbuf_linearize() over a table of (size, nb_segs)
 * combinations and report any failing pair.
 * NOTE(review): the struct members, the mbuf_array initializer and the
 * returns are on lines missing from this excerpt — TODO confirm.
 */
1364 test_mbuf_linearize_check(struct rte_mempool *pktmbuf_pool)
1366 struct test_mbuf_array {
1378 printf("Test mbuf linearize API\n");
1380 for (i = 0; i < RTE_DIM(mbuf_array); i++)
1381 if (test_mbuf_linearize(pktmbuf_pool, mbuf_array[i].size,
1382 mbuf_array[i].nb_segs)) {
1383 printf("Test failed for %d, %d\n", mbuf_array[i].size,
1384 mbuf_array[i].nb_segs);
1392 * Helper function for test_tx_ofload
/*
 * Helper for test_tx_offload(): store the given inner l2/l3/l4, TSO
 * segment size and outer l3/l2 lengths into the mbuf's tx_offload
 * bit-fields one assignment at a time (the l2/l3/l4 assignments are on
 * lines missing from this excerpt — TODO confirm). This is the
 * per-field baseline compared against rte_mbuf_tx_offload().
 */
1395 set_tx_offload(struct rte_mbuf *mb, uint64_t il2, uint64_t il3, uint64_t il4,
1396 uint64_t tso, uint64_t ol3, uint64_t ol2)
1401 mb->tso_segsz = tso;
1402 mb->outer_l3_len = ol3;
1403 mb->outer_l2_len = ol2;
/*
 * Check rte_mbuf_tx_offload() against per-bit-field assignment: fill a
 * large array of zeroed mbufs with random offload values both ways,
 * time each variant with rte_rdtsc_precise(), and verify a sampled
 * tx_offload word is identical for the two methods.
 * Returns 0 on match, -EINVAL on mismatch (or -ENOMEM presumably on
 * the missing allocation-failure line — TODO confirm).
 */
1407 test_tx_offload(void)
1409 struct rte_mbuf *mb;
1410 uint64_t tm, v1, v2;
/* volatile so the compiler cannot fold the random values into
 * constants across the two timed loops */
1414 static volatile struct {
1421 const uint32_t num = 0x10000;
/* random values bounded to each bit-field's width */
1423 txof.l2 = rte_rand() % (1 << RTE_MBUF_L2_LEN_BITS);
1424 txof.l3 = rte_rand() % (1 << RTE_MBUF_L3_LEN_BITS);
1425 txof.l4 = rte_rand() % (1 << RTE_MBUF_L4_LEN_BITS);
1426 txof.tso = rte_rand() % (1 << RTE_MBUF_TSO_SEGSZ_BITS);
1428 printf("%s started, tx_offload = {\n"
1432 "\ttso_segsz=%#hx,\n"
1433 "\touter_l3_len=%#x,\n"
1434 "\touter_l2_len=%#x,\n"
1437 txof.l2, txof.l3, txof.l4, txof.tso, txof.l3, txof.l2);
1439 sz = sizeof(*mb) * num;
1440 mb = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
1442 printf("%s failed, out of memory\n", __func__);
/* variant 1: per-field assignment via set_tx_offload() */
1447 tm = rte_rdtsc_precise();
1449 for (i = 0; i != num; i++)
1450 set_tx_offload(mb + i, txof.l2, txof.l3, txof.l4,
1451 txof.tso, txof.l3, txof.l2);
1453 tm = rte_rdtsc_precise() - tm;
1454 printf("%s set tx_offload by bit-fields: %u iterations, %"
1455 PRIu64 " cycles, %#Lf cycles/iter\n",
1456 __func__, num, tm, (long double)tm / num);
/* sample a random element — all were written with the same values */
1458 v1 = mb[rte_rand() % num].tx_offload;
/* variant 2: single raw-word composition via rte_mbuf_tx_offload() */
1461 tm = rte_rdtsc_precise();
1463 for (i = 0; i != num; i++)
1464 mb[i].tx_offload = rte_mbuf_tx_offload(txof.l2, txof.l3,
1465 txof.l4, txof.tso, txof.l3, txof.l2, 0);
1467 tm = rte_rdtsc_precise() - tm;
1468 printf("%s set raw tx_offload: %u iterations, %"
1469 PRIu64 " cycles, %#Lf cycles/iter\n",
1470 __func__, num, tm, (long double)tm / num);
1472 v2 = mb[rte_rand() % num].tx_offload;
1476 printf("%s finished\n"
1477 "expected tx_offload value: 0x%" PRIx64 ";\n"
1478 "rte_mbuf_tx_offload value: 0x%" PRIx64 ";\n",
/* the two composition methods must produce identical words */
1481 return (v1 == v2) ? 0 : -EINVAL;
/*
 * Exercise rte_get_rx_ol_flag_list() error and success paths:
 * NULL output buffer, zero buffer length, a deliberately-too-small
 * buffer (result must be truncated to len - 1 plus NUL), a zero mask
 * (empty but valid output), and a real flag mask (non-empty output).
 */
1485 test_get_rx_ol_flag_list(void)
1487 int len = 6, ret = 0;
1491 /* Test case to check with null buffer */
1492 ret = rte_get_rx_ol_flag_list(0, NULL, 0);
1494 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1496 /* Test case to check with zero buffer len */
1497 ret = rte_get_rx_ol_flag_list(RTE_MBUF_F_RX_L4_CKSUM_MASK, buf, 0);
1499 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* nothing may have been written into the zero-length buffer */
1501 buflen = strlen(buf);
1503 GOTO_FAIL("%s buffer should be empty, received = %d\n",
1506 /* Test case to check with reduced buffer len */
1507 ret = rte_get_rx_ol_flag_list(0, buf, len);
1509 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* truncated output must fill exactly len - 1 chars (plus NUL) */
1511 buflen = strlen(buf);
1512 if (buflen != (len - 1))
1513 GOTO_FAIL("%s invalid buffer length retrieved, expected: %d,"
1514 "received = %d\n", __func__,
1517 /* Test case to check with zero mask value */
1518 ret = rte_get_rx_ol_flag_list(0, buf, sizeof(buf));
1520 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1522 buflen = strlen(buf);
1524 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1525 "non-zero, buffer should not be empty");
1527 /* Test case to check with valid mask value */
1528 ret = rte_get_rx_ol_flag_list(RTE_MBUF_F_RX_SEC_OFFLOAD, buf,
1531 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1533 buflen = strlen(buf);
1535 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1536 "non-zero, buffer should not be empty");
/*
 * TX-side twin of test_get_rx_ol_flag_list(): exercises
 * rte_get_tx_ol_flag_list() with a NULL buffer, a zero-length buffer,
 * a too-small buffer (truncated to len - 1 plus NUL), a zero mask and
 * a valid TX checksum flag mask.
 */
1544 test_get_tx_ol_flag_list(void)
1546 int len = 6, ret = 0;
1550 /* Test case to check with null buffer */
1551 ret = rte_get_tx_ol_flag_list(0, NULL, 0);
1553 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1555 /* Test case to check with zero buffer len */
1556 ret = rte_get_tx_ol_flag_list(RTE_MBUF_F_TX_IP_CKSUM, buf, 0);
1558 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* nothing may have been written into the zero-length buffer */
1560 buflen = strlen(buf);
1562 GOTO_FAIL("%s buffer should be empty, received = %d\n",
1566 /* Test case to check with reduced buffer len */
1567 ret = rte_get_tx_ol_flag_list(0, buf, len);
1569 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* truncated output must fill exactly len - 1 chars (plus NUL) */
1571 buflen = strlen(buf);
1572 if (buflen != (len - 1))
1573 GOTO_FAIL("%s invalid buffer length retrieved, expected: %d,"
1574 "received = %d\n", __func__,
1577 /* Test case to check with zero mask value */
1578 ret = rte_get_tx_ol_flag_list(0, buf, sizeof(buf));
1580 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1582 buflen = strlen(buf);
1584 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1585 "non-zero, buffer should not be empty");
1587 /* Test case to check with valid mask value */
1588 ret = rte_get_tx_ol_flag_list(RTE_MBUF_F_TX_UDP_CKSUM, buf,
1591 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1593 buflen = strlen(buf);
1595 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1596 "non-zero, buffer should not be empty");
/*
 * Verify rte_get_rx_ol_flag_name(): for every known RX offload flag
 * the returned string must match the macro's own name (the table is
 * built with VAL_NAME, which stringizes the flag), and an invalid
 * flag value (0) must yield NULL.
 */
1610 test_get_rx_ol_flag_name(void)
1613 const char *flag_str = NULL;
1614 const struct flag_name rx_flags[] = {
1615 VAL_NAME(RTE_MBUF_F_RX_VLAN),
1616 VAL_NAME(RTE_MBUF_F_RX_RSS_HASH),
1617 VAL_NAME(RTE_MBUF_F_RX_FDIR),
1618 VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_BAD),
1619 VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_GOOD),
1620 VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_NONE),
1621 VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_BAD),
1622 VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_GOOD),
1623 VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_NONE),
1624 VAL_NAME(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD),
1625 VAL_NAME(RTE_MBUF_F_RX_VLAN_STRIPPED),
1626 VAL_NAME(RTE_MBUF_F_RX_IEEE1588_PTP),
1627 VAL_NAME(RTE_MBUF_F_RX_IEEE1588_TMST),
1628 VAL_NAME(RTE_MBUF_F_RX_FDIR_ID),
1629 VAL_NAME(RTE_MBUF_F_RX_FDIR_FLX),
1630 VAL_NAME(RTE_MBUF_F_RX_QINQ_STRIPPED),
1631 VAL_NAME(RTE_MBUF_F_RX_LRO),
1632 VAL_NAME(RTE_MBUF_F_RX_SEC_OFFLOAD),
1633 VAL_NAME(RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED),
1634 VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD),
1635 VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD),
1636 VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID),
1639 /* Test case to check with valid flag */
1640 for (i = 0; i < RTE_DIM(rx_flags); i++) {
1641 flag_str = rte_get_rx_ol_flag_name(rx_flags[i].flag);
1642 if (flag_str == NULL)
1643 GOTO_FAIL("%s: Expected flagname = %s; received null\n",
1644 __func__, rx_flags[i].name);
1645 if (strcmp(flag_str, rx_flags[i].name) != 0)
1646 GOTO_FAIL("%s: Expected flagname = %s; received = %s\n",
1647 __func__, rx_flags[i].name, flag_str);
1649 /* Test case to check with invalid flag */
1650 flag_str = rte_get_rx_ol_flag_name(0);
1651 if (flag_str != NULL) {
1652 GOTO_FAIL("%s: Expected flag name = null; received = %s\n",
1653 __func__, flag_str);
/*
 * TX-side twin of test_get_rx_ol_flag_name(): every known TX offload
 * flag must map to its own stringized name via
 * rte_get_tx_ol_flag_name(), and an invalid flag (0) must yield NULL.
 */
1662 test_get_tx_ol_flag_name(void)
1665 const char *flag_str = NULL;
1666 const struct flag_name tx_flags[] = {
1667 VAL_NAME(RTE_MBUF_F_TX_VLAN),
1668 VAL_NAME(RTE_MBUF_F_TX_IP_CKSUM),
1669 VAL_NAME(RTE_MBUF_F_TX_TCP_CKSUM),
1670 VAL_NAME(RTE_MBUF_F_TX_SCTP_CKSUM),
1671 VAL_NAME(RTE_MBUF_F_TX_UDP_CKSUM),
1672 VAL_NAME(RTE_MBUF_F_TX_IEEE1588_TMST),
1673 VAL_NAME(RTE_MBUF_F_TX_TCP_SEG),
1674 VAL_NAME(RTE_MBUF_F_TX_IPV4),
1675 VAL_NAME(RTE_MBUF_F_TX_IPV6),
1676 VAL_NAME(RTE_MBUF_F_TX_OUTER_IP_CKSUM),
1677 VAL_NAME(RTE_MBUF_F_TX_OUTER_IPV4),
1678 VAL_NAME(RTE_MBUF_F_TX_OUTER_IPV6),
1679 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_VXLAN),
1680 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_GRE),
1681 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_IPIP),
1682 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_GENEVE),
1683 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_MPLSINUDP),
1684 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE),
1685 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_IP),
1686 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_UDP),
1687 VAL_NAME(RTE_MBUF_F_TX_QINQ),
1688 VAL_NAME(RTE_MBUF_F_TX_MACSEC),
1689 VAL_NAME(RTE_MBUF_F_TX_SEC_OFFLOAD),
1690 VAL_NAME(RTE_MBUF_F_TX_UDP_SEG),
1691 VAL_NAME(RTE_MBUF_F_TX_OUTER_UDP_CKSUM),
1694 /* Test case to check with valid flag */
1695 for (i = 0; i < RTE_DIM(tx_flags); i++) {
1696 flag_str = rte_get_tx_ol_flag_name(tx_flags[i].flag);
1697 if (flag_str == NULL)
1698 GOTO_FAIL("%s: Expected flagname = %s; received null\n",
1699 __func__, tx_flags[i].name);
1700 if (strcmp(flag_str, tx_flags[i].name) != 0)
1701 GOTO_FAIL("%s: Expected flagname = %s; received = %s\n",
1702 __func__, tx_flags[i].name, flag_str);
1704 /* Test case to check with invalid flag */
1705 flag_str = rte_get_tx_ol_flag_name(0);
1706 if (flag_str != NULL) {
1707 GOTO_FAIL("%s: Expected flag name = null; received = %s\n",
1708 __func__, flag_str);
/*
 * Shared helper for the TX-offload validation scenarios: allocate one
 * mbuf, install the supplied ol_flags and TSO segment size, run
 * rte_validate_tx_offload() and check its return code against
 * expected_retval. test_name only labels the failure message.
 * The mbuf is freed on both the success and the failure path.
 */
1718 test_mbuf_validate_tx_offload(const char *test_name,
1719 struct rte_mempool *pktmbuf_pool,
1722 int expected_retval)
1724 struct rte_mbuf *m = NULL;
1727 /* alloc a mbuf and do sanity check */
1728 m = rte_pktmbuf_alloc(pktmbuf_pool)
1730 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1731 if (rte_pktmbuf_pkt_len(m) != 0)
1732 GOTO_FAIL("%s: Bad packet length\n", __func__);
1733 rte_mbuf_sanity_check(m, 0);
/* install the scenario's offload request, then validate it */
1734 m->ol_flags = ol_flags;
1735 m->tso_segsz = segsize;
1736 ret = rte_validate_tx_offload(m);
1737 if (ret != expected_retval)
1738 GOTO_FAIL("%s(%s): expected ret val: %d; received: %d\n",
1739 __func__, test_name, expected_retval, ret);
1740 rte_pktmbuf_free(m);
1745 rte_pktmbuf_free(m);
/*
 * Scenario table for rte_validate_tx_offload(): builds up ol_flags
 * combinations one case at a time and delegates each to
 * test_mbuf_validate_tx_offload(), covering invalid mixes (IP cksum
 * with IPv6, missing IP type, zero TSO segment size, outer cksum
 * without outer IPv4) as well as the valid counterparts.
 */
1752 test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
1754 /* test to validate tx offload flags */
1755 uint64_t ol_flags = 0;
1757 /* test to validate if IP checksum is counted only for IPV4 packet */
1758 /* set both IP checksum and IPV6 flags */
1759 ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
1760 ol_flags |= RTE_MBUF_F_TX_IPV6;
1761 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_CKSUM_IPV6_SET",
1763 ol_flags, 0, -EINVAL) < 0)
1764 GOTO_FAIL("%s failed: IP cksum is set incorrect.\n", __func__);
1765 /* resetting ol_flags for next testcase */
1768 /* test to validate if IP type is set when required */
1769 ol_flags |= RTE_MBUF_F_TX_L4_MASK;
1770 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
1772 ol_flags, 0, -EINVAL) < 0)
1773 GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
1775 /* test if IP type is set when TCP SEG is on */
1776 ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
1777 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
1779 ol_flags, 0, -EINVAL) < 0)
1780 GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
1783 /* test to confirm IP type (IPV4/IPV6) is set */
1784 ol_flags = RTE_MBUF_F_TX_L4_MASK;
1785 ol_flags |= RTE_MBUF_F_TX_IPV6;
1786 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_SET",
1788 ol_flags, 0, 0) < 0)
1789 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1792 /* test to check TSO segment size is non-zero */
1793 ol_flags |= RTE_MBUF_F_TX_IPV4;
1794 ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
1795 /* set 0 tso segment size */
1796 if (test_mbuf_validate_tx_offload("MBUF_TEST_NULL_TSO_SEGSZ",
1798 ol_flags, 0, -EINVAL) < 0)
1799 GOTO_FAIL("%s failed: tso segment size is null.\n", __func__);
1801 /* retain IPV4 and RTE_MBUF_F_TX_TCP_SEG mask */
1802 /* set valid tso segment size but IP CKSUM not set */
1803 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_NOT_SET",
1805 ol_flags, 512, -EINVAL) < 0)
1806 GOTO_FAIL("%s failed: IP CKSUM is not set.\n", __func__);
1808 /* test to validate if IP checksum is set for TSO capability */
1809 /* retain IPV4, TCP_SEG, tso_seg size */
1810 ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
1811 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_SET",
1813 ol_flags, 512, 0) < 0)
1814 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1816 /* test to confirm TSO for IPV6 type */
1818 ol_flags |= RTE_MBUF_F_TX_IPV6;
1819 ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
1820 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IPV6_SET",
1822 ol_flags, 512, 0) < 0)
1823 GOTO_FAIL("%s failed: TSO req not met.\n", __func__);
1826 /* test if outer IP checksum set for non outer IPv4 packet */
1827 ol_flags |= RTE_MBUF_F_TX_IPV6;
1828 ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
1829 if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_NOT_SET",
1831 ol_flags, 512, -EINVAL) < 0)
1832 GOTO_FAIL("%s failed: Outer IP cksum set.\n", __func__);
1835 /* test to confirm outer IP checksum is set for outer IPV4 packet */
1836 ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
1837 ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4;
1838 if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_SET",
1840 ol_flags, 512, 0) < 0)
1841 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1844 /* test to confirm if packets with no TX_OFFLOAD_MASK are skipped */
1845 if (test_mbuf_validate_tx_offload("MBUF_TEST_OL_MASK_NOT_SET",
1847 ol_flags, 512, 0) < 0)
1848 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1855 * Test for allocating a bulk of mbufs:
1856 * defines an array of positive sizes for mbuf allocations.
/*
 * Bulk-allocation test: for each count in alloc_counts (chosen around
 * the mempool cache size boundaries) call rte_pktmbuf_alloc_bulk() and,
 * on success, free every returned mbuf so the pool is left intact for
 * the next iteration.
 */
1859 test_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
1862 unsigned int idx, loop;
1863 unsigned int alloc_counts[] = {
1865 MEMPOOL_CACHE_SIZE - 1,
1866 MEMPOOL_CACHE_SIZE + 1,
/* NOTE(review): 1.5 * cache size relies on implicit float->uint
 * truncation in the initializer — confirm this is intentional. */
1867 MEMPOOL_CACHE_SIZE * 1.5,
1868 MEMPOOL_CACHE_SIZE * 2,
1869 MEMPOOL_CACHE_SIZE * 2 - 1,
1870 MEMPOOL_CACHE_SIZE * 2 + 1,
1874 /* allocate a large array of mbuf pointers */
1875 struct rte_mbuf *mbufs[NB_MBUF] = { 0 };
1876 for (idx = 0; idx < RTE_DIM(alloc_counts); idx++) {
1877 ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
/* on success: return every mbuf so the pool stays full */
1880 for (loop = 0; loop < alloc_counts[idx] &&
1881 mbufs[loop] != NULL; loop++)
1882 rte_pktmbuf_free(mbufs[loop]);
1883 } else if (ret != 0) {
1884 printf("%s: Bulk alloc failed count(%u); ret val(%d)\n",
1885 __func__, alloc_counts[idx], ret);
1893 * Negative testing for allocating a bulk of mbufs
/*
 * Negative bulk-allocation test: requests counts that exceed the pool
 * capacity (MEMPOOL_CACHE_SIZE - NB_MBUF wraps to a huge unsigned
 * value) and expects rte_pktmbuf_alloc_bulk() to fail. If it
 * unexpectedly succeeds, any mbufs handed out are freed before the
 * test is reported as failed.
 */
1896 test_neg_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
1899 unsigned int idx, loop;
1900 unsigned int neg_alloc_counts[] = {
1901 MEMPOOL_CACHE_SIZE - NB_MBUF,
1906 struct rte_mbuf *mbufs[NB_MBUF * 8] = { 0 };
1908 for (idx = 0; idx < RTE_DIM(neg_alloc_counts); idx++) {
1909 ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
1910 neg_alloc_counts[idx]);
1912 printf("%s: Bulk alloc must fail! count(%u); ret(%d)\n",
1913 __func__, neg_alloc_counts[idx], ret);
/* clean up any mbufs that were handed out despite the error */
1914 for (loop = 0; loop < neg_alloc_counts[idx] &&
1915 mbufs[loop] != NULL; loop++)
1916 rte_pktmbuf_free(mbufs[loop]);
1924 * Test to read mbuf packet using rte_pktmbuf_read
/*
 * Basic rte_pktmbuf_read() test on a single-segment mbuf: append
 * MBUF_TEST_DATA_LEN2 bytes of 0xfe, read them back from offset 0 and
 * verify every byte. The mbuf is freed on both exit paths.
 */
1927 test_pktmbuf_read(struct rte_mempool *pktmbuf_pool)
1929 struct rte_mbuf *m = NULL;
1931 const char *data_copy = NULL;
1935 m = rte_pktmbuf_alloc(pktmbuf_pool);
1937 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1938 if (rte_pktmbuf_pkt_len(m) != 0)
1939 GOTO_FAIL("%s: Bad packet length\n", __func__);
1940 rte_mbuf_sanity_check(m, 0);
/* fill the mbuf with a known pattern */
1942 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
1944 GOTO_FAIL("%s: Cannot append data\n", __func__);
1945 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN2)
1946 GOTO_FAIL("%s: Bad packet length\n", __func__);
1947 memset(data, 0xfe, MBUF_TEST_DATA_LEN2);
1949 /* read the data from mbuf */
1950 data_copy = rte_pktmbuf_read(m, 0, MBUF_TEST_DATA_LEN2, NULL);
1951 if (data_copy == NULL)
1952 GOTO_FAIL("%s: Error in reading data!\n", __func__);
1953 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
1954 if (data_copy[off] != (char)0xfe)
1955 GOTO_FAIL("Data corrupted at offset %u", off);
1957 rte_pktmbuf_free(m);
1963 rte_pktmbuf_free(m);
1970 * Test to read mbuf packet data from offset
/*
 * rte_pktmbuf_read() offset handling: prepend an Ethernet-sized header
 * (0xde), append payload (0xcc), then read at various offsets/lengths:
 * header only, payload only, a partial payload window, over-long
 * requests (must return NULL), zero-length reads (must return the
 * in-place pointer), and extreme UINT_MAX offset/length combinations.
 * NOTE(review): this function mixes the legacy `struct ether_hdr` type
 * with sizeof(struct rte_ether_hdr) — confirm both resolve consistently
 * in the rest of the file.
 */
1973 test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool)
1975 struct rte_mbuf *m = NULL;
1976 struct ether_hdr *hdr = NULL;
1978 const char *data_copy = NULL;
1980 unsigned int hdr_len = sizeof(struct rte_ether_hdr);
1983 m = rte_pktmbuf_alloc(pktmbuf_pool);
1985 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1987 if (rte_pktmbuf_pkt_len(m) != 0)
1988 GOTO_FAIL("%s: Bad packet length\n", __func__);
1989 rte_mbuf_sanity_check(m, 0);
1991 /* prepend an ethernet header */
1992 hdr = (struct ether_hdr *)rte_pktmbuf_prepend(m, hdr_len);
1994 GOTO_FAIL("%s: Cannot prepend header\n", __func__);
1995 if (rte_pktmbuf_pkt_len(m) != hdr_len)
1996 GOTO_FAIL("%s: Bad pkt length", __func__);
1997 if (rte_pktmbuf_data_len(m) != hdr_len)
1998 GOTO_FAIL("%s: Bad data length", __func__);
1999 memset(hdr, 0xde, hdr_len);
2001 /* read mbuf header info from 0 offset */
2002 data_copy = rte_pktmbuf_read(m, 0, hdr_len, NULL);
2003 if (data_copy == NULL)
2004 GOTO_FAIL("%s: Error in reading header!\n", __func__);
2005 for (off = 0; off < hdr_len; off++) {
2006 if (data_copy[off] != (char)0xde)
2007 GOTO_FAIL("Header info corrupted at offset %u", off);
2010 /* append sample data after ethernet header */
2011 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
2013 GOTO_FAIL("%s: Cannot append data\n", __func__);
2014 if (rte_pktmbuf_pkt_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
2015 GOTO_FAIL("%s: Bad packet length\n", __func__);
2016 if (rte_pktmbuf_data_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
2017 GOTO_FAIL("%s: Bad data length\n", __func__);
2018 memset(data, 0xcc, MBUF_TEST_DATA_LEN2);
2020 /* read mbuf data after header info */
2021 data_copy = rte_pktmbuf_read(m, hdr_len, MBUF_TEST_DATA_LEN2, NULL);
2022 if (data_copy == NULL)
2023 GOTO_FAIL("%s: Error in reading header data!\n", __func__);
2024 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
2025 if (data_copy[off] != (char)0xcc)
2026 GOTO_FAIL("Data corrupted at offset %u", off);
2029 /* partial reading of mbuf data */
2030 data_copy = rte_pktmbuf_read(m, hdr_len + 5, MBUF_TEST_DATA_LEN2 - 5,
2032 if (data_copy == NULL)
2033 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
/* NOTE(review): strlen() here relies on the bytes past the payload
 * being zero (tailroom was zeroed earlier in the file) — verify. */
2034 if (strlen(data_copy) != MBUF_TEST_DATA_LEN2 - 5)
2035 GOTO_FAIL("%s: Incorrect data length!\n", __func__);
2036 for (off = 0; off < MBUF_TEST_DATA_LEN2 - 5; off++) {
2037 if (data_copy[off] != (char)0xcc)
2038 GOTO_FAIL("Data corrupted at offset %u", off);
2041 /* read length greater than mbuf data_len */
2042 if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_data_len(m) + 1,
2044 GOTO_FAIL("%s: Requested len is larger than mbuf data len!\n",
2047 /* read length greater than mbuf pkt_len */
2048 if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_pkt_len(m) + 1,
2050 GOTO_FAIL("%s: Requested len is larger than mbuf pkt len!\n",
2053 /* read data of zero len from valid offset */
2054 data_copy = rte_pktmbuf_read(m, hdr_len, 0, NULL);
2055 if (data_copy == NULL)
2056 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2057 if (strlen(data_copy) != MBUF_TEST_DATA_LEN2)
2058 GOTO_FAIL("%s: Corrupted data content!\n", __func__);
2059 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
2060 if (data_copy[off] != (char)0xcc)
2061 GOTO_FAIL("Data corrupted at offset %u", off);
2064 /* read data of zero length from zero offset */
2065 data_copy = rte_pktmbuf_read(m, 0, 0, NULL);
2066 if (data_copy == NULL)
2067 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2068 /* check if the received address is the beginning of header info */
2069 if (hdr != (const struct ether_hdr *)data_copy)
2070 GOTO_FAIL("%s: Corrupted data address!\n", __func__);
2072 /* read data of max length from valid offset */
2073 data_copy = rte_pktmbuf_read(m, hdr_len, UINT_MAX, NULL);
2074 if (data_copy == NULL)
2075 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2076 /* check if the received address is the beginning of data segment */
2077 if (data_copy != data)
2078 GOTO_FAIL("%s: Corrupted data address!\n", __func__);
2080 /* try to read from mbuf with max size offset */
2081 data_copy = rte_pktmbuf_read(m, UINT_MAX, 0, NULL);
2082 if (data_copy != NULL)
2083 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2085 /* try to read from mbuf with max size offset and len */
2086 data_copy = rte_pktmbuf_read(m, UINT_MAX, UINT_MAX, NULL);
2087 if (data_copy != NULL)
2088 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2090 rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
2092 rte_pktmbuf_free(m);
2098 rte_pktmbuf_free(m);
2105 unsigned int seg_count;
2109 unsigned int seg_lengths[MBUF_MAX_SEG];
2112 /* create a mbuf with different sized segments
2113 * and fill with data [0x00 0x01 0x02 ...]
/* create a mbuf with different sized segments
 * and fill with data [0x00 0x01 0x02 ...]
 *
 * Builds a chained mbuf per the test_case description: one mbuf per
 * entry in seg_lengths, an optional Ethernet-sized header prepended to
 * the first segment (MBUF_HEADER flag), and a continuous byte pattern
 * running across all segments (last_index carries the pattern from one
 * segment into the next). Returns the head mbuf, or NULL after freeing
 * everything on failure.
 */
2115 static struct rte_mbuf *
2116 create_packet(struct rte_mempool *pktmbuf_pool,
2117 struct test_case *test_data)
2119 uint16_t i, ret, seg, seg_len = 0;
2120 uint32_t last_index = 0;
2121 unsigned int seg_lengths[MBUF_MAX_SEG];
2122 unsigned int hdr_len;
2123 struct rte_mbuf *pkt = NULL;
2124 struct rte_mbuf *pkt_seg = NULL;
2128 memcpy(seg_lengths, test_data->seg_lengths,
2129 sizeof(unsigned int)*test_data->seg_count);
2130 for (seg = 0; seg < test_data->seg_count; seg++) {
2132 seg_len = seg_lengths[seg];
2133 pkt_seg = rte_pktmbuf_alloc(pktmbuf_pool);
2134 if (pkt_seg == NULL)
2135 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2136 if (rte_pktmbuf_pkt_len(pkt_seg) != 0)
2137 GOTO_FAIL("%s: Bad packet length\n", __func__);
2138 rte_mbuf_sanity_check(pkt_seg, 0);
2139 /* Add header only for the first segment */
2140 if (test_data->flags == MBUF_HEADER && seg == 0) {
2141 hdr_len = sizeof(struct rte_ether_hdr);
2142 /* prepend a header and fill with dummy data */
2143 hdr = (char *)rte_pktmbuf_prepend(pkt_seg, hdr_len);
2145 GOTO_FAIL("%s: Cannot prepend header\n",
2147 if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len)
2148 GOTO_FAIL("%s: Bad pkt length", __func__);
2149 if (rte_pktmbuf_data_len(pkt_seg) != hdr_len)
2150 GOTO_FAIL("%s: Bad data length", __func__);
/* pattern bytes continue across header and payload */
2151 for (i = 0; i < hdr_len; i++)
2152 hdr[i] = (last_index + i) % 0xffff;
2153 last_index += hdr_len;
2155 /* skip appending segment with 0 length */
2158 data = rte_pktmbuf_append(pkt_seg, seg_len);
2160 GOTO_FAIL("%s: Cannot append data segment\n", __func__);
2161 if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len + seg_len)
2162 GOTO_FAIL("%s: Bad packet segment length: %d\n",
2163 __func__, rte_pktmbuf_pkt_len(pkt_seg));
2164 if (rte_pktmbuf_data_len(pkt_seg) != hdr_len + seg_len)
2165 GOTO_FAIL("%s: Bad data length\n", __func__);
2166 for (i = 0; i < seg_len; i++)
2167 data[i] = (last_index + i) % 0xffff;
2168 /* to fill continuous data from one seg to another */
2170 /* create chained mbufs */
2174 ret = rte_pktmbuf_chain(pkt, pkt_seg);
2176 GOTO_FAIL("%s:FAIL: Chained mbuf creation %d\n",
2180 pkt_seg = pkt_seg->next;
/* failure path: release the whole chain plus any dangling segment */
2185 rte_pktmbuf_free(pkt);
2188 if (pkt_seg != NULL) {
2189 rte_pktmbuf_free(pkt_seg);
/*
 * rte_pktmbuf_read() over chained mbufs: for each test_case build a
 * multi-segment packet with create_packet(), read (read_off, read_len)
 * into a caller-supplied buffer, and verify the continuous byte
 * pattern. Cases flagged MBUF_NEG_TEST_READ request more data than the
 * packet holds and must make the read return NULL.
 */
2196 test_pktmbuf_read_from_chain(struct rte_mempool *pktmbuf_pool)
2199 struct test_case test_cases[] = {
2201 .seg_lengths = { 100, 100, 100 },
2203 .flags = MBUF_NO_HEADER,
2208 .seg_lengths = { 100, 125, 150 },
2210 .flags = MBUF_NO_HEADER,
2215 .seg_lengths = { 100, 100 },
2217 .flags = MBUF_NO_HEADER,
2222 .seg_lengths = { 100, 200 },
2224 .flags = MBUF_HEADER,
2225 .read_off = sizeof(struct rte_ether_hdr),
2229 .seg_lengths = { 1000, 100 },
2231 .flags = MBUF_NO_HEADER,
2236 .seg_lengths = { 1024, 0, 100 },
2238 .flags = MBUF_NO_HEADER,
2243 .seg_lengths = { 1000, 1, 1000 },
2245 .flags = MBUF_NO_HEADER,
2250 .seg_lengths = { MBUF_TEST_DATA_LEN,
2251 MBUF_TEST_DATA_LEN2,
2252 MBUF_TEST_DATA_LEN3, 800, 10 },
2254 .flags = MBUF_NEG_TEST_READ,
2256 .read_len = MBUF_DATA_SIZE
2261 const char *data_copy = NULL;
2262 char data_buf[MBUF_DATA_SIZE];
2264 memset(data_buf, 0, MBUF_DATA_SIZE);
2266 for (i = 0; i < RTE_DIM(test_cases); i++) {
2267 m = create_packet(pktmbuf_pool, &test_cases[i]);
2269 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2271 data_copy = rte_pktmbuf_read(m, test_cases[i].read_off,
2272 test_cases[i].read_len, data_buf);
2273 if (test_cases[i].flags == MBUF_NEG_TEST_READ) {
/* over-long read must fail; free and move to next case */
2274 if (data_copy != NULL)
2275 GOTO_FAIL("%s: mbuf data read should fail!\n",
2278 rte_pktmbuf_free(m);
2283 if (data_copy == NULL)
2284 GOTO_FAIL("%s: Error in reading packet data!\n",
/* verify the continuous pattern laid down by create_packet() */
2286 for (pos = 0; pos < test_cases[i].read_len; pos++) {
2287 if (data_copy[pos] !=
2288 (char)((test_cases[i].read_off + pos)
2290 GOTO_FAIL("Data corrupted at offset %u is %2X",
2291 pos, data_copy[pos]);
2293 rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
2294 rte_pktmbuf_free(m);
2301 rte_pktmbuf_free(m);
2307 /* Define a free call back function to be used for external buffer */
/*
 * Free callback attached to the external buffer: validates the buffer
 * address and records the free through the bool flag passed as the
 * opaque pointer, so the test can assert exactly when the extbuf was
 * released.
 */
2309 ext_buf_free_callback_fn(void *addr, void *opaque)
2311 bool *freed = opaque;
2314 printf("External buffer address is invalid\n");
2319 printf("External buffer freed via callback\n");
2323 * Test to initialize shared data in external buffer before attaching to mbuf
2324 * - Allocate mbuf with no data.
2325 * - Allocate an external buffer large enough to accommodate
2326 * rte_mbuf_ext_shared_info.
2327 * - Invoke pktmbuf_ext_shinfo_init_helper to initialize shared data.
2328 * - Invoke rte_pktmbuf_attach_extbuf to attach external buffer to the mbuf.
2329 * - Clone another mbuf and attach the same external buffer to it.
2330 * - Invoke rte_pktmbuf_detach_extbuf to detach the external buffer from mbuf.
/*
 * External-buffer shared-info lifecycle test: initialize shinfo inside
 * the external buffer, attach the buffer to an mbuf and to its clone,
 * track the external refcount through manual update/set calls, then
 * detach from both mbufs and verify (via the `freed` flag set by
 * ext_buf_free_callback_fn) that the buffer is released only on the
 * final detach.
 */
2333 test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool)
2335 struct rte_mbuf *m = NULL;
2336 struct rte_mbuf *clone = NULL;
2337 struct rte_mbuf_ext_shared_info *ret_shinfo = NULL;
2338 rte_iova_t buf_iova;
2339 void *ext_buf_addr = NULL;
/* buffer holds the test data plus the trailing shared info struct */
2340 uint16_t buf_len = EXT_BUF_TEST_DATA_LEN +
2341 sizeof(struct rte_mbuf_ext_shared_info);
2345 m = rte_pktmbuf_alloc(pktmbuf_pool);
2347 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2348 if (rte_pktmbuf_pkt_len(m) != 0)
2349 GOTO_FAIL("%s: Bad packet length\n", __func__);
2350 rte_mbuf_sanity_check(m, 0);
2352 ext_buf_addr = rte_malloc("External buffer", buf_len,
2353 RTE_CACHE_LINE_SIZE);
2354 if (ext_buf_addr == NULL)
2355 GOTO_FAIL("%s: External buffer allocation failed\n", __func__);
/* shinfo is carved out of the tail of the external buffer itself;
 * buf_len is updated to the usable data length */
2357 ret_shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf_addr, &buf_len,
2358 ext_buf_free_callback_fn, &freed);
2359 if (ret_shinfo == NULL)
2360 GOTO_FAIL("%s: Shared info initialization failed!\n", __func__);
2362 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
2363 GOTO_FAIL("%s: External refcount is not 1\n", __func__);
2365 if (rte_mbuf_refcnt_read(m) != 1)
2366 GOTO_FAIL("%s: Invalid refcnt in mbuf\n", __func__);
2368 buf_iova = rte_mem_virt2iova(ext_buf_addr);
2369 rte_pktmbuf_attach_extbuf(m, ext_buf_addr, buf_iova, buf_len,
2371 if (m->ol_flags != RTE_MBUF_F_EXTERNAL)
2372 GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
2375 /* allocate one more mbuf */
2376 clone = rte_pktmbuf_clone(m, pktmbuf_pool);
2378 GOTO_FAIL("%s: mbuf clone allocation failed!\n", __func__);
2379 if (rte_pktmbuf_pkt_len(clone) != 0)
2380 GOTO_FAIL("%s: Bad packet length\n", __func__);
2382 /* attach the same external buffer to the cloned mbuf */
2383 rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
2385 if (clone->ol_flags != RTE_MBUF_F_EXTERNAL)
2386 GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
/* two attachments -> external refcount must be 2 */
2389 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
2390 GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
2392 GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
2394 /* test to manually update ext_buf_ref_cnt from 2 to 3*/
2395 rte_mbuf_ext_refcnt_update(ret_shinfo, 1);
2396 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 3)
2397 GOTO_FAIL("%s: Update ext_buf ref_cnt failed\n", __func__);
2399 GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
2401 /* reset the ext_refcnt before freeing the external buffer */
2402 rte_mbuf_ext_refcnt_set(ret_shinfo, 2);
2403 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
2404 GOTO_FAIL("%s: set ext_buf ref_cnt failed\n", __func__);
2406 GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
2408 /* detach the external buffer from mbufs */
2409 rte_pktmbuf_detach_extbuf(m);
2410 /* check if ref cnt is decremented */
2411 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
2412 GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
2414 GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
/* last detach drops the refcount to zero and must trigger the
 * free callback, setting `freed` */
2416 rte_pktmbuf_detach_extbuf(clone);
2418 GOTO_FAIL("%s: extbuf should be freed\n", __func__);
2421 rte_pktmbuf_free(m);
2423 rte_pktmbuf_free(clone);
2430 rte_pktmbuf_free(m);
2434 rte_pktmbuf_free(clone);
2437 if (ext_buf_addr != NULL) {
2438 rte_free(ext_buf_addr);
2439 ext_buf_addr = NULL;
2445 * Test the mbuf pool with pinned external data buffers
2446 * - Allocate memory zone for external buffer
2447 * - Create the mbuf pool with pinned external buffer
2448 * - Check the created pool with relevant mbuf pool unit tests
/*
 * Pinned-external-buffer pool test: reserve a memzone for the data
 * buffers, create an mbuf pool whose data areas are pinned in that
 * memzone (rte_pktmbuf_pool_create_extbuf), then re-run the standard
 * battery of pktmbuf unit tests against the pinned pool. The memzone
 * and pool are released on both exit paths.
 */
2451 test_pktmbuf_ext_pinned_buffer(struct rte_mempool *std_pool)
2454 struct rte_pktmbuf_extmem ext_mem;
2455 struct rte_mempool *pinned_pool = NULL;
2456 const struct rte_memzone *mz = NULL;
2458 printf("Test mbuf pool with external pinned data buffers\n");
2460 /* Allocate memzone for the external data buffer */
2461 mz = rte_memzone_reserve("pinned_pool",
2462 NB_MBUF * MBUF_DATA_SIZE,
2464 RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY);
2466 GOTO_FAIL("%s: Memzone allocation failed\n", __func__);
2468 /* Create the mbuf pool with pinned external data buffer */
2469 ext_mem.buf_ptr = mz->addr;
2470 ext_mem.buf_iova = mz->iova;
2471 ext_mem.buf_len = mz->len;
2472 ext_mem.elt_size = MBUF_DATA_SIZE;
2474 pinned_pool = rte_pktmbuf_pool_create_extbuf("test_pinned_pool",
2475 NB_MBUF, MEMPOOL_CACHE_SIZE, 0,
2476 MBUF_DATA_SIZE, SOCKET_ID_ANY,
2478 if (pinned_pool == NULL)
2479 GOTO_FAIL("%s: Mbuf pool with pinned external"
2480 " buffer creation failed\n", __func__);
2481 /* test multiple mbuf alloc */
2482 if (test_pktmbuf_pool(pinned_pool) < 0)
2483 GOTO_FAIL("%s: test_mbuf_pool(pinned) failed\n",
2486 /* do it another time to check that all mbufs were freed */
2487 if (test_pktmbuf_pool(pinned_pool) < 0)
2488 GOTO_FAIL("%s: test_mbuf_pool(pinned) failed (2)\n",
2491 /* test that the data pointer on a packet mbuf is set properly */
2492 if (test_pktmbuf_pool_ptr(pinned_pool) < 0)
2493 GOTO_FAIL("%s: test_pktmbuf_pool_ptr(pinned) failed\n",
2496 /* test data manipulation in mbuf with non-ascii data */
2497 if (test_pktmbuf_with_non_ascii_data(pinned_pool) < 0)
2498 GOTO_FAIL("%s: test_pktmbuf_with_non_ascii_data(pinned)"
2499 " failed\n", __func__);
2501 /* test free pktmbuf segment one by one */
2502 if (test_pktmbuf_free_segment(pinned_pool) < 0)
2503 GOTO_FAIL("%s: test_pktmbuf_free_segment(pinned) failed\n",
2506 if (testclone_testupdate_testdetach(pinned_pool, std_pool) < 0)
2507 GOTO_FAIL("%s: testclone_and_testupdate(pinned) failed\n",
2510 if (test_pktmbuf_copy(pinned_pool, std_pool) < 0)
2511 GOTO_FAIL("%s: test_pktmbuf_copy(pinned) failed\n",
2514 if (test_failing_mbuf_sanity_check(pinned_pool) < 0)
2515 GOTO_FAIL("%s: test_failing_mbuf_sanity_check(pinned)"
2516 " failed\n", __func__);
2518 if (test_mbuf_linearize_check(pinned_pool) < 0)
2519 GOTO_FAIL("%s: test_mbuf_linearize_check(pinned) failed\n",
2522 /* test for allocating a bulk of mbufs with various sizes */
2523 if (test_pktmbuf_alloc_bulk(pinned_pool) < 0)
2524 GOTO_FAIL("%s: test_rte_pktmbuf_alloc_bulk(pinned) failed\n",
2527 /* test for allocating a bulk of mbufs with various sizes */
2528 if (test_neg_pktmbuf_alloc_bulk(pinned_pool) < 0)
2529 GOTO_FAIL("%s: test_neg_rte_pktmbuf_alloc_bulk(pinned)"
2530 " failed\n", __func__);
2532 /* test to read mbuf packet */
2533 if (test_pktmbuf_read(pinned_pool) < 0)
2534 GOTO_FAIL("%s: test_rte_pktmbuf_read(pinned) failed\n",
2537 /* test to read mbuf packet from offset */
2538 if (test_pktmbuf_read_from_offset(pinned_pool) < 0)
2539 GOTO_FAIL("%s: test_rte_pktmbuf_read_from_offset(pinned)"
2540 " failed\n", __func__);
2542 /* test to read data from chain of mbufs with data segments */
2543 if (test_pktmbuf_read_from_chain(pinned_pool) < 0)
2544 GOTO_FAIL("%s: test_rte_pktmbuf_read_from_chain(pinned)"
2545 " failed\n", __func__);
2547 RTE_SET_USED(std_pool);
2548 rte_mempool_free(pinned_pool);
2549 rte_memzone_free(mz);
2553 rte_mempool_free(pinned_pool);
2554 rte_memzone_free(mz);
/*
 * Exercise registration and use of mbuf dynamic fields and flags.
 *
 * @param pktmbuf_pool
 *   Pool used to allocate the single mbuf on which a dynamic field
 *   value and a dynamic flag are written and read back.
 *
 * On any check failure GOTO_FAIL() jumps to the error path, which
 * frees the mbuf (see the second rte_pktmbuf_free() at the bottom).
 *
 * NOTE(review): several original lines (if-conditions, braces, the
 * fail: label and return statements) are elided in this excerpt;
 * the retained lines are byte-identical to the original.
 */
2559 test_mbuf_dyn(struct rte_mempool *pktmbuf_pool)
/* Valid descriptors: a uint8_t field and a uint16_t field. */
2561 const struct rte_mbuf_dynfield dynfield = {
2562 .name = "test-dynfield",
2563 .size = sizeof(uint8_t),
2564 .align = __alignof__(uint8_t),
2567 const struct rte_mbuf_dynfield dynfield2 = {
2568 .name = "test-dynfield2",
2569 .size = sizeof(uint16_t),
2570 .align = __alignof__(uint16_t),
/* Third valid field, registered at an explicit offset below. */
2573 const struct rte_mbuf_dynfield dynfield3 = {
2574 .name = "test-dynfield3",
2575 .size = sizeof(uint8_t),
2576 .align = __alignof__(uint8_t),
/* Deliberately invalid descriptors: oversized, misaligned, bad flags. */
2579 const struct rte_mbuf_dynfield dynfield_fail_big = {
2580 .name = "test-dynfield-fail-big",
2585 const struct rte_mbuf_dynfield dynfield_fail_align = {
2586 .name = "test-dynfield-fail-align",
/* Reuses the name "test-dynfield" but with a bad flag set (flag
 * initializer elided here) — registration must be rejected. */
2591 const struct rte_mbuf_dynfield dynfield_fail_flag = {
2592 .name = "test-dynfield",
2593 .size = sizeof(uint8_t),
2594 .align = __alignof__(uint8_t),
2597 const struct rte_mbuf_dynflag dynflag_fail_flag = {
2598 .name = "test-dynflag",
/* Valid dynamic flags; dynflag3 is pinned to a bit number below. */
2601 const struct rte_mbuf_dynflag dynflag = {
2602 .name = "test-dynflag",
2605 const struct rte_mbuf_dynflag dynflag2 = {
2606 .name = "test-dynflag2",
2609 const struct rte_mbuf_dynflag dynflag3 = {
2610 .name = "test-dynflag3",
2613 struct rte_mbuf *m = NULL;
2614 int offset, offset2, offset3;
2615 int flag, flag2, flag3;
2618 printf("Test mbuf dynamic fields and flags\n");
/* Dump registry state before the test for debugging. */
2619 rte_mbuf_dyn_dump(stdout);
/* First registration must succeed (failure check elided). */
2621 offset = rte_mbuf_dynfield_register(&dynfield);
2623 GOTO_FAIL("failed to register dynamic field, offset=%d: %s",
2624 offset, strerror(errno));
/* Re-registering the same descriptor acts as a lookup and must
 * return the same offset (comparison line elided). */
2626 ret = rte_mbuf_dynfield_register(&dynfield);
2628 GOTO_FAIL("failed to lookup dynamic field, ret=%d: %s",
2629 ret, strerror(errno));
/* Second field must get a distinct, 2-byte-aligned offset
 * (uint16_t alignment => low bit clear). */
2631 offset2 = rte_mbuf_dynfield_register(&dynfield2);
2632 if (offset2 == -1 || offset2 == offset || (offset2 & 1))
2633 GOTO_FAIL("failed to register dynamic field 2, offset2=%d: %s",
2634 offset2, strerror(errno));
/* Register at a caller-chosen offset inside dynfield1[]; EBUSY is
 * tolerated (another user may already own that byte). */
2636 offset3 = rte_mbuf_dynfield_register_offset(&dynfield3,
2637 offsetof(struct rte_mbuf, dynfield1[1]));
2638 if (offset3 != offsetof(struct rte_mbuf, dynfield1[1])) {
2639 if (rte_errno == EBUSY)
2640 printf("mbuf test error skipped: dynfield is busy\n");
2642 GOTO_FAIL("failed to register dynamic field 3, offset="
2643 "%d: %s", offset3, strerror(errno));
2646 printf("dynfield: offset=%d, offset2=%d, offset3=%d\n",
2647 offset, offset2, offset3);
/* Negative cases: each bad descriptor must be rejected
 * (the checks on 'ret' are elided in this excerpt). */
2649 ret = rte_mbuf_dynfield_register(&dynfield_fail_big);
2651 GOTO_FAIL("dynamic field creation should fail (too big)");
2653 ret = rte_mbuf_dynfield_register(&dynfield_fail_align);
2655 GOTO_FAIL("dynamic field creation should fail (bad alignment)");
/* ol_flags is a static mbuf field, so registering there must fail. */
2657 ret = rte_mbuf_dynfield_register_offset(&dynfield_fail_align,
2658 offsetof(struct rte_mbuf, ol_flags));
2660 GOTO_FAIL("dynamic field creation should fail (not avail)");
2662 ret = rte_mbuf_dynfield_register(&dynfield_fail_flag);
2664 GOTO_FAIL("dynamic field creation should fail (invalid flag)");
2666 ret = rte_mbuf_dynflag_register(&dynflag_fail_flag);
2668 GOTO_FAIL("dynamic flag creation should fail (invalid flag)");
/* Dynamic flags: register, lookup-by-reregister, second distinct
 * flag — mirrors the dynamic-field sequence above. */
2670 flag = rte_mbuf_dynflag_register(&dynflag);
2672 GOTO_FAIL("failed to register dynamic flag, flag=%d: %s",
2673 flag, strerror(errno));
2675 ret = rte_mbuf_dynflag_register(&dynflag);
2677 GOTO_FAIL("failed to lookup dynamic flag, ret=%d: %s",
2678 ret, strerror(errno));
2680 flag2 = rte_mbuf_dynflag_register(&dynflag2);
2681 if (flag2 == -1 || flag2 == flag)
2682 GOTO_FAIL("failed to register dynamic flag 2, flag2=%d: %s",
2683 flag2, strerror(errno));
/* Pin dynflag3 to the bit of RTE_MBUF_F_LAST_FREE explicitly. */
2685 flag3 = rte_mbuf_dynflag_register_bitnum(&dynflag3,
2686 rte_bsf64(RTE_MBUF_F_LAST_FREE));
2687 if (flag3 != rte_bsf64(RTE_MBUF_F_LAST_FREE))
2688 GOTO_FAIL("failed to register dynamic flag 3, flag3=%d: %s",
2689 flag3, strerror(errno));
2691 printf("dynflag: flag=%d, flag2=%d, flag3=%d\n", flag, flag2, flag3);
2693 /* set, get dynamic field */
2694 m = rte_pktmbuf_alloc(pktmbuf_pool);
2696 GOTO_FAIL("Cannot allocate mbuf");
/* Write then read back each field through its registered offset. */
2698 *RTE_MBUF_DYNFIELD(m, offset, uint8_t *) = 1;
2699 if (*RTE_MBUF_DYNFIELD(m, offset, uint8_t *) != 1)
2700 GOTO_FAIL("failed to read dynamic field");
2701 *RTE_MBUF_DYNFIELD(m, offset2, uint16_t *) = 1000;
2702 if (*RTE_MBUF_DYNFIELD(m, offset2, uint16_t *) != 1000)
2703 GOTO_FAIL("failed to read dynamic field");
2705 /* set a dynamic flag */
2706 m->ol_flags |= (1ULL << flag);
/* Dump registry state again (now with the test entries) and
 * free the mbuf on the success path. */
2708 rte_mbuf_dyn_dump(stdout);
2709 rte_pktmbuf_free(m);
/* Error path (fail: label elided): free the mbuf before returning. */
2712 rte_pktmbuf_free(m);
2716 /* check that m->nb_segs and m->next are reset on mbuf free */
/*
 * Builds a 3-segment chain, frees it, then raw-allocates the same
 * three mbufs again and verifies nb_segs == 1 and next == NULL on
 * each — i.e. that rte_pktmbuf_free() sanitized the chained mbufs
 * before returning them to the pool.
 *
 * NOTE(review): the chain-split lines (orig 2751-2754) and the
 * return/fail lines after 2776 are elided in this excerpt.
 */
2718 test_nb_segs_and_next_reset(void)
2720 struct rte_mbuf *m0 = NULL, *m1 = NULL, *m2 = NULL;
2721 struct rte_mempool *pool = NULL;
/* Tiny private pool (3 mbufs, no cache) so the raw-alloc below is
 * guaranteed to hand back the very same mbufs that were freed. */
2723 pool = rte_pktmbuf_pool_create("test_mbuf_reset",
2724 3, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
2726 GOTO_FAIL("Failed to create mbuf pool");
2729 m0 = rte_pktmbuf_alloc(pool);
2730 m1 = rte_pktmbuf_alloc(pool);
2731 m2 = rte_pktmbuf_alloc(pool);
2732 if (m0 == NULL || m1 == NULL || m2 == NULL)
2733 GOTO_FAIL("Failed to allocate mbuf");
2735 /* append data in all of them */
2736 if (rte_pktmbuf_append(m0, 500) == NULL ||
2737 rte_pktmbuf_append(m1, 500) == NULL ||
2738 rte_pktmbuf_append(m2, 500) == NULL)
2739 GOTO_FAIL("Failed to append data in mbuf");
2741 /* chain them in one mbuf m0 */
2742 rte_pktmbuf_chain(m1, m2);
2743 rte_pktmbuf_chain(m0, m1);
/* Head must now describe the 3-segment chain m0 -> m1 -> m2
 * (rest of the condition elided in this excerpt). */
2744 if (m0->nb_segs != 3 || m0->next != m1 || m1->next != m2 ||
2747 GOTO_FAIL("Failed to chain mbufs");
2750 /* split m0 chain in two, between m1 and m2 */
2755 /* free the 2 mbuf chains m0 and m2 */
2756 rte_pktmbuf_free(m0);
2757 rte_pktmbuf_free(m2);
2759 /* realloc the 3 mbufs */
/* raw_alloc skips pktmbuf reset, so any stale nb_segs/next left by
 * the free above would be visible here — that is the point. */
2760 m0 = rte_mbuf_raw_alloc(pool);
2761 m1 = rte_mbuf_raw_alloc(pool);
2762 m2 = rte_mbuf_raw_alloc(pool);
2763 if (m0 == NULL || m1 == NULL || m2 == NULL)
2764 GOTO_FAIL("Failed to reallocate mbuf");
2766 /* ensure that m->next and m->nb_segs are reset allocated mbufs */
2767 if (m0->nb_segs != 1 || m0->next != NULL ||
2768 m1->nb_segs != 1 || m1->next != NULL ||
2769 m2->nb_segs != 1 || m2->next != NULL)
2770 GOTO_FAIL("nb_segs or next was not reset properly");
/* Cleanup (mbuf frees / fail label elided): release the pool. */
2776 rte_mempool_free(pool);
/*
 * Body of the mbuf autotest entry point (the test_mbuf() signature
 * line is elided in this excerpt).  Creates the two pktmbuf pools
 * shared by the sub-tests, then runs each sub-test in sequence,
 * printing a diagnostic and bailing to the error path on the first
 * failure.  The 'goto err' / return lines between the if-blocks are
 * elided here; the retained lines are byte-identical.
 */
2784 struct rte_mempool *pktmbuf_pool = NULL;
2785 struct rte_mempool *pktmbuf_pool2 = NULL;
/* Compile-time ABI guard: struct rte_mbuf must be exactly two
 * minimum cache lines in size. */
2788 RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != RTE_CACHE_LINE_MIN_SIZE * 2);
2790 /* create pktmbuf pool if it does not exist */
2791 pktmbuf_pool = rte_pktmbuf_pool_create("test_pktmbuf_pool",
2792 NB_MBUF, MEMPOOL_CACHE_SIZE, 0, MBUF_DATA_SIZE,
2795 if (pktmbuf_pool == NULL) {
2796 printf("cannot allocate mbuf pool\n");
2800 /* test registration of dynamic fields and flags */
2801 if (test_mbuf_dyn(pktmbuf_pool) < 0) {
2802 printf("mbuf dynflag test failed\n");
2806 /* create a specific pktmbuf pool with a priv_size != 0 and no data
 * buffers (data_room_size == 0) — see MBUF2_PRIV_SIZE. */
2808 pktmbuf_pool2 = rte_pktmbuf_pool_create("test_pktmbuf_pool2",
2809 NB_MBUF, MEMPOOL_CACHE_SIZE, MBUF2_PRIV_SIZE, 0,
2812 if (pktmbuf_pool2 == NULL) {
2813 printf("cannot allocate mbuf pool\n");
2817 /* test multiple mbuf alloc */
2818 if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
2819 printf("test_mbuf_pool() failed\n");
2823 /* do it another time to check that all mbufs were freed */
2824 if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
2825 printf("test_mbuf_pool() failed (2)\n");
2829 /* test bulk mbuf alloc and free */
2830 if (test_pktmbuf_pool_bulk() < 0) {
2831 printf("test_pktmbuf_pool_bulk() failed\n");
2835 /* test that the pointer to the data on a packet mbuf is set properly */
2836 if (test_pktmbuf_pool_ptr(pktmbuf_pool) < 0) {
2837 printf("test_pktmbuf_pool_ptr() failed\n");
2841 /* test data manipulation in mbuf */
2842 if (test_one_pktmbuf(pktmbuf_pool) < 0) {
2843 printf("test_one_mbuf() failed\n");
2849 * do it another time, to check that allocation reinitialize
2850 * the mbuf correctly
2852 if (test_one_pktmbuf(pktmbuf_pool) < 0) {
2853 printf("test_one_mbuf() failed (2)\n");
2857 if (test_pktmbuf_with_non_ascii_data(pktmbuf_pool) < 0) {
2858 printf("test_pktmbuf_with_non_ascii_data() failed\n");
2862 /* test free pktmbuf segment one by one */
2863 if (test_pktmbuf_free_segment(pktmbuf_pool) < 0) {
2864 printf("test_pktmbuf_free_segment() failed.\n");
/* clone / copy / cross-pool attach sub-tests */
2868 if (testclone_testupdate_testdetach(pktmbuf_pool, pktmbuf_pool) < 0) {
2869 printf("testclone_and_testupdate() failed \n");
2873 if (test_pktmbuf_copy(pktmbuf_pool, pktmbuf_pool) < 0) {
2874 printf("test_pktmbuf_copy() failed\n");
2878 if (test_attach_from_different_pool(pktmbuf_pool, pktmbuf_pool2) < 0) {
2879 printf("test_attach_from_different_pool() failed\n");
/* multi-core reference-count stress (see REFCNT_* constants above) */
2883 if (test_refcnt_mbuf() < 0) {
2884 printf("test_refcnt_mbuf() failed \n");
2888 if (test_failing_mbuf_sanity_check(pktmbuf_pool) < 0) {
2889 printf("test_failing_mbuf_sanity_check() failed\n");
2893 if (test_mbuf_linearize_check(pktmbuf_pool) < 0) {
2894 printf("test_mbuf_linearize_check() failed\n");
/* TX offload computation and RX/TX offload flag name/list helpers */
2898 if (test_tx_offload() < 0) {
2899 printf("test_tx_offload() failed\n");
2903 if (test_get_rx_ol_flag_list() < 0) {
2904 printf("test_rte_get_rx_ol_flag_list() failed\n");
2908 if (test_get_tx_ol_flag_list() < 0) {
2909 printf("test_rte_get_tx_ol_flag_list() failed\n");
2913 if (test_get_rx_ol_flag_name() < 0) {
2914 printf("test_rte_get_rx_ol_flag_name() failed\n");
2918 if (test_get_tx_ol_flag_name() < 0) {
2919 printf("test_rte_get_tx_ol_flag_name() failed\n");
2923 if (test_mbuf_validate_tx_offload_one(pktmbuf_pool) < 0) {
2924 printf("test_mbuf_validate_tx_offload_one() failed\n");
2928 /* test for allocating a bulk of mbufs with various sizes */
2929 if (test_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
2930 printf("test_rte_pktmbuf_alloc_bulk() failed\n");
2934 /* test for allocating a bulk of mbufs with various sizes */
2935 if (test_neg_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
2936 printf("test_neg_rte_pktmbuf_alloc_bulk() failed\n");
2940 /* test to read mbuf packet */
2941 if (test_pktmbuf_read(pktmbuf_pool) < 0) {
2942 printf("test_rte_pktmbuf_read() failed\n");
2946 /* test to read mbuf packet from offset */
2947 if (test_pktmbuf_read_from_offset(pktmbuf_pool) < 0) {
2948 printf("test_rte_pktmbuf_read_from_offset() failed\n");
2952 /* test to read data from chain of mbufs with data segments */
2953 if (test_pktmbuf_read_from_chain(pktmbuf_pool) < 0) {
2954 printf("test_rte_pktmbuf_read_from_chain() failed\n");
2958 /* test to initialize shared info. at the end of external buffer */
2959 if (test_pktmbuf_ext_shinfo_init_helper(pktmbuf_pool) < 0) {
2960 printf("test_pktmbuf_ext_shinfo_init_helper() failed\n");
2964 /* test the mbuf pool with pinned external data buffers */
2965 if (test_pktmbuf_ext_pinned_buffer(pktmbuf_pool) < 0) {
2966 printf("test_pktmbuf_ext_pinned_buffer() failed\n");
2970 /* test reset of m->nb_segs and m->next on mbuf free */
2971 if (test_nb_segs_and_next_reset() < 0) {
2972 printf("test_nb_segs_and_next_reset() failed\n");
/* Cleanup on both success and error paths (label elided): release
 * the two pools created above. */
2978 rte_mempool_free(pktmbuf_pool);
2979 rte_mempool_free(pktmbuf_pool2);
/* Register the suite with the DPDK test harness as "mbuf_autotest". */
2984 REGISTER_TEST_COMMAND(mbuf_autotest, test_mbuf);