1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
12 #include <sys/queue.h>
14 #include <rte_common.h>
15 #include <rte_errno.h>
16 #include <rte_debug.h>
18 #include <rte_memory.h>
19 #include <rte_memcpy.h>
20 #include <rte_launch.h>
22 #include <rte_per_lcore.h>
23 #include <rte_lcore.h>
24 #include <rte_atomic.h>
25 #include <rte_branch_prediction.h>
27 #include <rte_mempool.h>
29 #include <rte_random.h>
30 #include <rte_cycles.h>
31 #include <rte_malloc.h>
32 #include <rte_ether.h>
35 #include <rte_mbuf_dyn.h>
39 #define MEMPOOL_CACHE_SIZE 32
40 #define MBUF_DATA_SIZE 2048
42 #define MBUF_TEST_DATA_LEN 1464
43 #define MBUF_TEST_DATA_LEN2 50
44 #define MBUF_TEST_DATA_LEN3 256
45 #define MBUF_TEST_HDR1_LEN 20
46 #define MBUF_TEST_HDR2_LEN 30
47 #define MBUF_TEST_ALL_HDRS_LEN (MBUF_TEST_HDR1_LEN+MBUF_TEST_HDR2_LEN)
48 #define MBUF_TEST_SEG_SIZE 64
49 #define MBUF_TEST_BURST 8
50 #define EXT_BUF_TEST_DATA_LEN 1024
51 #define MBUF_MAX_SEG 16
52 #define MBUF_NO_HEADER 0
54 #define MBUF_NEG_TEST_READ 2
55 #define VAL_NAME(flag) { flag, #flag }
57 /* chain length in bulk test */
60 /* size of private data for mbuf in pktmbuf_pool2 */
61 #define MBUF2_PRIV_SIZE 128
63 #define REFCNT_MAX_ITER 64
64 #define REFCNT_MAX_TIMEOUT 10
65 #define REFCNT_MAX_REF (RTE_MAX_LCORE)
66 #define REFCNT_MBUF_NUM 64
67 #define REFCNT_RING_SIZE (REFCNT_MBUF_NUM * REFCNT_MAX_REF)
69 #define MAGIC_DATA 0x42424242
71 #define MAKE_STRING(x) # x
73 #ifdef RTE_MBUF_REFCNT_ATOMIC
75 static volatile uint32_t refcnt_stop_workers;
76 static unsigned refcnt_lcore[RTE_MAX_LCORE];
84 * #. Allocate a mbuf pool.
86 * - The pool contains NB_MBUF elements, where each mbuf is MBUF_SIZE
89 * #. Test multiple allocations of mbufs from this pool.
91 * - Allocate NB_MBUF and store pointers in a table.
92 * - If an allocation fails, return an error.
93 * - Free all these mbufs.
94 * - Repeat the same test to check that mbufs were freed correctly.
96 * #. Test data manipulation in pktmbuf.
99 * - Append data using rte_pktmbuf_append().
100 * - Test for error in rte_pktmbuf_append() when len is too large.
101 * - Trim data at the end of mbuf using rte_pktmbuf_trim().
102 * - Test for error in rte_pktmbuf_trim() when len is too large.
103 * - Prepend a header using rte_pktmbuf_prepend().
104 * - Test for error in rte_pktmbuf_prepend() when len is too large.
105 * - Remove data at the beginning of mbuf using rte_pktmbuf_adj().
106 * - Test for error in rte_pktmbuf_adj() when len is too large.
107 * - Check that appended data is not corrupt.
109 * - Between all these tests, check data_len and pkt_len, and
110 * that the mbuf is contiguous.
111 * - Repeat the test to check that allocation operations
112 * reinitialize the mbuf correctly.
114 * #. Test packet cloning
115 * - Clone a mbuf and verify the data
116 * - Clone the cloned mbuf and verify the data
117 * - Attach a mbuf to another that does not have the same priv_size.
/* Report a test failure with the current source line, then jump to the
 * function-local failure label.
 * NOTE(review): the tail of this macro body ("goto fail;" and the closing
 * "} while (0)") is not visible in this excerpt — confirm in full source.
 */
120 #define GOTO_FAIL(str, ...) do { \
121 printf("mbuf test FAILED (l.%d): <" str ">\n", \
122 __LINE__, ##__VA_ARGS__); \
127 * test data manipulation in mbuf with non-ascii data
/*
 * Exercise the pktmbuf API with a non-ASCII payload: allocate one mbuf,
 * append MBUF_TEST_DATA_LEN bytes, fill them with byte 0xff, verify the
 * packet/data lengths and contiguity, then dump the buffer to stdout.
 * NOTE(review): several guard lines (e.g. NULL checks after alloc/append)
 * were dropped in this excerpt — confirm against the full source.
 */
130 test_pktmbuf_with_non_ascii_data(struct rte_mempool *pktmbuf_pool)
132 struct rte_mbuf *m = NULL;
135 m = rte_pktmbuf_alloc(pktmbuf_pool);
137 GOTO_FAIL("Cannot allocate mbuf");
138 if (rte_pktmbuf_pkt_len(m) != 0)
139 GOTO_FAIL("Bad length");
141 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
143 GOTO_FAIL("Cannot append data");
144 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
145 GOTO_FAIL("Bad pkt length");
146 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
147 GOTO_FAIL("Bad data length");
/* 0xff is deliberately outside the printable ASCII range */
148 memset(data, 0xff, rte_pktmbuf_pkt_len(m));
149 if (!rte_pktmbuf_is_contiguous(m))
150 GOTO_FAIL("Buffer should be continuous");
151 rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN);
165 * test data manipulation in mbuf
/*
 * Walk the basic single-mbuf data-path API on one mbuf from the pool:
 * append (plus an oversized append that must fail), trim (plus an
 * oversized trim), prepend two headers (plus an oversized prepend),
 * adj to strip the headers (plus an oversized adj), sanity checks,
 * and a final check that the payload bytes (0x66) survived intact.
 */
168 test_one_pktmbuf(struct rte_mempool *pktmbuf_pool)
170 struct rte_mbuf *m = NULL;
171 char *data, *data2, *hdr;
174 printf("Test pktmbuf API\n");
178 m = rte_pktmbuf_alloc(pktmbuf_pool);
180 GOTO_FAIL("Cannot allocate mbuf");
181 if (rte_pktmbuf_pkt_len(m) != 0)
182 GOTO_FAIL("Bad length");
184 rte_pktmbuf_dump(stdout, m, 0);
188 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
190 GOTO_FAIL("Cannot append data");
191 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
192 GOTO_FAIL("Bad pkt length");
193 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
194 GOTO_FAIL("Bad data length");
195 memset(data, 0x66, rte_pktmbuf_pkt_len(m));
196 if (!rte_pktmbuf_is_contiguous(m))
197 GOTO_FAIL("Buffer should be continuous");
/* dumping more bytes than the packet holds must be handled gracefully */
198 rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN);
199 rte_pktmbuf_dump(stdout, m, 2*MBUF_TEST_DATA_LEN);
201 /* this append should fail */
203 data2 = rte_pktmbuf_append(m, (uint16_t)(rte_pktmbuf_tailroom(m) + 1));
205 GOTO_FAIL("Append should not succeed");
207 /* append some more data */
209 data2 = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
211 GOTO_FAIL("Cannot append data");
212 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
213 GOTO_FAIL("Bad pkt length");
214 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
215 GOTO_FAIL("Bad data length");
216 if (!rte_pktmbuf_is_contiguous(m))
217 GOTO_FAIL("Buffer should be continuous");
219 /* trim data at the end of mbuf */
221 if (rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN2) < 0)
222 GOTO_FAIL("Cannot trim data");
223 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
224 GOTO_FAIL("Bad pkt length");
225 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
226 GOTO_FAIL("Bad data length");
227 if (!rte_pktmbuf_is_contiguous(m))
228 GOTO_FAIL("Buffer should be continuous");
230 /* this trim should fail */
232 if (rte_pktmbuf_trim(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) == 0)
233 GOTO_FAIL("trim should not succeed");
235 /* prepend one header */
237 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN);
239 GOTO_FAIL("Cannot prepend");
240 if (data - hdr != MBUF_TEST_HDR1_LEN)
241 GOTO_FAIL("Prepend failed");
242 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
243 GOTO_FAIL("Bad pkt length");
244 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
245 GOTO_FAIL("Bad data length");
246 if (!rte_pktmbuf_is_contiguous(m))
247 GOTO_FAIL("Buffer should be continuous");
248 memset(hdr, 0x55, MBUF_TEST_HDR1_LEN);
250 /* prepend another header */
252 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR2_LEN);
254 GOTO_FAIL("Cannot prepend");
255 if (data - hdr != MBUF_TEST_ALL_HDRS_LEN)
256 GOTO_FAIL("Prepend failed");
257 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
258 GOTO_FAIL("Bad pkt length");
259 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
260 GOTO_FAIL("Bad data length");
261 if (!rte_pktmbuf_is_contiguous(m))
262 GOTO_FAIL("Buffer should be continuous");
263 memset(hdr, 0x55, MBUF_TEST_HDR2_LEN);
/* run the sanity check both with is_header=1 and is_header=0 */
265 rte_mbuf_sanity_check(m, 1);
266 rte_mbuf_sanity_check(m, 0);
267 rte_pktmbuf_dump(stdout, m, 0);
269 /* this prepend should fail */
271 hdr = rte_pktmbuf_prepend(m, (uint16_t)(rte_pktmbuf_headroom(m) + 1));
273 GOTO_FAIL("prepend should not succeed");
275 /* remove data at beginning of mbuf (adj) */
277 if (data != rte_pktmbuf_adj(m, MBUF_TEST_ALL_HDRS_LEN))
278 GOTO_FAIL("rte_pktmbuf_adj failed");
279 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
280 GOTO_FAIL("Bad pkt length");
281 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
282 GOTO_FAIL("Bad data length");
283 if (!rte_pktmbuf_is_contiguous(m))
284 GOTO_FAIL("Buffer should be continuous");
286 /* this adj should fail */
288 if (rte_pktmbuf_adj(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) != NULL)
289 GOTO_FAIL("rte_pktmbuf_adj should not succeed");
293 if (!rte_pktmbuf_is_contiguous(m))
294 GOTO_FAIL("Buffer should be continuous");
/* verify the 0x66 fill pattern survived all the header manipulation */
296 for (i=0; i<MBUF_TEST_DATA_LEN; i++) {
298 GOTO_FAIL("Data corrupted at offset %u", i);
/*
 * Read the reference counter relevant to this mbuf: for mbufs with a
 * pinned external buffer the count lives in the shared info (shinfo),
 * otherwise in the mbuf's own refcnt field.
 */
314 testclone_refcnt_read(struct rte_mbuf *m)
316 return RTE_MBUF_HAS_PINNED_EXTBUF(m) ?
317 rte_mbuf_ext_refcnt_read(m->shinfo) :
318 rte_mbuf_refcnt_read(m);
/*
 * Clone tests: clone a single-segment mbuf and verify the MAGIC_DATA
 * payload and refcnt (2), then repeat with a two-segment chain, then
 * clone the clone and verify the refcnts climb to 3. Frees everything
 * on both the success and failure paths.
 * NOTE(review): the lines writing MAGIC_DATA through "data" after each
 * mtod are not visible in this excerpt — confirm in full source.
 */
322 testclone_testupdate_testdetach(struct rte_mempool *pktmbuf_pool,
323 struct rte_mempool *clone_pool)
325 struct rte_mbuf *m = NULL;
326 struct rte_mbuf *clone = NULL;
327 struct rte_mbuf *clone2 = NULL;
328 unaligned_uint32_t *data;
331 m = rte_pktmbuf_alloc(pktmbuf_pool);
333 GOTO_FAIL("ooops not allocating mbuf");
335 if (rte_pktmbuf_pkt_len(m) != 0)
336 GOTO_FAIL("Bad length");
338 rte_pktmbuf_append(m, sizeof(uint32_t));
339 data = rte_pktmbuf_mtod(m, unaligned_uint32_t *);
342 /* clone the allocated mbuf */
343 clone = rte_pktmbuf_clone(m, clone_pool);
345 GOTO_FAIL("cannot clone data\n");
347 data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *);
348 if (*data != MAGIC_DATA)
349 GOTO_FAIL("invalid data in clone\n");
/* original + clone share the data, so the refcount must be 2 */
351 if (testclone_refcnt_read(m) != 2)
352 GOTO_FAIL("invalid refcnt in m\n");
355 rte_pktmbuf_free(clone);
358 /* same test with a chained mbuf */
359 m->next = rte_pktmbuf_alloc(pktmbuf_pool);
361 GOTO_FAIL("Next Pkt Null\n");
364 rte_pktmbuf_append(m->next, sizeof(uint32_t));
365 m->pkt_len = 2 * sizeof(uint32_t);
367 data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
370 clone = rte_pktmbuf_clone(m, clone_pool);
372 GOTO_FAIL("cannot clone data\n");
374 data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *);
375 if (*data != MAGIC_DATA)
376 GOTO_FAIL("invalid data in clone\n");
378 data = rte_pktmbuf_mtod(clone->next, unaligned_uint32_t *);
379 if (*data != MAGIC_DATA)
380 GOTO_FAIL("invalid data in clone->next\n");
382 if (testclone_refcnt_read(m) != 2)
383 GOTO_FAIL("invalid refcnt in m\n");
385 if (testclone_refcnt_read(m->next) != 2)
386 GOTO_FAIL("invalid refcnt in m->next\n");
388 /* try to clone the clone */
390 clone2 = rte_pktmbuf_clone(clone, clone_pool);
392 GOTO_FAIL("cannot clone the clone\n");
394 data = rte_pktmbuf_mtod(clone2, unaligned_uint32_t *);
395 if (*data != MAGIC_DATA)
396 GOTO_FAIL("invalid data in clone2\n");
398 data = rte_pktmbuf_mtod(clone2->next, unaligned_uint32_t *);
399 if (*data != MAGIC_DATA)
400 GOTO_FAIL("invalid data in clone2->next\n");
/* original + two clones -> each segment's refcount must be 3 */
402 if (testclone_refcnt_read(m) != 3)
403 GOTO_FAIL("invalid refcnt in m\n");
405 if (testclone_refcnt_read(m->next) != 3)
406 GOTO_FAIL("invalid refcnt in m->next\n");
410 rte_pktmbuf_free(clone);
411 rte_pktmbuf_free(clone2);
416 printf("%s ok\n", __func__);
/* failure path: release whatever was allocated before the error */
423 rte_pktmbuf_free(clone);
425 rte_pktmbuf_free(clone2);
/*
 * rte_pktmbuf_copy() tests: copy a single-segment mbuf, a cloned mbuf
 * (the copy must be a plain, non-cloned mbuf), and a two-segment chain
 * (the copy must be flattened into one segment); then verify copies
 * with a non-zero offset and with a truncating length.
 */
430 test_pktmbuf_copy(struct rte_mempool *pktmbuf_pool,
431 struct rte_mempool *clone_pool)
433 struct rte_mbuf *m = NULL;
434 struct rte_mbuf *copy = NULL;
435 struct rte_mbuf *copy2 = NULL;
436 struct rte_mbuf *clone = NULL;
437 unaligned_uint32_t *data;
440 m = rte_pktmbuf_alloc(pktmbuf_pool);
442 GOTO_FAIL("ooops not allocating mbuf");
444 if (rte_pktmbuf_pkt_len(m) != 0)
445 GOTO_FAIL("Bad length");
447 rte_pktmbuf_append(m, sizeof(uint32_t));
448 data = rte_pktmbuf_mtod(m, unaligned_uint32_t *);
451 /* copy the allocated mbuf */
/* offset 0, UINT32_MAX length = copy the whole packet */
452 copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX);
454 GOTO_FAIL("cannot copy data\n");
456 if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t))
457 GOTO_FAIL("copy length incorrect\n");
459 if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t))
460 GOTO_FAIL("copy data length incorrect\n");
462 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
463 if (*data != MAGIC_DATA)
464 GOTO_FAIL("invalid data in copy\n");
467 rte_pktmbuf_free(copy);
470 /* same test with a cloned mbuf */
471 clone = rte_pktmbuf_clone(m, clone_pool);
473 GOTO_FAIL("cannot clone data\n");
/* pinned-extbuf pools produce EXTBUF clones instead of indirect ones */
475 if ((!RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
476 !RTE_MBUF_CLONED(clone)) ||
477 (RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
478 !RTE_MBUF_HAS_EXTBUF(clone)))
479 GOTO_FAIL("clone did not give a cloned mbuf\n");
481 copy = rte_pktmbuf_copy(clone, pktmbuf_pool, 0, UINT32_MAX);
483 GOTO_FAIL("cannot copy cloned mbuf\n");
/* a copy must always be a self-contained (non-indirect) mbuf */
485 if (RTE_MBUF_CLONED(copy))
486 GOTO_FAIL("copy of clone is cloned?\n");
488 if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t))
489 GOTO_FAIL("copy clone length incorrect\n");
491 if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t))
492 GOTO_FAIL("copy clone data length incorrect\n");
494 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
495 if (*data != MAGIC_DATA)
496 GOTO_FAIL("invalid data in clone copy\n");
497 rte_pktmbuf_free(clone);
498 rte_pktmbuf_free(copy);
503 /* same test with a chained mbuf */
504 m->next = rte_pktmbuf_alloc(pktmbuf_pool);
506 GOTO_FAIL("Next Pkt Null\n");
509 rte_pktmbuf_append(m->next, sizeof(uint32_t));
510 m->pkt_len = 2 * sizeof(uint32_t);
/* second segment carries MAGIC_DATA + 1 so segment order is checkable */
511 data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
512 *data = MAGIC_DATA + 1;
514 copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX);
516 GOTO_FAIL("cannot copy data\n");
518 if (rte_pktmbuf_pkt_len(copy) != 2 * sizeof(uint32_t))
519 GOTO_FAIL("chain copy length incorrect\n");
/* data_len == pkt_len proves the chain was flattened into one segment */
521 if (rte_pktmbuf_data_len(copy) != 2 * sizeof(uint32_t))
522 GOTO_FAIL("chain copy data length incorrect\n");
524 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
525 if (data[0] != MAGIC_DATA || data[1] != MAGIC_DATA + 1)
526 GOTO_FAIL("invalid data in copy\n");
/* NOTE(review): copy2 is still NULL here, so this free should be a no-op
 * — confirm intent in full source */
528 rte_pktmbuf_free(copy2);
530 /* test offset copy */
531 copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool,
532 sizeof(uint32_t), UINT32_MAX);
534 GOTO_FAIL("cannot copy the copy\n");
536 if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t))
537 GOTO_FAIL("copy with offset, length incorrect\n");
539 if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t))
540 GOTO_FAIL("copy with offset, data length incorrect\n");
/* offset skipped the first word, so only the second word remains */
542 data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *);
543 if (data[0] != MAGIC_DATA + 1)
544 GOTO_FAIL("copy with offset, invalid data\n");
546 rte_pktmbuf_free(copy2);
548 /* test truncation copy */
549 copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool,
550 0, sizeof(uint32_t));
552 GOTO_FAIL("cannot copy the copy\n");
554 if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t))
555 GOTO_FAIL("copy with truncate, length incorrect\n");
557 if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t))
558 GOTO_FAIL("copy with truncate, data length incorrect\n");
560 data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *);
561 if (data[0] != MAGIC_DATA)
562 GOTO_FAIL("copy with truncate, invalid data\n");
566 rte_pktmbuf_free(copy);
567 rte_pktmbuf_free(copy2);
572 printf("%s ok\n", __func__);
/* failure path: release whatever was allocated before the error */
579 rte_pktmbuf_free(copy);
581 rte_pktmbuf_free(copy2);
/*
 * Attach/detach across pools with different priv sizes: pktmbuf_pool2 has
 * zero data room and MBUF2_PRIV_SIZE bytes of private area. Verify that
 * attach() makes the clone point at the donor's data (and headroom), that
 * the donor's refcount tracks each attach/detach, and that detach()
 * restores each clone's original data pointer.
 */
586 test_attach_from_different_pool(struct rte_mempool *pktmbuf_pool,
587 struct rte_mempool *pktmbuf_pool2)
589 struct rte_mbuf *m = NULL;
590 struct rte_mbuf *clone = NULL;
591 struct rte_mbuf *clone2 = NULL;
592 char *data, *c_data, *c_data2;
595 m = rte_pktmbuf_alloc(pktmbuf_pool);
597 GOTO_FAIL("cannot allocate mbuf");
599 if (rte_pktmbuf_pkt_len(m) != 0)
600 GOTO_FAIL("Bad length");
602 data = rte_pktmbuf_mtod(m, char *);
604 /* allocate a new mbuf from the second pool, and attach it to the first
606 clone = rte_pktmbuf_alloc(pktmbuf_pool2);
608 GOTO_FAIL("cannot allocate mbuf from second pool\n");
610 /* check data room size and priv size, and erase priv */
611 if (rte_pktmbuf_data_room_size(clone->pool) != 0)
612 GOTO_FAIL("data room size should be 0\n");
613 if (rte_pktmbuf_priv_size(clone->pool) != MBUF2_PRIV_SIZE)
614 GOTO_FAIL("data room size should be %d\n", MBUF2_PRIV_SIZE);
/* the private area sits immediately after the rte_mbuf struct */
615 memset(clone + 1, 0, MBUF2_PRIV_SIZE);
617 /* save data pointer to compare it after detach() */
618 c_data = rte_pktmbuf_mtod(clone, char *);
619 if (c_data != (char *)clone + sizeof(*clone) + MBUF2_PRIV_SIZE)
620 GOTO_FAIL("bad data pointer in clone");
621 if (rte_pktmbuf_headroom(clone) != 0)
622 GOTO_FAIL("bad headroom in clone");
624 rte_pktmbuf_attach(clone, m);
626 if (rte_pktmbuf_mtod(clone, char *) != data)
627 GOTO_FAIL("clone was not attached properly\n");
628 if (rte_pktmbuf_headroom(clone) != RTE_PKTMBUF_HEADROOM)
629 GOTO_FAIL("bad headroom in clone after attach");
630 if (rte_mbuf_refcnt_read(m) != 2)
631 GOTO_FAIL("invalid refcnt in m\n");
633 /* allocate a new mbuf from the second pool, and attach it to the first
635 clone2 = rte_pktmbuf_alloc(pktmbuf_pool2);
637 GOTO_FAIL("cannot allocate clone2 from second pool\n");
639 /* check data room size and priv size, and erase priv */
640 if (rte_pktmbuf_data_room_size(clone2->pool) != 0)
641 GOTO_FAIL("data room size should be 0\n");
642 if (rte_pktmbuf_priv_size(clone2->pool) != MBUF2_PRIV_SIZE)
643 GOTO_FAIL("data room size should be %d\n", MBUF2_PRIV_SIZE);
644 memset(clone2 + 1, 0, MBUF2_PRIV_SIZE);
646 /* save data pointer to compare it after detach() */
647 c_data2 = rte_pktmbuf_mtod(clone2, char *);
648 if (c_data2 != (char *)clone2 + sizeof(*clone2) + MBUF2_PRIV_SIZE)
649 GOTO_FAIL("bad data pointer in clone2");
650 if (rte_pktmbuf_headroom(clone2) != 0)
651 GOTO_FAIL("bad headroom in clone2");
/* attaching to an indirect mbuf must re-attach to the underlying m */
653 rte_pktmbuf_attach(clone2, clone);
655 if (rte_pktmbuf_mtod(clone2, char *) != data)
656 GOTO_FAIL("clone2 was not attached properly\n");
657 if (rte_pktmbuf_headroom(clone2) != RTE_PKTMBUF_HEADROOM)
658 GOTO_FAIL("bad headroom in clone2 after attach");
659 if (rte_mbuf_refcnt_read(m) != 3)
660 GOTO_FAIL("invalid refcnt in m\n");
662 /* detach the clones */
663 rte_pktmbuf_detach(clone);
664 if (c_data != rte_pktmbuf_mtod(clone, char *))
665 GOTO_FAIL("clone was not detached properly\n");
666 if (rte_mbuf_refcnt_read(m) != 2)
667 GOTO_FAIL("invalid refcnt in m\n");
669 rte_pktmbuf_detach(clone2);
670 if (c_data2 != rte_pktmbuf_mtod(clone2, char *))
671 GOTO_FAIL("clone2 was not detached properly\n");
672 if (rte_mbuf_refcnt_read(m) != 1)
673 GOTO_FAIL("invalid refcnt in m\n");
675 /* free the clones and the initial mbuf */
676 rte_pktmbuf_free(clone2);
677 rte_pktmbuf_free(clone);
679 printf("%s ok\n", __func__);
/* failure path: release whatever was allocated before the error */
686 rte_pktmbuf_free(clone);
688 rte_pktmbuf_free(clone2);
693 * test allocation and free of mbufs
/*
 * Pool exhaustion test: allocate all NB_MBUF mbufs from the pool, verify
 * that one extra alloc and one extra clone both fail on the empty pool,
 * then free everything.
 */
696 test_pktmbuf_pool(struct rte_mempool *pktmbuf_pool)
699 struct rte_mbuf *m[NB_MBUF];
702 for (i=0; i<NB_MBUF; i++)
705 /* alloc NB_MBUF mbufs */
706 for (i=0; i<NB_MBUF; i++) {
707 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
709 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
/* pool is now empty: both of these allocations must fail */
713 struct rte_mbuf *extra = NULL;
714 extra = rte_pktmbuf_alloc(pktmbuf_pool);
716 printf("Error pool not empty");
719 extra = rte_pktmbuf_clone(m[0], pktmbuf_pool);
721 printf("Error pool not empty");
725 for (i=0; i<NB_MBUF; i++) {
727 rte_pktmbuf_free(m[i]);
734 * test bulk allocation and bulk free of mbufs
/*
 * Bulk alloc/free tests on two cache-less private pools:
 * 1. one bulk alloc of all mbufs, freed back in four bulk steps;
 * 2. four bulk allocs, freed back in one bulk free;
 * 3. bulk free of a single chain linking every mbuf in the pool;
 * 4. bulk free of multiple chains whose segments mix both pools.
 * Pool fill levels are checked after each phase.
 */
737 test_pktmbuf_pool_bulk(void)
739 struct rte_mempool *pool = NULL;
740 struct rte_mempool *pool2 = NULL;
743 struct rte_mbuf *mbufs[NB_MBUF];
746 /* We cannot use the preallocated mbuf pools because their caches
747 * prevent us from bulk allocating all objects in them.
748 * So we create our own mbuf pools without caches.
750 printf("Create mbuf pools for bulk allocation.\n");
751 pool = rte_pktmbuf_pool_create("test_pktmbuf_bulk",
752 NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
754 printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n",
758 pool2 = rte_pktmbuf_pool_create("test_pktmbuf_bulk2",
759 NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
761 printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n",
766 /* Preconditions: Mempools must be full. */
767 if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) {
768 printf("Test precondition failed: mempools not full\n");
771 if (!(rte_mempool_avail_count(pool) == NB_MBUF &&
772 rte_mempool_avail_count(pool2) == NB_MBUF)) {
773 printf("Test precondition failed: mempools: %u+%u != %u+%u",
774 rte_mempool_avail_count(pool),
775 rte_mempool_avail_count(pool2),
780 printf("Test single bulk alloc, followed by multiple bulk free.\n");
782 /* Bulk allocate all mbufs in the pool, in one go. */
783 ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF);
785 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
788 /* Test that they have been removed from the pool. */
789 if (!rte_mempool_empty(pool)) {
790 printf("mempool not empty\n");
793 /* Bulk free all mbufs, in four steps. */
794 RTE_BUILD_BUG_ON(NB_MBUF % 4 != 0);
795 for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) {
796 rte_pktmbuf_free_bulk(&mbufs[i], NB_MBUF / 4);
797 /* Test that they have been returned to the pool. */
798 if (rte_mempool_avail_count(pool) != i + NB_MBUF / 4) {
799 printf("mempool avail count incorrect\n");
804 printf("Test multiple bulk alloc, followed by single bulk free.\n");
806 /* Bulk allocate all mbufs in the pool, in four steps. */
807 for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) {
808 ret = rte_pktmbuf_alloc_bulk(pool, &mbufs[i], NB_MBUF / 4);
810 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
814 /* Test that they have been removed from the pool. */
815 if (!rte_mempool_empty(pool)) {
816 printf("mempool not empty\n");
819 /* Bulk free all mbufs, in one go. */
820 rte_pktmbuf_free_bulk(mbufs, NB_MBUF);
821 /* Test that they have been returned to the pool. */
822 if (!rte_mempool_full(pool)) {
823 printf("mempool not full\n");
827 printf("Test bulk free of single long chain.\n");
829 /* Bulk allocate all mbufs in the pool, in one go. */
830 ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF);
832 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
835 /* Create a long mbuf chain. */
836 for (i = 1; i < NB_MBUF; i++) {
837 ret = rte_pktmbuf_chain(mbufs[0], mbufs[i]);
839 printf("rte_pktmbuf_chain() failed: %d\n", ret);
844 /* Free the mbuf chain containing all the mbufs. */
845 rte_pktmbuf_free_bulk(mbufs, 1);
846 /* Test that they have been returned to the pool. */
847 if (!rte_mempool_full(pool)) {
848 printf("mempool not full\n");
852 printf("Test bulk free of multiple chains using multiple pools.\n");
854 /* Create mbuf chains containing mbufs from different pools. */
855 RTE_BUILD_BUG_ON(CHAIN_LEN % 2 != 0);
856 RTE_BUILD_BUG_ON(NB_MBUF % (CHAIN_LEN / 2) != 0);
857 for (i = 0; i < NB_MBUF * 2; i++) {
/* (i & 4) alternates the source pool in groups of four, so each chain
 * ends up with CHAIN_LEN/2 segments from each pool — the later
 * avail-count checks depend on this; confirm CHAIN_LEN in full source */
858 m = rte_pktmbuf_alloc((i & 4) ? pool2 : pool);
860 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
863 if ((i % CHAIN_LEN) == 0)
864 mbufs[i / CHAIN_LEN] = m;
866 rte_pktmbuf_chain(mbufs[i / CHAIN_LEN], m);
868 /* Test that both pools have been emptied. */
869 if (!(rte_mempool_empty(pool) && rte_mempool_empty(pool2))) {
870 printf("mempools not empty\n");
873 /* Free one mbuf chain. */
874 rte_pktmbuf_free_bulk(mbufs, 1);
875 /* Test that the segments have been returned to the pools. */
876 if (!(rte_mempool_avail_count(pool) == CHAIN_LEN / 2 &&
877 rte_mempool_avail_count(pool2) == CHAIN_LEN / 2)) {
878 printf("all segments of first mbuf have not been returned\n");
881 /* Free the remaining mbuf chains. */
882 rte_pktmbuf_free_bulk(&mbufs[1], NB_MBUF * 2 / CHAIN_LEN - 1);
883 /* Test that they have been returned to the pools. */
884 if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) {
885 printf("mempools not full\n");
896 printf("Free mbuf pools for bulk allocation.\n");
897 rte_mempool_free(pool);
898 rte_mempool_free(pool2);
903 * test that the pointer to the data on a packet mbuf is set properly
/*
 * Check that the data offset of a packet mbuf is reset on reallocation:
 * allocate all mbufs, shift each data_off forward by 64 bytes, free them,
 * allocate again and verify data_off is back to RTE_PKTMBUF_HEADROOM.
 */
906 test_pktmbuf_pool_ptr(struct rte_mempool *pktmbuf_pool)
909 struct rte_mbuf *m[NB_MBUF];
912 for (i=0; i<NB_MBUF; i++)
915 /* alloc NB_MBUF mbufs */
916 for (i=0; i<NB_MBUF; i++) {
917 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
919 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
/* deliberately perturb the data offset before freeing */
923 m[i]->data_off += 64;
927 for (i=0; i<NB_MBUF; i++) {
929 rte_pktmbuf_free(m[i]);
932 for (i=0; i<NB_MBUF; i++)
935 /* alloc NB_MBUF mbufs */
936 for (i=0; i<NB_MBUF; i++) {
937 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
939 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
/* the alloc path must have restored the default headroom */
943 if (m[i]->data_off != RTE_PKTMBUF_HEADROOM) {
944 printf("invalid data_off\n");
950 for (i=0; i<NB_MBUF; i++) {
952 rte_pktmbuf_free(m[i]);
/*
 * Allocate all NB_MBUF mbufs, then free each one segment by segment with
 * rte_pktmbuf_free_seg() (walking the chain via mb/mt).
 * NOTE(review): the chain-walking lines between the declarations and the
 * free_seg call are not visible in this excerpt — confirm in full source.
 */
959 test_pktmbuf_free_segment(struct rte_mempool *pktmbuf_pool)
962 struct rte_mbuf *m[NB_MBUF];
965 for (i=0; i<NB_MBUF; i++)
968 /* alloc NB_MBUF mbufs */
969 for (i=0; i<NB_MBUF; i++) {
970 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
972 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
978 for (i=0; i<NB_MBUF; i++) {
980 struct rte_mbuf *mb, *mt;
986 rte_pktmbuf_free_seg(mt);
995 * Stress test for rte_mbuf atomic refcnt.
996 * Implies that RTE_MBUF_REFCNT_ATOMIC is defined.
997 * For more efficiency, recommended to run with RTE_LIBRTE_MBUF_DEBUG defined.
1000 #ifdef RTE_MBUF_REFCNT_ATOMIC
/*
 * Worker-lcore side of the atomic refcnt stress test: dequeue mbufs from
 * the shared ring and free them (each free decrements the refcount) until
 * the main lcore raises refcnt_stop_workers; record the per-lcore number
 * of frees in refcnt_lcore[] for the final cross-check.
 */
1003 test_refcnt_worker(void *arg)
1005 unsigned lcore, free;
1007 struct rte_ring *refcnt_mbuf_ring = arg;
1009 lcore = rte_lcore_id();
1010 printf("%s started at lcore %u\n", __func__, lcore);
1013 while (refcnt_stop_workers == 0) {
1014 if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) {
1016 rte_pktmbuf_free(mp);
1020 refcnt_lcore[lcore] += free;
1021 printf("%s finished at lcore %u, "
1022 "number of freed mbufs: %u\n",
1023 __func__, lcore, free);
/*
 * One iteration of the refcnt stress test on the main lcore: drain the
 * mempool, give each mbuf a random reference count and enqueue it that
 * many times for the workers to free, then wait for the ring to empty
 * and for every mbuf to return to the mempool (bounded by
 * REFCNT_MAX_TIMEOUT). Panics on allocation shortfall or leaked mbufs.
 */
1028 test_refcnt_iter(unsigned int lcore, unsigned int iter,
1029 struct rte_mempool *refcnt_pool,
1030 struct rte_ring *refcnt_mbuf_ring)
1033 unsigned i, n, tref, wn;
1038 /* For each mbuf in the pool:
1040 * - increment it's reference up to N+1,
1041 * - enqueue it N times into the ring for worker cores to free.
1043 for (i = 0, n = rte_mempool_avail_count(refcnt_pool);
1044 i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
/* at least one reference per mbuf */
1046 ref = RTE_MAX(rte_rand() % REFCNT_MAX_REF, 1UL);
/* odd refs are added in one bump, even refs one at a time — exercises
 * both the batched and the incremental refcnt_update paths */
1048 if ((ref & 1) != 0) {
1049 rte_pktmbuf_refcnt_update(m, ref);
1051 rte_ring_enqueue(refcnt_mbuf_ring, m);
1053 while (ref-- != 0) {
1054 rte_pktmbuf_refcnt_update(m, 1);
1055 rte_ring_enqueue(refcnt_mbuf_ring, m);
/* drop the allocation's own reference; workers free the rest */
1058 rte_pktmbuf_free(m);
1062 rte_panic("(lcore=%u, iter=%u): was able to allocate only "
1063 "%u from %u mbufs\n", lcore, iter, i, n);
1065 /* wait till worker lcores will consume all mbufs */
1066 while (!rte_ring_empty(refcnt_mbuf_ring))
1069 /* check that all mbufs are back into mempool by now */
1070 for (wn = 0; wn != REFCNT_MAX_TIMEOUT; wn++) {
1071 if ((i = rte_mempool_avail_count(refcnt_pool)) == n) {
1072 refcnt_lcore[lcore] += tref;
1073 printf("%s(lcore=%u, iter=%u) completed, "
1074 "%u references processed\n",
1075 __func__, lcore, iter, tref);
1081 rte_panic("(lcore=%u, iter=%u): after %us only "
1082 "%u of %u mbufs left free\n", lcore, iter, wn, i, n);
/*
 * Main-lcore driver of the refcnt stress test: run REFCNT_MAX_ITER
 * iterations of test_refcnt_iter(), then signal the workers to stop.
 */
1086 test_refcnt_main(struct rte_mempool *refcnt_pool,
1087 struct rte_ring *refcnt_mbuf_ring)
1091 lcore = rte_lcore_id();
1092 printf("%s started at lcore %u\n", __func__, lcore);
1094 for (i = 0; i != REFCNT_MAX_ITER; i++)
1095 test_refcnt_iter(lcore, i, refcnt_pool, refcnt_mbuf_ring);
/* tell test_refcnt_worker() on the other lcores to drain and exit */
1097 refcnt_stop_workers = 1;
1100 printf("%s finished at lcore %u\n", __func__, lcore);
/*
 * Entry point of the atomic-refcnt stress test (needs >= 2 lcores and
 * RTE_MBUF_REFCNT_ATOMIC): create a dedicated mempool and ring, launch
 * test_refcnt_worker() on all worker lcores, run test_refcnt_main() on
 * this lcore, then verify the workers freed exactly as many references
 * as the main lcore handed out.
 */
1107 test_refcnt_mbuf(void)
1109 #ifdef RTE_MBUF_REFCNT_ATOMIC
1110 unsigned int main_lcore, worker, tref;
1112 struct rte_mempool *refcnt_pool = NULL;
1113 struct rte_ring *refcnt_mbuf_ring = NULL;
1115 if (rte_lcore_count() < 2) {
1116 printf("Not enough cores for test_refcnt_mbuf, expecting at least 2\n");
1117 return TEST_SKIPPED;
1120 printf("starting %s, at %u lcores\n", __func__, rte_lcore_count());
1122 /* create refcnt pool & ring if they don't exist */
1124 refcnt_pool = rte_pktmbuf_pool_create(MAKE_STRING(refcnt_pool),
1125 REFCNT_MBUF_NUM, 0, 0, 0,
1127 if (refcnt_pool == NULL) {
1128 printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
/* ring must hold every outstanding reference of every mbuf */
1133 refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
1134 rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
1136 if (refcnt_mbuf_ring == NULL) {
1137 printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
1142 refcnt_stop_workers = 0;
1143 memset(refcnt_lcore, 0, sizeof (refcnt_lcore));
1145 rte_eal_mp_remote_launch(test_refcnt_worker, refcnt_mbuf_ring, SKIP_MAIN);
1147 test_refcnt_main(refcnt_pool, refcnt_mbuf_ring);
1149 rte_eal_mp_wait_lcore();
1151 /* check that we processed all references */
1153 main_lcore = rte_get_main_lcore();
1155 RTE_LCORE_FOREACH_WORKER(worker)
1156 tref += refcnt_lcore[worker];
/* workers' total frees must equal the main lcore's total references */
1158 if (tref != refcnt_lcore[main_lcore])
1159 rte_panic("referenced mbufs: %u, freed mbufs: %u\n",
1160 tref, refcnt_lcore[main_lcore]);
1162 rte_mempool_dump(stdout, refcnt_pool);
1163 rte_ring_dump(stdout, refcnt_mbuf_ring);
1168 rte_mempool_free(refcnt_pool);
1169 rte_ring_free(refcnt_mbuf_ring);
1177 #include <sys/resource.h>
1178 #include <sys/time.h>
1179 #include <sys/wait.h>
1181 /* use fork() to test mbuf errors panic */
/*
 * Run rte_mbuf_sanity_check(buf, 1) in a forked child so an expected
 * panic kills only the child. The child disables core dumps first and
 * exits 0 if no panic occurs; the parent (code not fully visible in this
 * excerpt) inspects the child's exit status to classify the outcome.
 */
1183 verify_mbuf_check_panics(struct rte_mbuf *buf)
1193 /* No need to generate a coredump when panicking. */
1194 rl.rlim_cur = rl.rlim_max = 0;
1195 setrlimit(RLIMIT_CORE, &rl);
1196 rte_mbuf_sanity_check(buf, 1); /* should panic */
1197 exit(0); /* return normally if it doesn't panic */
1198 } else if (pid < 0) {
1199 printf("Fork Failed\n");
/*
 * Negative tests for rte_mbuf_sanity_check(): a good mbuf must NOT panic,
 * then a sequence of deliberately corrupted copies (NULL mbuf, bad pool,
 * zero buf_iova, NULL buf_addr, refcnt 0, refcnt UINT16_MAX) must each
 * panic inside the forked child of verify_mbuf_check_panics().
 * NOTE(review): the lines copying *buf into badbuf and re-corrupting it
 * between checks are not visible in this excerpt — confirm in full source.
 */
1210 test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool)
1212 struct rte_mbuf *buf;
1213 struct rte_mbuf badbuf;
1215 printf("Checking rte_mbuf_sanity_check for failure conditions\n");
1217 /* get a good mbuf to use to make copies */
1218 buf = rte_pktmbuf_alloc(pktmbuf_pool);
1222 printf("Checking good mbuf initially\n");
1223 if (verify_mbuf_check_panics(buf) != -1)
1226 printf("Now checking for error conditions\n");
1228 if (verify_mbuf_check_panics(NULL)) {
1229 printf("Error with NULL mbuf test\n");
1235 if (verify_mbuf_check_panics(&badbuf)) {
1236 printf("Error with bad-pool mbuf test\n");
1241 badbuf.buf_iova = 0;
1242 if (verify_mbuf_check_panics(&badbuf)) {
1243 printf("Error with bad-physaddr mbuf test\n");
1248 badbuf.buf_addr = NULL;
1249 if (verify_mbuf_check_panics(&badbuf)) {
1250 printf("Error with bad-addr mbuf test\n");
1256 if (verify_mbuf_check_panics(&badbuf)) {
1257 printf("Error with bad-refcnt(0) mbuf test\n");
1262 badbuf.refcnt = UINT16_MAX;
1263 if (verify_mbuf_check_panics(&badbuf)) {
1264 printf("Error with bad-refcnt(MAX) mbuf test\n");
/*
 * Build a chained mbuf of pkt_len bytes split over nb_segs segments,
 * filled with the deterministic pattern (offset % 0xff), then linearize
 * it with rte_pktmbuf_linearize() and verify the result is a single
 * contiguous segment with the pattern intact.
 */
1272 test_mbuf_linearize(struct rte_mempool *pktmbuf_pool, int pkt_len,
1276 struct rte_mbuf *m = NULL, *mbuf = NULL;
1284 printf("Packet size must be 1 or more (is %d)\n", pkt_len);
1289 printf("Number of segments must be 1 or more (is %d)\n",
1294 seg_len = pkt_len / nb_segs;
1300 /* Create chained mbuf_src and fill it generated data */
1301 for (seg = 0; remain > 0; seg++) {
1303 m = rte_pktmbuf_alloc(pktmbuf_pool);
1305 printf("Cannot create segment for source mbuf");
1309 /* Make sure if tailroom is zeroed */
1310 memset(rte_pktmbuf_mtod(m, uint8_t *), 0,
1311 rte_pktmbuf_tailroom(m));
/* last segment may carry less than seg_len */
1314 if (data_len > seg_len)
1317 data = (uint8_t *)rte_pktmbuf_append(m, data_len);
1319 printf("Cannot append %d bytes to the mbuf\n",
/* pattern: byte value is the global offset modulo 0xff */
1324 for (i = 0; i < data_len; i++)
1325 data[i] = (seg * seg_len + i) % 0x0ff;
1330 rte_pktmbuf_chain(mbuf, m);
1335 /* Create destination buffer to store coalesced data */
1336 if (rte_pktmbuf_linearize(mbuf)) {
1337 printf("Mbuf linearization failed\n");
1341 if (!rte_pktmbuf_is_contiguous(mbuf)) {
1342 printf("Source buffer should be contiguous after "
1347 data = rte_pktmbuf_mtod(mbuf, uint8_t *);
1349 for (i = 0; i < pkt_len; i++)
1350 if (data[i] != (i % 0x0ff)) {
1351 printf("Incorrect data in linearized mbuf\n");
1355 rte_pktmbuf_free(mbuf);
/* failure path: release the partially built chain */
1360 rte_pktmbuf_free(mbuf);
/*
 * Drive test_mbuf_linearize() over a table of (size, nb_segs)
 * combinations and report the first failing pair.
 * NOTE(review): the mbuf_array table contents are not visible in this
 * excerpt — confirm in full source.
 */
1365 test_mbuf_linearize_check(struct rte_mempool *pktmbuf_pool)
1367 struct test_mbuf_array {
1379 printf("Test mbuf linearize API\n");
1381 for (i = 0; i < RTE_DIM(mbuf_array); i++)
1382 if (test_mbuf_linearize(pktmbuf_pool, mbuf_array[i].size,
1383 mbuf_array[i].nb_segs)) {
1384 printf("Test failed for %d, %d\n", mbuf_array[i].size,
1385 mbuf_array[i].nb_segs);
1393 * Helper function for test_tx_ofload
/*
 * Helper for test_tx_offload(): fill the mbuf's Tx offload bit-fields
 * one assignment at a time (tso_segsz, outer_l3_len, outer_l2_len shown;
 * the il2/il3/il4 inner-length assignments are not visible in this
 * excerpt — confirm in full source). Counterpart of the batched
 * rte_mbuf_tx_offload() composition measured in the benchmark.
 */
1396 set_tx_offload(struct rte_mbuf *mb, uint64_t il2, uint64_t il3, uint64_t il4,
1397 uint64_t tso, uint64_t ol3, uint64_t ol2)
1402 mb->tso_segsz = tso;
1403 mb->outer_l3_len = ol3;
1404 mb->outer_l2_len = ol2;
/*
 * Benchmark and equivalence test for mbuf tx_offload composition:
 * fill num mbufs via per-field bit-field assignments (set_tx_offload),
 * then via the single raw rte_mbuf_tx_offload() expression, timing both
 * with rte_rdtsc_precise(). Random field values are bounded by each
 * bit-field's width. Returns 0 only if both methods produced the same
 * tx_offload value (sampled from a random element), else -EINVAL.
 */
1408 test_tx_offload(void)
1410 struct rte_mbuf *mb;
1411 uint64_t tm, v1, v2;
1415 static volatile struct {
1422 const uint32_t num = 0x10000;
/* clamp every random value to its bit-field's range */
1424 txof.l2 = rte_rand() % (1 << RTE_MBUF_L2_LEN_BITS);
1425 txof.l3 = rte_rand() % (1 << RTE_MBUF_L3_LEN_BITS);
1426 txof.l4 = rte_rand() % (1 << RTE_MBUF_L4_LEN_BITS);
1427 txof.tso = rte_rand() % (1 << RTE_MBUF_TSO_SEGSZ_BITS);
1429 printf("%s started, tx_offload = {\n"
1433 "\ttso_segsz=%#hx,\n"
1434 "\touter_l3_len=%#x,\n"
1435 "\touter_l2_len=%#x,\n"
1438 txof.l2, txof.l3, txof.l4, txof.tso, txof.l3, txof.l2);
1440 sz = sizeof(*mb) * num;
1441 mb = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
1443 printf("%s failed, out of memory\n", __func__);
1448 tm = rte_rdtsc_precise();
1450 for (i = 0; i != num; i++)
1451 set_tx_offload(mb + i, txof.l2, txof.l3, txof.l4,
1452 txof.tso, txof.l3, txof.l2);
1454 tm = rte_rdtsc_precise() - tm;
1455 printf("%s set tx_offload by bit-fields: %u iterations, %"
1456 PRIu64 " cycles, %#Lf cycles/iter\n",
1457 __func__, num, tm, (long double)tm / num);
/* sample one element; all were written with identical values */
1459 v1 = mb[rte_rand() % num].tx_offload;
1462 tm = rte_rdtsc_precise();
1464 for (i = 0; i != num; i++)
1465 mb[i].tx_offload = rte_mbuf_tx_offload(txof.l2, txof.l3,
1466 txof.l4, txof.tso, txof.l3, txof.l2, 0);
1468 tm = rte_rdtsc_precise() - tm;
1469 printf("%s set raw tx_offload: %u iterations, %"
1470 PRIu64 " cycles, %#Lf cycles/iter\n",
1471 __func__, num, tm, (long double)tm / num);
1473 v2 = mb[rte_rand() % num].tx_offload;
1477 printf("%s finished\n"
1478 "expected tx_offload value: 0x%" PRIx64 ";\n"
1479 "rte_mbuf_tx_offload value: 0x%" PRIx64 ";\n",
/* both composition methods must agree bit-for-bit */
1482 return (v1 == v2) ? 0 : -EINVAL;
/*
 * Exercise rte_get_rx_ol_flag_list(): negative cases (NULL buffer, zero
 * length, truncated buffer) must return -1; valid masks must return 0 and
 * write a non-empty flag-name string into `buf`.
 */
1486 test_get_rx_ol_flag_list(void)
1488 	int len = 6, ret = 0;
1492 	/* Test case to check with null buffer */
1493 	ret = rte_get_rx_ol_flag_list(0, NULL, 0);
1495 		GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1497 	/* Test case to check with zero buffer len */
1498 	ret = rte_get_rx_ol_flag_list(RTE_MBUF_F_RX_L4_CKSUM_MASK, buf, 0);
1500 		GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* With zero length nothing may be written to the buffer. */
1502 	buflen = strlen(buf);
1504 		GOTO_FAIL("%s buffer should be empty, received = %d\n",
1507 	/* Test case to check with reduced buffer len */
1508 	ret = rte_get_rx_ol_flag_list(0, buf, len);
1510 		GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* Truncated output must still be NUL-terminated at len - 1. */
1512 	buflen = strlen(buf);
1513 	if (buflen != (len - 1))
1514 		GOTO_FAIL("%s invalid buffer length retrieved, expected: %d,"
1515 			"received = %d\n", __func__,
1518 	/* Test case to check with zero mask value */
1519 	ret = rte_get_rx_ol_flag_list(0, buf, sizeof(buf));
1521 		GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1523 	buflen = strlen(buf);
1525 		GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1526 			"non-zero, buffer should not be empty");
1528 	/* Test case to check with valid mask value */
1529 	ret = rte_get_rx_ol_flag_list(RTE_MBUF_F_RX_SEC_OFFLOAD, buf,
1532 		GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1534 	buflen = strlen(buf);
1536 		GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1537 			"non-zero, buffer should not be empty");
/*
 * Exercise rte_get_tx_ol_flag_list(): mirrors the Rx flag-list test —
 * NULL buffer / zero length / truncated buffer must fail with -1; valid
 * masks must return 0 and produce a non-empty flag-name string.
 */
1545 test_get_tx_ol_flag_list(void)
1547 	int len = 6, ret = 0;
1551 	/* Test case to check with null buffer */
1552 	ret = rte_get_tx_ol_flag_list(0, NULL, 0);
1554 		GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1556 	/* Test case to check with zero buffer len */
1557 	ret = rte_get_tx_ol_flag_list(RTE_MBUF_F_TX_IP_CKSUM, buf, 0);
1559 		GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* With zero length nothing may be written to the buffer. */
1561 	buflen = strlen(buf);
1563 		GOTO_FAIL("%s buffer should be empty, received = %d\n",
1567 	/* Test case to check with reduced buffer len */
1568 	ret = rte_get_tx_ol_flag_list(0, buf, len);
1570 		GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* Truncated output must still be NUL-terminated at len - 1. */
1572 	buflen = strlen(buf);
1573 	if (buflen != (len - 1))
1574 		GOTO_FAIL("%s invalid buffer length retrieved, expected: %d,"
1575 			"received = %d\n", __func__,
1578 	/* Test case to check with zero mask value */
1579 	ret = rte_get_tx_ol_flag_list(0, buf, sizeof(buf));
1581 		GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1583 	buflen = strlen(buf);
1585 		GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1586 			"non-zero, buffer should not be empty");
1588 	/* Test case to check with valid mask value */
1589 	ret = rte_get_tx_ol_flag_list(RTE_MBUF_F_TX_UDP_CKSUM, buf,
1592 		GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1594 	buflen = strlen(buf);
1596 		GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1597 			"non-zero, buffer should not be empty");
/*
 * Verify rte_get_rx_ol_flag_name(): every known Rx offload flag must map
 * to its exact macro name (via the VAL_NAME table), and an invalid flag
 * value (0) must return NULL.
 */
1611 test_get_rx_ol_flag_name(void)
1614 	const char *flag_str = NULL;
/* Table pairing each Rx flag value with its stringified macro name. */
1615 	const struct flag_name rx_flags[] = {
1616 		VAL_NAME(RTE_MBUF_F_RX_VLAN),
1617 		VAL_NAME(RTE_MBUF_F_RX_RSS_HASH),
1618 		VAL_NAME(RTE_MBUF_F_RX_FDIR),
1619 		VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_BAD),
1620 		VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_GOOD),
1621 		VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_NONE),
1622 		VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_BAD),
1623 		VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_GOOD),
1624 		VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_NONE),
1625 		VAL_NAME(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD),
1626 		VAL_NAME(RTE_MBUF_F_RX_VLAN_STRIPPED),
1627 		VAL_NAME(RTE_MBUF_F_RX_IEEE1588_PTP),
1628 		VAL_NAME(RTE_MBUF_F_RX_IEEE1588_TMST),
1629 		VAL_NAME(RTE_MBUF_F_RX_FDIR_ID),
1630 		VAL_NAME(RTE_MBUF_F_RX_FDIR_FLX),
1631 		VAL_NAME(RTE_MBUF_F_RX_QINQ_STRIPPED),
1632 		VAL_NAME(RTE_MBUF_F_RX_LRO),
1633 		VAL_NAME(RTE_MBUF_F_RX_SEC_OFFLOAD),
1634 		VAL_NAME(RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED),
1635 		VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD),
1636 		VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD),
1637 		VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID),
1640 	/* Test case to check with valid flag */
1641 	for (i = 0; i < RTE_DIM(rx_flags); i++) {
1642 		flag_str = rte_get_rx_ol_flag_name(rx_flags[i].flag);
1643 		if (flag_str == NULL)
1644 			GOTO_FAIL("%s: Expected flagname = %s; received null\n",
1645 				__func__, rx_flags[i].name);
1646 		if (strcmp(flag_str, rx_flags[i].name) != 0)
1647 			GOTO_FAIL("%s: Expected flagname = %s; received = %s\n",
1648 				__func__, rx_flags[i].name, flag_str);
1650 	/* Test case to check with invalid flag */
1651 	flag_str = rte_get_rx_ol_flag_name(0);
1652 	if (flag_str != NULL) {
1653 		GOTO_FAIL("%s: Expected flag name = null; received = %s\n",
1654 			__func__, flag_str);
/*
 * Verify rte_get_tx_ol_flag_name(): every known Tx offload flag must map
 * to its exact macro name, and an invalid flag value (0) must return NULL.
 * Mirrors test_get_rx_ol_flag_name() for the Tx flag set.
 */
1663 test_get_tx_ol_flag_name(void)
1666 	const char *flag_str = NULL;
/* Table pairing each Tx flag value with its stringified macro name. */
1667 	const struct flag_name tx_flags[] = {
1668 		VAL_NAME(RTE_MBUF_F_TX_VLAN),
1669 		VAL_NAME(RTE_MBUF_F_TX_IP_CKSUM),
1670 		VAL_NAME(RTE_MBUF_F_TX_TCP_CKSUM),
1671 		VAL_NAME(RTE_MBUF_F_TX_SCTP_CKSUM),
1672 		VAL_NAME(RTE_MBUF_F_TX_UDP_CKSUM),
1673 		VAL_NAME(RTE_MBUF_F_TX_IEEE1588_TMST),
1674 		VAL_NAME(RTE_MBUF_F_TX_TCP_SEG),
1675 		VAL_NAME(RTE_MBUF_F_TX_IPV4),
1676 		VAL_NAME(RTE_MBUF_F_TX_IPV6),
1677 		VAL_NAME(RTE_MBUF_F_TX_OUTER_IP_CKSUM),
1678 		VAL_NAME(RTE_MBUF_F_TX_OUTER_IPV4),
1679 		VAL_NAME(RTE_MBUF_F_TX_OUTER_IPV6),
1680 		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_VXLAN),
1681 		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_GRE),
1682 		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_IPIP),
1683 		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_GENEVE),
1684 		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_MPLSINUDP),
1685 		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE),
1686 		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_IP),
1687 		VAL_NAME(RTE_MBUF_F_TX_TUNNEL_UDP),
1688 		VAL_NAME(RTE_MBUF_F_TX_QINQ),
1689 		VAL_NAME(RTE_MBUF_F_TX_MACSEC),
1690 		VAL_NAME(RTE_MBUF_F_TX_SEC_OFFLOAD),
1691 		VAL_NAME(RTE_MBUF_F_TX_UDP_SEG),
1692 		VAL_NAME(RTE_MBUF_F_TX_OUTER_UDP_CKSUM),
1695 	/* Test case to check with valid flag */
1696 	for (i = 0; i < RTE_DIM(tx_flags); i++) {
1697 		flag_str = rte_get_tx_ol_flag_name(tx_flags[i].flag);
1698 		if (flag_str == NULL)
1699 			GOTO_FAIL("%s: Expected flagname = %s; received null\n",
1700 				__func__, tx_flags[i].name);
1701 		if (strcmp(flag_str, tx_flags[i].name) != 0)
1702 			GOTO_FAIL("%s: Expected flagname = %s; received = %s\n",
1703 				__func__, tx_flags[i].name, flag_str);
1705 	/* Test case to check with invalid flag */
1706 	flag_str = rte_get_tx_ol_flag_name(0);
1707 	if (flag_str != NULL) {
1708 		GOTO_FAIL("%s: Expected flag name = null; received = %s\n",
1709 			__func__, flag_str);
/*
 * Helper: allocate an mbuf, apply the given ol_flags and TSO segment size,
 * then check that rte_validate_tx_offload() returns exactly
 * `expected_retval`. `test_name` labels the scenario in failure output.
 * The mbuf is freed on both the success and failure paths.
 */
1719 test_mbuf_validate_tx_offload(const char *test_name,
1720 	struct rte_mempool *pktmbuf_pool,
1723 	int expected_retval)
1725 	struct rte_mbuf *m = NULL;
1728 	/* alloc a mbuf and do sanity check */
1729 	m = rte_pktmbuf_alloc(pktmbuf_pool);
1731 		GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1732 	if (rte_pktmbuf_pkt_len(m) != 0)
1733 		GOTO_FAIL("%s: Bad packet length\n", __func__);
1734 	rte_mbuf_sanity_check(m, 0);
/* Install the scenario's offload request on the mbuf, then validate. */
1735 	m->ol_flags = ol_flags;
1736 	m->tso_segsz = segsize;
1737 	ret = rte_validate_tx_offload(m);
1738 	if (ret != expected_retval)
1739 		GOTO_FAIL("%s(%s): expected ret val: %d; received: %d\n",
1740 			__func__, test_name, expected_retval, ret);
1741 	rte_pktmbuf_free(m);
1746 	rte_pktmbuf_free(m);
/*
 * Scenario suite for rte_validate_tx_offload(): each case builds an
 * ol_flags combination and expects either acceptance (0) or rejection
 * (-EINVAL) from the helper test_mbuf_validate_tx_offload().
 */
1753 test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
1755 	/* test to validate tx offload flags */
1756 	uint64_t ol_flags = 0;
1758 	/* test to validate if IP checksum is counted only for IPV4 packet */
1759 	/* set both IP checksum and IPV6 flags */
1760 	ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
1761 	ol_flags |= RTE_MBUF_F_TX_IPV6;
1762 	if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_CKSUM_IPV6_SET",
1764 			ol_flags, 0, -EINVAL) < 0)
1765 		GOTO_FAIL("%s failed: IP cksum is set incorrect.\n", __func__);
1766 	/* resetting ol_flags for next testcase */
1769 	/* test to validate if IP type is set when required */
1770 	ol_flags |= RTE_MBUF_F_TX_L4_MASK;
1771 	if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
1773 			ol_flags, 0, -EINVAL) < 0)
1774 		GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
1776 	/* test if IP type is set when TCP SEG is on */
1777 	ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
1778 	if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
1780 			ol_flags, 0, -EINVAL) < 0)
1781 		GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
1784 	/* test to confirm IP type (IPV4/IPV6) is set */
1785 	ol_flags = RTE_MBUF_F_TX_L4_MASK;
1786 	ol_flags |= RTE_MBUF_F_TX_IPV6;
1787 	if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_SET",
1789 			ol_flags, 0, 0) < 0)
1790 		GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1793 	/* test to check TSO segment size is non-zero */
1794 	ol_flags |= RTE_MBUF_F_TX_IPV4;
1795 	ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
1796 	/* set 0 tso segment size */
1797 	if (test_mbuf_validate_tx_offload("MBUF_TEST_NULL_TSO_SEGSZ",
1799 			ol_flags, 0, -EINVAL) < 0)
1800 		GOTO_FAIL("%s failed: tso segment size is null.\n", __func__);
1802 	/* retain IPV4 and RTE_MBUF_F_TX_TCP_SEG mask */
1803 	/* set valid tso segment size but IP CKSUM not set */
1804 	if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_NOT_SET",
1806 			ol_flags, 512, -EINVAL) < 0)
1807 		GOTO_FAIL("%s failed: IP CKSUM is not set.\n", __func__);
1809 	/* test to validate if IP checksum is set for TSO capability */
1810 	/* retain IPV4, TCP_SEG, tso_seg size */
1811 	ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
1812 	if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_SET",
1814 			ol_flags, 512, 0) < 0)
1815 		GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1817 	/* test to confirm TSO for IPV6 type */
1819 	ol_flags |= RTE_MBUF_F_TX_IPV6;
1820 	ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
1821 	if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IPV6_SET",
1823 			ol_flags, 512, 0) < 0)
1824 		GOTO_FAIL("%s failed: TSO req not met.\n", __func__);
1827 	/* test if outer IP checksum set for non outer IPv4 packet */
1828 	ol_flags |= RTE_MBUF_F_TX_IPV6;
1829 	ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
1830 	if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_NOT_SET",
1832 			ol_flags, 512, -EINVAL) < 0)
1833 		GOTO_FAIL("%s failed: Outer IP cksum set.\n", __func__);
1836 	/* test to confirm outer IP checksum is set for outer IPV4 packet */
1837 	ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
1838 	ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4;
1839 	if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_SET",
1841 			ol_flags, 512, 0) < 0)
1842 		GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1845 	/* test to confirm if packets with no TX_OFFLOAD_MASK are skipped */
1846 	if (test_mbuf_validate_tx_offload("MBUF_TEST_OL_MASK_NOT_SET",
1848 			ol_flags, 512, 0) < 0)
1849 		GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1856  * Test for allocating a bulk of mbufs
1857  * define an array with positive sizes for mbufs allocations.
 *
 * Each count is allocated with rte_pktmbuf_alloc_bulk(); a non-zero
 * return is reported as a failure, and any mbufs that were handed out
 * are freed before moving on.
1860 test_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
1863 	unsigned int idx, loop;
/* Counts chosen around the mempool cache size to hit cache edge cases. */
1864 	unsigned int alloc_counts[] = {
1866 		MEMPOOL_CACHE_SIZE - 1,
1867 		MEMPOOL_CACHE_SIZE + 1,
1868 		MEMPOOL_CACHE_SIZE * 1.5,
1869 		MEMPOOL_CACHE_SIZE * 2,
1870 		MEMPOOL_CACHE_SIZE * 2 - 1,
1871 		MEMPOOL_CACHE_SIZE * 2 + 1,
1875 	/* allocate a large array of mbuf pointers */
1876 	struct rte_mbuf *mbufs[NB_MBUF] = { 0 };
1877 	for (idx = 0; idx < RTE_DIM(alloc_counts); idx++) {
1878 		ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
/* Release whatever was allocated so later iterations start clean. */
1881 			for (loop = 0; loop < alloc_counts[idx] &&
1882 					mbufs[loop] != NULL; loop++)
1883 				rte_pktmbuf_free(mbufs[loop]);
1884 		} else if (ret != 0) {
1885 			printf("%s: Bulk alloc failed count(%u); ret val(%d)\n",
1886 					__func__, alloc_counts[idx], ret);
1894  * Negative testing for allocating a bulk of mbufs
 *
 * Requests counts that exceed what the pool can satisfy; the bulk alloc
 * must fail, and any mbufs unexpectedly handed out are freed.
1897 test_neg_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
1900 	unsigned int idx, loop;
/* Oversized / invalid counts that the pool cannot satisfy. */
1901 	unsigned int neg_alloc_counts[] = {
1902 		MEMPOOL_CACHE_SIZE - NB_MBUF,
1907 	struct rte_mbuf *mbufs[NB_MBUF * 8] = { 0 };
1909 	for (idx = 0; idx < RTE_DIM(neg_alloc_counts); idx++) {
1910 		ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
1911 				neg_alloc_counts[idx]);
/* Success here is the failure case: the request should not be met. */
1913 			printf("%s: Bulk alloc must fail! count(%u); ret(%d)\n",
1914 					__func__, neg_alloc_counts[idx], ret);
1915 			for (loop = 0; loop < neg_alloc_counts[idx] &&
1916 					mbufs[loop] != NULL; loop++)
1917 				rte_pktmbuf_free(mbufs[loop]);
1925  * Test to read mbuf packet using rte_pktmbuf_read
 *
 * Appends a known byte pattern (0xfe) to a fresh mbuf and verifies that
 * rte_pktmbuf_read() returns the same bytes from offset 0.
1928 test_pktmbuf_read(struct rte_mempool *pktmbuf_pool)
1930 	struct rte_mbuf *m = NULL;
1932 	const char *data_copy = NULL;
1936 	m = rte_pktmbuf_alloc(pktmbuf_pool);
1938 		GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1939 	if (rte_pktmbuf_pkt_len(m) != 0)
1940 		GOTO_FAIL("%s: Bad packet length\n", __func__);
1941 	rte_mbuf_sanity_check(m, 0);
/* Fill the payload with a recognizable pattern. */
1943 	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
1945 		GOTO_FAIL("%s: Cannot append data\n", __func__);
1946 	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN2)
1947 		GOTO_FAIL("%s: Bad packet length\n", __func__);
1948 	memset(data, 0xfe, MBUF_TEST_DATA_LEN2);
1950 	/* read the data from mbuf */
1951 	data_copy = rte_pktmbuf_read(m, 0, MBUF_TEST_DATA_LEN2, NULL);
1952 	if (data_copy == NULL)
1953 		GOTO_FAIL("%s: Error in reading data!\n", __func__);
1954 	for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
1955 		if (data_copy[off] != (char)0xfe)
1956 			GOTO_FAIL("Data corrupted at offset %u", off);
1958 	rte_pktmbuf_free(m);
1964 	rte_pktmbuf_free(m);
1971  * Test to read mbuf packet data from offset
 *
 * Prepends an Ethernet header (filled with 0xde) and appends payload
 * (filled with 0xcc), then exercises rte_pktmbuf_read() at various
 * offsets/lengths, including zero-length, maximum-length and
 * out-of-range negative cases.
1974 test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool)
1976 	struct rte_mbuf *m = NULL;
/* NOTE(review): legacy 'struct ether_hdr' is used here while hdr_len is
 * sized from 'struct rte_ether_hdr' — confirm both names are in scope. */
1977 	struct ether_hdr *hdr = NULL;
1979 	const char *data_copy = NULL;
1981 	unsigned int hdr_len = sizeof(struct rte_ether_hdr);
1984 	m = rte_pktmbuf_alloc(pktmbuf_pool);
1986 		GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1988 	if (rte_pktmbuf_pkt_len(m) != 0)
1989 		GOTO_FAIL("%s: Bad packet length\n", __func__);
1990 	rte_mbuf_sanity_check(m, 0);
1992 	/* prepend an ethernet header */
1993 	hdr = (struct ether_hdr *)rte_pktmbuf_prepend(m, hdr_len);
1995 		GOTO_FAIL("%s: Cannot prepend header\n", __func__);
1996 	if (rte_pktmbuf_pkt_len(m) != hdr_len)
1997 		GOTO_FAIL("%s: Bad pkt length", __func__);
1998 	if (rte_pktmbuf_data_len(m) != hdr_len)
1999 		GOTO_FAIL("%s: Bad data length", __func__);
2000 	memset(hdr, 0xde, hdr_len);
2002 	/* read mbuf header info from 0 offset */
2003 	data_copy = rte_pktmbuf_read(m, 0, hdr_len, NULL);
2004 	if (data_copy == NULL)
2005 		GOTO_FAIL("%s: Error in reading header!\n", __func__);
2006 	for (off = 0; off < hdr_len; off++) {
2007 		if (data_copy[off] != (char)0xde)
2008 			GOTO_FAIL("Header info corrupted at offset %u", off);
2011 	/* append sample data after ethernet header */
2012 	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
2014 		GOTO_FAIL("%s: Cannot append data\n", __func__);
2015 	if (rte_pktmbuf_pkt_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
2016 		GOTO_FAIL("%s: Bad packet length\n", __func__);
2017 	if (rte_pktmbuf_data_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
2018 		GOTO_FAIL("%s: Bad data length\n", __func__);
2019 	memset(data, 0xcc, MBUF_TEST_DATA_LEN2);
2021 	/* read mbuf data after header info */
2022 	data_copy = rte_pktmbuf_read(m, hdr_len, MBUF_TEST_DATA_LEN2, NULL);
2023 	if (data_copy == NULL)
2024 		GOTO_FAIL("%s: Error in reading header data!\n", __func__);
2025 	for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
2026 		if (data_copy[off] != (char)0xcc)
2027 			GOTO_FAIL("Data corrupted at offset %u", off);
2030 	/* partial reading of mbuf data */
2031 	data_copy = rte_pktmbuf_read(m, hdr_len + 5, MBUF_TEST_DATA_LEN2 - 5,
2033 	if (data_copy == NULL)
2034 		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2035 	if (strlen(data_copy) != MBUF_TEST_DATA_LEN2 - 5)
2036 		GOTO_FAIL("%s: Incorrect data length!\n", __func__);
2037 	for (off = 0; off < MBUF_TEST_DATA_LEN2 - 5; off++) {
2038 		if (data_copy[off] != (char)0xcc)
2039 			GOTO_FAIL("Data corrupted at offset %u", off);
2042 	/* read length greater than mbuf data_len */
2043 	if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_data_len(m) + 1,
2045 		GOTO_FAIL("%s: Requested len is larger than mbuf data len!\n",
2048 	/* read length greater than mbuf pkt_len */
2049 	if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_pkt_len(m) + 1,
2051 		GOTO_FAIL("%s: Requested len is larger than mbuf pkt len!\n",
2054 	/* read data of zero len from valid offset */
2055 	data_copy = rte_pktmbuf_read(m, hdr_len, 0, NULL);
2056 	if (data_copy == NULL)
2057 		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2058 	if (strlen(data_copy) != MBUF_TEST_DATA_LEN2)
2059 		GOTO_FAIL("%s: Corrupted data content!\n", __func__);
2060 	for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
2061 		if (data_copy[off] != (char)0xcc)
2062 			GOTO_FAIL("Data corrupted at offset %u", off);
2065 	/* read data of zero length from zero offset */
2066 	data_copy = rte_pktmbuf_read(m, 0, 0, NULL);
2067 	if (data_copy == NULL)
2068 		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2069 	/* check if the received address is the beginning of header info */
2070 	if (hdr != (const struct ether_hdr *)data_copy)
2071 		GOTO_FAIL("%s: Corrupted data address!\n", __func__);
2073 	/* read data of max length from valid offset */
2074 	data_copy = rte_pktmbuf_read(m, hdr_len, UINT_MAX, NULL);
2075 	if (data_copy == NULL)
2076 		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2077 	/* check if the received address is the beginning of data segment */
2078 	if (data_copy != data)
2079 		GOTO_FAIL("%s: Corrupted data address!\n", __func__);
2081 	/* try to read from mbuf with max size offset */
2082 	data_copy = rte_pktmbuf_read(m, UINT_MAX, 0, NULL);
2083 	if (data_copy != NULL)
2084 		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2086 	/* try to read from mbuf with max size offset and len */
2087 	data_copy = rte_pktmbuf_read(m, UINT_MAX, UINT_MAX, NULL);
2088 	if (data_copy != NULL)
2089 		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2091 	rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
2093 	rte_pktmbuf_free(m);
2099 	rte_pktmbuf_free(m);
/* Number of segments to build for this test packet. */
2106 	unsigned int seg_count;
/* Per-segment payload lengths (only the first seg_count entries used). */
2110 	unsigned int seg_lengths[MBUF_MAX_SEG];
2113 /* create a mbuf with different sized segments
2114  *  and fill with data [0x00 0x01 0x02 ...]
 *
 * Builds a chained packet as described by *test_data: optionally prepends
 * an Ethernet header (MBUF_HEADER flag) on the first segment, fills all
 * bytes with a continuous incrementing pattern, and chains the segments.
 * Returns the head mbuf, or NULL on failure (partially built chain freed).
2116 static struct rte_mbuf *
2117 create_packet(struct rte_mempool *pktmbuf_pool,
2118 		struct test_case *test_data)
2120 	uint16_t i, ret, seg, seg_len = 0;
2121 	uint32_t last_index = 0;
2122 	unsigned int seg_lengths[MBUF_MAX_SEG];
2123 	unsigned int hdr_len;
2124 	struct rte_mbuf *pkt = NULL;
2125 	struct rte_mbuf *pkt_seg = NULL;
/* Local copy of the requested per-segment lengths. */
2129 	memcpy(seg_lengths, test_data->seg_lengths,
2130 			sizeof(unsigned int)*test_data->seg_count);
2131 	for (seg = 0; seg < test_data->seg_count; seg++) {
2133 		seg_len = seg_lengths[seg];
2134 		pkt_seg = rte_pktmbuf_alloc(pktmbuf_pool);
2135 		if (pkt_seg == NULL)
2136 			GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2137 		if (rte_pktmbuf_pkt_len(pkt_seg) != 0)
2138 			GOTO_FAIL("%s: Bad packet length\n", __func__);
2139 		rte_mbuf_sanity_check(pkt_seg, 0);
2140 		/* Add header only for the first segment */
2141 		if (test_data->flags == MBUF_HEADER && seg == 0) {
2142 			hdr_len = sizeof(struct rte_ether_hdr);
2143 			/* prepend a header and fill with dummy data */
2144 			hdr = (char *)rte_pktmbuf_prepend(pkt_seg, hdr_len);
2146 				GOTO_FAIL("%s: Cannot prepend header\n",
2148 			if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len)
2149 				GOTO_FAIL("%s: Bad pkt length", __func__);
2150 			if (rte_pktmbuf_data_len(pkt_seg) != hdr_len)
2151 				GOTO_FAIL("%s: Bad data length", __func__);
/* Header bytes continue the same incrementing pattern as the payload. */
2152 			for (i = 0; i < hdr_len; i++)
2153 				hdr[i] = (last_index + i) % 0xffff;
2154 			last_index += hdr_len;
2156 		/* skip appending segment with 0 length */
2159 		data = rte_pktmbuf_append(pkt_seg, seg_len);
2161 			GOTO_FAIL("%s: Cannot append data segment\n", __func__);
2162 		if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len + seg_len)
2163 			GOTO_FAIL("%s: Bad packet segment length: %d\n",
2164 					__func__, rte_pktmbuf_pkt_len(pkt_seg));
2165 		if (rte_pktmbuf_data_len(pkt_seg) != hdr_len + seg_len)
2166 			GOTO_FAIL("%s: Bad data length\n", __func__);
2167 		for (i = 0; i < seg_len; i++)
2168 			data[i] = (last_index + i) % 0xffff;
2169 		/* to fill continuous data from one seg to another */
2171 		/* create chained mbufs */
2175 			ret = rte_pktmbuf_chain(pkt, pkt_seg);
2177 				GOTO_FAIL("%s:FAIL: Chained mbuf creation %d\n",
2181 		pkt_seg = pkt_seg->next;
/* Failure path: release the partially built chain. */
2186 	rte_pktmbuf_free(pkt);
2189 	if (pkt_seg != NULL) {
2190 		rte_pktmbuf_free(pkt_seg);
/*
 * Verify rte_pktmbuf_read() across chained mbufs: for each test case a
 * multi-segment packet is built by create_packet(), read back (possibly
 * into a caller-supplied buffer) and checked against the incrementing
 * fill pattern. MBUF_NEG_TEST_READ cases expect the read to fail.
 */
2197 test_pktmbuf_read_from_chain(struct rte_mempool *pktmbuf_pool)
2200 	struct test_case test_cases[] = {
2202 			.seg_lengths = { 100, 100, 100 },
2204 			.flags = MBUF_NO_HEADER,
2209 			.seg_lengths = { 100, 125, 150 },
2211 			.flags = MBUF_NO_HEADER,
2216 			.seg_lengths = { 100, 100 },
2218 			.flags = MBUF_NO_HEADER,
/* Case with an Ethernet header: read starts just past the header. */
2223 			.seg_lengths = { 100, 200 },
2225 			.flags = MBUF_HEADER,
2226 			.read_off = sizeof(struct rte_ether_hdr),
2230 			.seg_lengths = { 1000, 100 },
2232 			.flags = MBUF_NO_HEADER,
2237 			.seg_lengths = { 1024, 0, 100 },
2239 			.flags = MBUF_NO_HEADER,
2244 			.seg_lengths = { 1000, 1, 1000 },
2246 			.flags = MBUF_NO_HEADER,
/* Negative case: read length exceeds the packet's total data. */
2251 			.seg_lengths = { MBUF_TEST_DATA_LEN,
2252 					MBUF_TEST_DATA_LEN2,
2253 					MBUF_TEST_DATA_LEN3, 800, 10 },
2255 			.flags = MBUF_NEG_TEST_READ,
2257 			.read_len = MBUF_DATA_SIZE
2262 	const char *data_copy = NULL;
2263 	char data_buf[MBUF_DATA_SIZE];
2265 	memset(data_buf, 0, MBUF_DATA_SIZE);
2267 	for (i = 0; i < RTE_DIM(test_cases); i++) {
2268 		m = create_packet(pktmbuf_pool, &test_cases[i]);
2270 			GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2272 		data_copy = rte_pktmbuf_read(m, test_cases[i].read_off,
2273 				test_cases[i].read_len, data_buf);
2274 		if (test_cases[i].flags == MBUF_NEG_TEST_READ) {
2275 			if (data_copy != NULL)
2276 				GOTO_FAIL("%s: mbuf data read should fail!\n",
2279 			rte_pktmbuf_free(m);
2284 		if (data_copy == NULL)
2285 			GOTO_FAIL("%s: Error in reading packet data!\n",
/* Verify the continuous fill pattern across segment boundaries. */
2287 		for (pos = 0; pos < test_cases[i].read_len; pos++) {
2288 			if (data_copy[pos] !=
2289 					(char)((test_cases[i].read_off + pos)
2291 				GOTO_FAIL("Data corrupted at offset %u is %2X",
2292 						pos, data_copy[pos]);
2294 		rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
2295 		rte_pktmbuf_free(m);
2302 	rte_pktmbuf_free(m);
2308 /* Define a free call back function to be used for external buffer */
/*
 * Invoked when the external buffer's refcount drops to zero; `opaque`
 * carries a bool* flag that the caller checks to observe the free.
 * Presumably sets *freed and releases `addr` — intermediate lines not
 * shown here; confirm against the full source.
 */
2310 ext_buf_free_callback_fn(void *addr, void *opaque)
2312 	bool *freed = opaque;
2315 		printf("External buffer address is invalid\n");
2320 	printf("External buffer freed via callback\n");
2324  * Test to initialize shared data in external buffer before attaching to mbuf
2325  *  - Allocate mbuf with no data.
2326  *  - Allocate external buffer with size should be large enough to accommodate
2327  *     rte_mbuf_ext_shared_info.
2328  *  - Invoke pktmbuf_ext_shinfo_init_helper to initialize shared data.
2329  *  - Invoke rte_pktmbuf_attach_extbuf to attach external buffer to the mbuf.
2330  *  - Clone another mbuf and attach the same external buffer to it.
2331  *  - Invoke rte_pktmbuf_detach_extbuf to detach the external buffer from mbuf.
2334 test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool)
2336 	struct rte_mbuf *m = NULL;
2337 	struct rte_mbuf *clone = NULL;
2338 	struct rte_mbuf_ext_shared_info *ret_shinfo = NULL;
2339 	rte_iova_t buf_iova;
2340 	void *ext_buf_addr = NULL;
/* Room for data plus the trailing shared-info struct. */
2341 	uint16_t buf_len = EXT_BUF_TEST_DATA_LEN +
2342 		sizeof(struct rte_mbuf_ext_shared_info);
2346 	m = rte_pktmbuf_alloc(pktmbuf_pool);
2348 		GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2349 	if (rte_pktmbuf_pkt_len(m) != 0)
2350 		GOTO_FAIL("%s: Bad packet length\n", __func__);
2351 	rte_mbuf_sanity_check(m, 0);
2353 	ext_buf_addr = rte_malloc("External buffer", buf_len,
2354 			RTE_CACHE_LINE_SIZE);
2355 	if (ext_buf_addr == NULL)
2356 		GOTO_FAIL("%s: External buffer allocation failed\n", __func__);
/* shinfo is carved from the tail of the buffer; buf_len is adjusted. */
2358 	ret_shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf_addr, &buf_len,
2359 		ext_buf_free_callback_fn, &freed);
2360 	if (ret_shinfo == NULL)
2361 		GOTO_FAIL("%s: Shared info initialization failed!\n", __func__);
2363 	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
2364 		GOTO_FAIL("%s: External refcount is not 1\n", __func__);
2366 	if (rte_mbuf_refcnt_read(m) != 1)
2367 		GOTO_FAIL("%s: Invalid refcnt in mbuf\n", __func__);
2369 	buf_iova = rte_mem_virt2iova(ext_buf_addr);
2370 	rte_pktmbuf_attach_extbuf(m, ext_buf_addr, buf_iova, buf_len,
2372 	if (m->ol_flags != RTE_MBUF_F_EXTERNAL)
2373 		GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
2376 	/* allocate one more mbuf */
2377 	clone = rte_pktmbuf_clone(m, pktmbuf_pool);
2379 		GOTO_FAIL("%s: mbuf clone allocation failed!\n", __func__);
2380 	if (rte_pktmbuf_pkt_len(clone) != 0)
2381 		GOTO_FAIL("%s: Bad packet length\n", __func__);
2383 	/* attach the same external buffer to the cloned mbuf */
2384 	rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
2386 	if (clone->ol_flags != RTE_MBUF_F_EXTERNAL)
2387 		GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
/* Two attachments -> external refcount must be 2. */
2390 	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
2391 		GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
2393 		GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
2395 	/* test to manually update ext_buf_ref_cnt from 2 to 3*/
2396 	rte_mbuf_ext_refcnt_update(ret_shinfo, 1);
2397 	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 3)
2398 		GOTO_FAIL("%s: Update ext_buf ref_cnt failed\n", __func__);
2400 		GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
2402 	/* reset the ext_refcnt before freeing the external buffer */
2403 	rte_mbuf_ext_refcnt_set(ret_shinfo, 2);
2404 	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
2405 		GOTO_FAIL("%s: set ext_buf ref_cnt failed\n", __func__);
2407 		GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
2409 	/* detach the external buffer from mbufs */
2410 	rte_pktmbuf_detach_extbuf(m);
2411 	/* check if ref cnt is decremented */
2412 	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
2413 		GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
2415 		GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
/* Last detach drops the refcount to zero -> free callback must fire. */
2417 	rte_pktmbuf_detach_extbuf(clone);
2419 		GOTO_FAIL("%s: extbuf should be freed\n", __func__);
2422 	rte_pktmbuf_free(m);
2424 	rte_pktmbuf_free(clone);
/* Failure path: release mbufs and the external buffer if still owned. */
2431 	rte_pktmbuf_free(m);
2435 	rte_pktmbuf_free(clone);
2438 	if (ext_buf_addr != NULL) {
2439 		rte_free(ext_buf_addr);
2440 		ext_buf_addr = NULL;
2446  * Test the mbuf pool with pinned external data buffers
2447  *  - Allocate memory zone for external buffer
2448  *  - Create the mbuf pool with pinned external buffer
2449  *  - Check the created pool with relevant mbuf pool unit tests
 *
 * Re-runs the generic mbuf test suite against a pool whose data buffers
 * live in a pinned external memzone; `std_pool` supplies ordinary mbufs
 * where a second pool is needed (clone/copy tests).
2452 test_pktmbuf_ext_pinned_buffer(struct rte_mempool *std_pool)
2455 	struct rte_pktmbuf_extmem ext_mem;
2456 	struct rte_mempool *pinned_pool = NULL;
2457 	const struct rte_memzone *mz = NULL;
2459 	printf("Test mbuf pool with external pinned data buffers\n");
2461 	/* Allocate memzone for the external data buffer */
2462 	mz = rte_memzone_reserve("pinned_pool",
2463 				 NB_MBUF * MBUF_DATA_SIZE,
2465 				 RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY);
2467 		GOTO_FAIL("%s: Memzone allocation failed\n", __func__);
2469 	/* Create the mbuf pool with pinned external data buffer */
2470 	ext_mem.buf_ptr = mz->addr;
2471 	ext_mem.buf_iova = mz->iova;
2472 	ext_mem.buf_len = mz->len;
2473 	ext_mem.elt_size = MBUF_DATA_SIZE;
2475 	pinned_pool = rte_pktmbuf_pool_create_extbuf("test_pinned_pool",
2476 				NB_MBUF, MEMPOOL_CACHE_SIZE, 0,
2477 				MBUF_DATA_SIZE,	SOCKET_ID_ANY,
2479 	if (pinned_pool == NULL)
2480 		GOTO_FAIL("%s: Mbuf pool with pinned external"
2481 			  " buffer creation failed\n", __func__);
2482 	/* test multiple mbuf alloc */
2483 	if (test_pktmbuf_pool(pinned_pool) < 0)
2484 		GOTO_FAIL("%s: test_mbuf_pool(pinned) failed\n",
2487 	/* do it another time to check that all mbufs were freed */
2488 	if (test_pktmbuf_pool(pinned_pool) < 0)
2489 		GOTO_FAIL("%s: test_mbuf_pool(pinned) failed (2)\n",
2492 	/* test that the data pointer on a packet mbuf is set properly */
2493 	if (test_pktmbuf_pool_ptr(pinned_pool) < 0)
2494 		GOTO_FAIL("%s: test_pktmbuf_pool_ptr(pinned) failed\n",
2497 	/* test data manipulation in mbuf with non-ascii data */
2498 	if (test_pktmbuf_with_non_ascii_data(pinned_pool) < 0)
2499 		GOTO_FAIL("%s: test_pktmbuf_with_non_ascii_data(pinned)"
2500 			  " failed\n", __func__);
2502 	/* test free pktmbuf segment one by one */
2503 	if (test_pktmbuf_free_segment(pinned_pool) < 0)
2504 		GOTO_FAIL("%s: test_pktmbuf_free_segment(pinned) failed\n",
2507 	if (testclone_testupdate_testdetach(pinned_pool, std_pool) < 0)
2508 		GOTO_FAIL("%s: testclone_and_testupdate(pinned) failed\n",
2511 	if (test_pktmbuf_copy(pinned_pool, std_pool) < 0)
2512 		GOTO_FAIL("%s: test_pktmbuf_copy(pinned) failed\n",
2515 	if (test_failing_mbuf_sanity_check(pinned_pool) < 0)
2516 		GOTO_FAIL("%s: test_failing_mbuf_sanity_check(pinned)"
2517 			  " failed\n", __func__);
2519 	if (test_mbuf_linearize_check(pinned_pool) < 0)
2520 		GOTO_FAIL("%s: test_mbuf_linearize_check(pinned) failed\n",
2523 	/* test for allocating a bulk of mbufs with various sizes */
2524 	if (test_pktmbuf_alloc_bulk(pinned_pool) < 0)
2525 		GOTO_FAIL("%s: test_rte_pktmbuf_alloc_bulk(pinned) failed\n",
2528 	/* test for allocating a bulk of mbufs with various sizes */
2529 	if (test_neg_pktmbuf_alloc_bulk(pinned_pool) < 0)
2530 		GOTO_FAIL("%s: test_neg_rte_pktmbuf_alloc_bulk(pinned)"
2531 			  " failed\n", __func__);
2533 	/* test to read mbuf packet */
2534 	if (test_pktmbuf_read(pinned_pool) < 0)
2535 		GOTO_FAIL("%s: test_rte_pktmbuf_read(pinned) failed\n",
2538 	/* test to read mbuf packet from offset */
2539 	if (test_pktmbuf_read_from_offset(pinned_pool) < 0)
2540 		GOTO_FAIL("%s: test_rte_pktmbuf_read_from_offset(pinned)"
2541 			  " failed\n", __func__);
2543 	/* test to read data from chain of mbufs with data segments */
2544 	if (test_pktmbuf_read_from_chain(pinned_pool) < 0)
2545 		GOTO_FAIL("%s: test_rte_pktmbuf_read_from_chain(pinned)"
2546 			  " failed\n", __func__);
/* Success path: tear down the pinned pool and its backing memzone. */
2548 	RTE_SET_USED(std_pool);
2549 	rte_mempool_free(pinned_pool);
2550 	rte_memzone_free(mz);
/* Failure path tears down the same resources before returning. */
2554 	rte_mempool_free(pinned_pool);
2555 	rte_memzone_free(mz);
/*
 * Exercise registration of mbuf dynamic fields and flags:
 *  - register a uint8_t and a uint16_t dynfield and verify distinct,
 *    properly aligned offsets; re-registering the same descriptor must
 *    return the already-assigned offset (lookup semantics);
 *  - register a dynfield at a caller-chosen offset (dynfield1[1]) and a
 *    dynflag at a caller-chosen bit (rte_bsf64(RTE_MBUF_F_LAST_FREE));
 *  - verify that oversized, misaligned, unavailable-offset and
 *    invalid-flag registrations are rejected;
 *  - allocate an mbuf from pktmbuf_pool, write/read both dynfields via
 *    RTE_MBUF_DYNFIELD() and set the first dynflag in ol_flags.
 * Returns 0 on success, -1 via GOTO_FAIL on any failure.
 *
 * NOTE(review): this listing is a lossy extract — the original source
 * numbering (2560..2713) shows gaps, so several interleaved lines are
 * missing here (e.g. the `if (offset == -1)` guards that precede each
 * GOTO_FAIL, the `.size`/`.align`/`.flags` initializers of the
 * "fail-big"/"fail-align" descriptors, the `return 0;` and the `fail:`
 * label). Verify against the complete file before editing.
 */
2560 test_mbuf_dyn(struct rte_mempool *pktmbuf_pool)
/* Valid dynamic-field descriptors: one byte-sized, one 16-bit. */
2562 const struct rte_mbuf_dynfield dynfield = {
2563 .name = "test-dynfield",
2564 .size = sizeof(uint8_t),
2565 .align = __alignof__(uint8_t),
2568 const struct rte_mbuf_dynfield dynfield2 = {
2569 .name = "test-dynfield2",
2570 .size = sizeof(uint16_t),
2571 .align = __alignof__(uint16_t),
/* Registered at an explicit offset (dynfield1[1]) below. */
2574 const struct rte_mbuf_dynfield dynfield3 = {
2575 .name = "test-dynfield3",
2576 .size = sizeof(uint8_t),
2577 .align = __alignof__(uint8_t),
/* Negative-test descriptors: expected to be rejected at registration. */
2580 const struct rte_mbuf_dynfield dynfield_fail_big = {
2581 .name = "test-dynfield-fail-big",
2586 const struct rte_mbuf_dynfield dynfield_fail_align = {
2587 .name = "test-dynfield-fail-align",
/* Same name as "test-dynfield" but (presumably) conflicting flags. */
2592 const struct rte_mbuf_dynfield dynfield_fail_flag = {
2593 .name = "test-dynfield",
2594 .size = sizeof(uint8_t),
2595 .align = __alignof__(uint8_t),
2598 const struct rte_mbuf_dynflag dynflag_fail_flag = {
2599 .name = "test-dynflag",
/* Valid dynamic-flag descriptors. */
2602 const struct rte_mbuf_dynflag dynflag = {
2603 .name = "test-dynflag",
2606 const struct rte_mbuf_dynflag dynflag2 = {
2607 .name = "test-dynflag2",
2610 const struct rte_mbuf_dynflag dynflag3 = {
2611 .name = "test-dynflag3",
2614 struct rte_mbuf *m = NULL;
2615 int offset, offset2, offset3;
2616 int flag, flag2, flag3;
2619 printf("Test mbuf dynamic fields and flags\n");
2620 rte_mbuf_dyn_dump(stdout);
/* First registration assigns an offset; failure path missing here. */
2622 offset = rte_mbuf_dynfield_register(&dynfield);
2624 GOTO_FAIL("failed to register dynamic field, offset=%d: %s",
2625 offset, strerror(errno));
/* Re-registering the same descriptor must return the same offset. */
2627 ret = rte_mbuf_dynfield_register(&dynfield);
2629 GOTO_FAIL("failed to lookup dynamic field, ret=%d: %s",
2630 ret, strerror(errno));
/* Second field: distinct offset, 2-byte aligned (low bit clear). */
2632 offset2 = rte_mbuf_dynfield_register(&dynfield2);
2633 if (offset2 == -1 || offset2 == offset || (offset2 & 1))
2634 GOTO_FAIL("failed to register dynamic field 2, offset2=%d: %s",
2635 offset2, strerror(errno));
/* Request a specific offset; EBUSY is tolerated (slot already taken). */
2637 offset3 = rte_mbuf_dynfield_register_offset(&dynfield3,
2638 offsetof(struct rte_mbuf, dynfield1[1]));
2639 if (offset3 != offsetof(struct rte_mbuf, dynfield1[1])) {
2640 if (rte_errno == EBUSY)
2641 printf("mbuf test error skipped: dynfield is busy\n");
2643 GOTO_FAIL("failed to register dynamic field 3, offset="
2644 "%d: %s", offset3, strerror(errno));
2647 printf("dynfield: offset=%d, offset2=%d, offset3=%d\n",
2648 offset, offset2, offset3);
/* The following registrations must all fail (negative tests). */
2650 ret = rte_mbuf_dynfield_register(&dynfield_fail_big);
2652 GOTO_FAIL("dynamic field creation should fail (too big)");
2654 ret = rte_mbuf_dynfield_register(&dynfield_fail_align);
2656 GOTO_FAIL("dynamic field creation should fail (bad alignment)");
/* ol_flags is a core mbuf field, never available for dynfields. */
2658 ret = rte_mbuf_dynfield_register_offset(&dynfield_fail_align,
2659 offsetof(struct rte_mbuf, ol_flags));
2661 GOTO_FAIL("dynamic field creation should fail (not avail)");
2663 ret = rte_mbuf_dynfield_register(&dynfield_fail_flag);
2665 GOTO_FAIL("dynamic field creation should fail (invalid flag)");
2667 ret = rte_mbuf_dynflag_register(&dynflag_fail_flag);
2669 GOTO_FAIL("dynamic flag creation should fail (invalid flag)");
/* Dynamic flags: register, lookup-by-reregistration, second flag. */
2671 flag = rte_mbuf_dynflag_register(&dynflag);
2673 GOTO_FAIL("failed to register dynamic flag, flag=%d: %s",
2674 flag, strerror(errno));
2676 ret = rte_mbuf_dynflag_register(&dynflag);
2678 GOTO_FAIL("failed to lookup dynamic flag, ret=%d: %s",
2679 ret, strerror(errno));
2681 flag2 = rte_mbuf_dynflag_register(&dynflag2);
2682 if (flag2 == -1 || flag2 == flag)
2683 GOTO_FAIL("failed to register dynamic flag 2, flag2=%d: %s",
2684 flag2, strerror(errno));
/* Request a specific bit number: the last free ol_flags bit. */
2686 flag3 = rte_mbuf_dynflag_register_bitnum(&dynflag3,
2687 rte_bsf64(RTE_MBUF_F_LAST_FREE));
2688 if (flag3 != rte_bsf64(RTE_MBUF_F_LAST_FREE))
2689 GOTO_FAIL("failed to register dynamic flag 3, flag3=%d: %s",
2690 flag3, strerror(errno));
2692 printf("dynflag: flag=%d, flag2=%d, flag3=%d\n", flag, flag2, flag3);
2694 /* set, get dynamic field */
2695 m = rte_pktmbuf_alloc(pktmbuf_pool);
2697 GOTO_FAIL("Cannot allocate mbuf");
/* Write then read back each dynfield through its registered offset. */
2699 *RTE_MBUF_DYNFIELD(m, offset, uint8_t *) = 1;
2700 if (*RTE_MBUF_DYNFIELD(m, offset, uint8_t *) != 1)
2701 GOTO_FAIL("failed to read dynamic field");
2702 *RTE_MBUF_DYNFIELD(m, offset2, uint16_t *) = 1000;
2703 if (*RTE_MBUF_DYNFIELD(m, offset2, uint16_t *) != 1000)
2704 GOTO_FAIL("failed to read dynamic field");
2706 /* set a dynamic flag */
2707 m->ol_flags |= (1ULL << flag);
2709 rte_mbuf_dyn_dump(stdout);
2710 rte_pktmbuf_free(m);
/* Failure path: free the mbuf (free(NULL) is a no-op) and return -1. */
2713 rte_pktmbuf_free(m);
2717 /* check that m->nb_segs and m->next are reset on mbuf free */
/*
 * Verify that rte_pktmbuf_free() resets m->nb_segs and m->next so that a
 * subsequent raw allocation hands back clean single-segment mbufs:
 * build a 3-segment chain, (split and) free it, then raw-alloc the same
 * three mbufs and check nb_segs == 1 and next == NULL on each.
 * Returns 0 on success, -1 via GOTO_FAIL otherwise.
 *
 * NOTE(review): lossy extract — original lines 2746-2747 (rest of the
 * chain-validation condition), 2752-2755 (the actual split of the chain
 * between m1 and m2), and the `return 0;`/`fail:` tail are missing from
 * this listing; consult the complete file before editing.
 */
2719 test_nb_segs_and_next_reset(void)
2721 struct rte_mbuf *m0 = NULL, *m1 = NULL, *m2 = NULL;
2722 struct rte_mempool *pool = NULL;
/* Tiny dedicated pool: exactly 3 mbufs, no cache, no priv area. */
2724 pool = rte_pktmbuf_pool_create("test_mbuf_reset",
2725 3, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
2727 GOTO_FAIL("Failed to create mbuf pool");
2730 m0 = rte_pktmbuf_alloc(pool);
2731 m1 = rte_pktmbuf_alloc(pool);
2732 m2 = rte_pktmbuf_alloc(pool);
2733 if (m0 == NULL || m1 == NULL || m2 == NULL)
2734 GOTO_FAIL("Failed to allocate mbuf");
2736 /* append data in all of them */
2737 if (rte_pktmbuf_append(m0, 500) == NULL ||
2738 rte_pktmbuf_append(m1, 500) == NULL ||
2739 rte_pktmbuf_append(m2, 500) == NULL)
2740 GOTO_FAIL("Failed to append data in mbuf");
2742 /* chain them in one mbuf m0 */
2743 rte_pktmbuf_chain(m1, m2);
2744 rte_pktmbuf_chain(m0, m1);
2745 if (m0->nb_segs != 3 || m0->next != m1 || m1->next != m2 ||
2748 GOTO_FAIL("Failed to chain mbufs");
2751 /* split m0 chain in two, between m1 and m2 */
2756 /* free the 2 mbuf chains m0 and m2 */
2757 rte_pktmbuf_free(m0);
2758 rte_pktmbuf_free(m2);
2760 /* realloc the 3 mbufs */
/* raw alloc skips pktmbuf reset, so it exposes stale nb_segs/next. */
2761 m0 = rte_mbuf_raw_alloc(pool);
2762 m1 = rte_mbuf_raw_alloc(pool);
2763 m2 = rte_mbuf_raw_alloc(pool);
2764 if (m0 == NULL || m1 == NULL || m2 == NULL)
2765 GOTO_FAIL("Failed to reallocate mbuf");
2767 /* ensure that m->next and m->nb_segs are reset allocated mbufs */
2768 if (m0->nb_segs != 1 || m0->next != NULL ||
2769 m1->nb_segs != 1 || m1->next != NULL ||
2770 m2->nb_segs != 1 || m2->next != NULL)
2771 GOTO_FAIL("nb_segs or next was not reset properly");
/* Failure path: release the dedicated pool. */
2777 rte_mempool_free(pool);
/*
 * Top-level driver for the mbuf autotest: creates the two test pools
 * (a standard one and one with MBUF2_PRIV_SIZE private area and no data
 * room), then runs every sub-test in sequence, printing a message and
 * bailing out on the first failure. Both pools are freed at the end.
 *
 * NOTE(review): lossy extract — the `static int test_mbuf(void)` header
 * (original ~2783-2784), the `goto err;`/`err:` error-path lines and the
 * final return statements are missing from this listing; verify against
 * the complete file before editing.
 */
2785 struct rte_mempool *pktmbuf_pool = NULL;
2786 struct rte_mempool *pktmbuf_pool2 = NULL;
/* Guard against rte_mbuf silently growing past two cache lines. */
2789 RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != RTE_CACHE_LINE_MIN_SIZE * 2);
2791 /* create pktmbuf pool if it does not exist */
2792 pktmbuf_pool = rte_pktmbuf_pool_create("test_pktmbuf_pool",
2793 NB_MBUF, MEMPOOL_CACHE_SIZE, 0, MBUF_DATA_SIZE,
2796 if (pktmbuf_pool == NULL) {
2797 printf("cannot allocate mbuf pool\n");
2801 /* test registration of dynamic fields and flags */
2802 if (test_mbuf_dyn(pktmbuf_pool) < 0) {
2803 printf("mbuf dynflag test failed\n");
2807 /* create a specific pktmbuf pool with a priv_size != 0 and no data
2809 pktmbuf_pool2 = rte_pktmbuf_pool_create("test_pktmbuf_pool2",
2810 NB_MBUF, MEMPOOL_CACHE_SIZE, MBUF2_PRIV_SIZE, 0,
2813 if (pktmbuf_pool2 == NULL) {
2814 printf("cannot allocate mbuf pool\n");
2818 /* test multiple mbuf alloc */
2819 if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
2820 printf("test_mbuf_pool() failed\n");
2824 /* do it another time to check that all mbufs were freed */
2825 if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
2826 printf("test_mbuf_pool() failed (2)\n");
2830 /* test bulk mbuf alloc and free */
2831 if (test_pktmbuf_pool_bulk() < 0) {
2832 printf("test_pktmbuf_pool_bulk() failed\n");
2836 /* test that the pointer to the data on a packet mbuf is set properly */
2837 if (test_pktmbuf_pool_ptr(pktmbuf_pool) < 0) {
2838 printf("test_pktmbuf_pool_ptr() failed\n");
2842 /* test data manipulation in mbuf */
2843 if (test_one_pktmbuf(pktmbuf_pool) < 0) {
2844 printf("test_one_mbuf() failed\n");
2850 * do it another time, to check that allocation reinitialize
2851 * the mbuf correctly
2853 if (test_one_pktmbuf(pktmbuf_pool) < 0) {
2854 printf("test_one_mbuf() failed (2)\n");
2858 if (test_pktmbuf_with_non_ascii_data(pktmbuf_pool) < 0) {
2859 printf("test_pktmbuf_with_non_ascii_data() failed\n");
2863 /* test free pktmbuf segment one by one */
2864 if (test_pktmbuf_free_segment(pktmbuf_pool) < 0) {
2865 printf("test_pktmbuf_free_segment() failed.\n");
2869 if (testclone_testupdate_testdetach(pktmbuf_pool, pktmbuf_pool) < 0) {
2870 printf("testclone_and_testupdate() failed \n");
2874 if (test_pktmbuf_copy(pktmbuf_pool, pktmbuf_pool) < 0) {
2875 printf("test_pktmbuf_copy() failed\n");
/* pool2 (priv_size != 0) is only consumed by this attach test. */
2879 if (test_attach_from_different_pool(pktmbuf_pool, pktmbuf_pool2) < 0) {
2880 printf("test_attach_from_different_pool() failed\n");
2884 if (test_refcnt_mbuf() < 0) {
2885 printf("test_refcnt_mbuf() failed \n");
2889 if (test_failing_mbuf_sanity_check(pktmbuf_pool) < 0) {
2890 printf("test_failing_mbuf_sanity_check() failed\n");
2894 if (test_mbuf_linearize_check(pktmbuf_pool) < 0) {
2895 printf("test_mbuf_linearize_check() failed\n");
2899 if (test_tx_offload() < 0) {
2900 printf("test_tx_offload() failed\n");
2904 if (test_get_rx_ol_flag_list() < 0) {
2905 printf("test_rte_get_rx_ol_flag_list() failed\n");
2909 if (test_get_tx_ol_flag_list() < 0) {
2910 printf("test_rte_get_tx_ol_flag_list() failed\n");
2914 if (test_get_rx_ol_flag_name() < 0) {
2915 printf("test_rte_get_rx_ol_flag_name() failed\n");
2919 if (test_get_tx_ol_flag_name() < 0) {
2920 printf("test_rte_get_tx_ol_flag_name() failed\n");
2924 if (test_mbuf_validate_tx_offload_one(pktmbuf_pool) < 0) {
2925 printf("test_mbuf_validate_tx_offload_one() failed\n");
2929 /* test for allocating a bulk of mbufs with various sizes */
2930 if (test_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
2931 printf("test_rte_pktmbuf_alloc_bulk() failed\n");
2935 /* test for allocating a bulk of mbufs with various sizes */
2936 if (test_neg_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
2937 printf("test_neg_rte_pktmbuf_alloc_bulk() failed\n");
2941 /* test to read mbuf packet */
2942 if (test_pktmbuf_read(pktmbuf_pool) < 0) {
2943 printf("test_rte_pktmbuf_read() failed\n");
2947 /* test to read mbuf packet from offset */
2948 if (test_pktmbuf_read_from_offset(pktmbuf_pool) < 0) {
2949 printf("test_rte_pktmbuf_read_from_offset() failed\n");
2953 /* test to read data from chain of mbufs with data segments */
2954 if (test_pktmbuf_read_from_chain(pktmbuf_pool) < 0) {
2955 printf("test_rte_pktmbuf_read_from_chain() failed\n");
2959 /* test to initialize shared info. at the end of external buffer */
2960 if (test_pktmbuf_ext_shinfo_init_helper(pktmbuf_pool) < 0) {
2961 printf("test_pktmbuf_ext_shinfo_init_helper() failed\n");
2965 /* test the mbuf pool with pinned external data buffers */
2966 if (test_pktmbuf_ext_pinned_buffer(pktmbuf_pool) < 0) {
2967 printf("test_pktmbuf_ext_pinned_buffer() failed\n");
2971 /* test reset of m->nb_segs and m->next on mbuf free */
2972 if (test_nb_segs_and_next_reset() < 0) {
2973 printf("test_nb_segs_and_next_reset() failed\n");
/* Common exit: release both pools (free(NULL) is a no-op). */
2979 rte_mempool_free(pktmbuf_pool);
2980 rte_mempool_free(pktmbuf_pool2);
/* Register the driver above as the "mbuf_autotest" test command. */
2985 REGISTER_TEST_COMMAND(mbuf_autotest, test_mbuf);