1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
12 #include <sys/queue.h>
14 #include <rte_common.h>
15 #include <rte_errno.h>
16 #include <rte_debug.h>
18 #include <rte_memory.h>
19 #include <rte_memcpy.h>
20 #include <rte_launch.h>
22 #include <rte_per_lcore.h>
23 #include <rte_lcore.h>
24 #include <rte_atomic.h>
25 #include <rte_branch_prediction.h>
27 #include <rte_mempool.h>
29 #include <rte_random.h>
30 #include <rte_cycles.h>
31 #include <rte_malloc.h>
32 #include <rte_ether.h>
38 #define MEMPOOL_CACHE_SIZE 32
39 #define MBUF_DATA_SIZE 2048
41 #define MBUF_TEST_DATA_LEN 1464
42 #define MBUF_TEST_DATA_LEN2 50
43 #define MBUF_TEST_DATA_LEN3 256
44 #define MBUF_TEST_HDR1_LEN 20
45 #define MBUF_TEST_HDR2_LEN 30
46 #define MBUF_TEST_ALL_HDRS_LEN (MBUF_TEST_HDR1_LEN+MBUF_TEST_HDR2_LEN)
47 #define MBUF_TEST_SEG_SIZE 64
48 #define MBUF_TEST_BURST 8
49 #define EXT_BUF_TEST_DATA_LEN 1024
50 #define MBUF_MAX_SEG 16
51 #define MBUF_NO_HEADER 0
53 #define MBUF_NEG_TEST_READ 2
55 /* chain length in bulk test */
58 /* size of private data for mbuf in pktmbuf_pool2 */
59 #define MBUF2_PRIV_SIZE 128
61 #define REFCNT_MAX_ITER 64
62 #define REFCNT_MAX_TIMEOUT 10
63 #define REFCNT_MAX_REF (RTE_MAX_LCORE)
64 #define REFCNT_MBUF_NUM 64
65 #define REFCNT_RING_SIZE (REFCNT_MBUF_NUM * REFCNT_MAX_REF)
67 #define MAGIC_DATA 0x42424242
69 #define MAKE_STRING(x) # x
71 #ifdef RTE_MBUF_REFCNT_ATOMIC
/* Flag raised by the master lcore to tell slave lcores to stop polling
 * the refcnt ring.  NOTE(review): plain volatile, not atomic — relies on
 * single-writer/multi-reader usage; confirm against the launch code. */
73 static volatile uint32_t refcnt_stop_slaves;
/* Per-lcore tally of mbufs freed (slaves) / references issued (master). */
74 static unsigned refcnt_lcore[RTE_MAX_LCORE];
82 * #. Allocate a mbuf pool.
84 * - The pool contains NB_MBUF elements, where each mbuf is MBUF_SIZE
87 * #. Test multiple allocations of mbufs from this pool.
89 * - Allocate NB_MBUF and store pointers in a table.
90 * - If an allocation fails, return an error.
91 * - Free all these mbufs.
92 * - Repeat the same test to check that mbufs were freed correctly.
94 * #. Test data manipulation in pktmbuf.
97 * - Append data using rte_pktmbuf_append().
98 * - Test for error in rte_pktmbuf_append() when len is too large.
99 * - Trim data at the end of mbuf using rte_pktmbuf_trim().
100 * - Test for error in rte_pktmbuf_trim() when len is too large.
101 * - Prepend a header using rte_pktmbuf_prepend().
102 * - Test for error in rte_pktmbuf_prepend() when len is too large.
103 * - Remove data at the beginning of mbuf using rte_pktmbuf_adj().
104 * - Test for error in rte_pktmbuf_adj() when len is too large.
105 * - Check that appended data is not corrupt.
107 * - Between all these tests, check data_len and pkt_len, and
108 * that the mbuf is contiguous.
109 * - Repeat the test to check that allocation operations
110 * reinitialize the mbuf correctly.
112 * #. Test packet cloning
113 * - Clone a mbuf and verify the data
114 * - Clone the cloned mbuf and verify the data
115 * - Attach a mbuf to another that does not have the same priv_size.
118 #define GOTO_FAIL(str, ...) do { \
119 printf("mbuf test FAILED (l.%d): <" str ">\n", \
120 __LINE__, ##__VA_ARGS__); \
125 * test data manipulation in mbuf with non-ascii data
/*
 * Allocate one mbuf, append MBUF_TEST_DATA_LEN bytes, fill them with the
 * non-ascii byte 0xff, check lengths/contiguity and dump the buffer.
 * NOTE(review): the free/return/fail paths of this function are outside
 * the visible chunk.
 */
128 test_pktmbuf_with_non_ascii_data(struct rte_mempool *pktmbuf_pool)
130 struct rte_mbuf *m = NULL;
133 m = rte_pktmbuf_alloc(pktmbuf_pool);
135 GOTO_FAIL("Cannot allocate mbuf");
/* a fresh mbuf must start with zero packet length */
136 if (rte_pktmbuf_pkt_len(m) != 0)
137 GOTO_FAIL("Bad length");
139 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
141 GOTO_FAIL("Cannot append data");
142 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
143 GOTO_FAIL("Bad pkt length");
144 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
145 GOTO_FAIL("Bad data length");
/* fill with 0xff so the dump below exercises non-printable bytes */
146 memset(data, 0xff, rte_pktmbuf_pkt_len(m));
147 if (!rte_pktmbuf_is_contiguous(m))
148 GOTO_FAIL("Buffer should be continuous");
149 rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN);
163 * test data manipulation in mbuf
/*
 * Exercise the basic single-mbuf data-manipulation API on one mbuf:
 * append/trim/prepend/adj, including the failure cases where the
 * requested length exceeds tailroom, data length or headroom, and a
 * final data-integrity scan.  NOTE(review): some interior lines (NULL
 * checks, free/fail labels) are outside the visible chunk.
 */
166 test_one_pktmbuf(struct rte_mempool *pktmbuf_pool)
168 struct rte_mbuf *m = NULL;
169 char *data, *data2, *hdr;
172 printf("Test pktmbuf API\n");
176 m = rte_pktmbuf_alloc(pktmbuf_pool);
178 GOTO_FAIL("Cannot allocate mbuf");
/* a fresh mbuf must start empty */
179 if (rte_pktmbuf_pkt_len(m) != 0)
180 GOTO_FAIL("Bad length");
182 rte_pktmbuf_dump(stdout, m, 0);
/* append MBUF_TEST_DATA_LEN bytes and verify both length fields */
186 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
188 GOTO_FAIL("Cannot append data");
189 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
190 GOTO_FAIL("Bad pkt length");
191 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
192 GOTO_FAIL("Bad data length");
193 memset(data, 0x66, rte_pktmbuf_pkt_len(m));
194 if (!rte_pktmbuf_is_contiguous(m))
195 GOTO_FAIL("Buffer should be continuous");
196 rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN);
/* dump len larger than the data: dump should clamp, not crash */
197 rte_pktmbuf_dump(stdout, m, 2*MBUF_TEST_DATA_LEN);
199 /* this append should fail: one byte more than the remaining tailroom */
201 data2 = rte_pktmbuf_append(m, (uint16_t)(rte_pktmbuf_tailroom(m) + 1));
203 GOTO_FAIL("Append should not succeed");
205 /* append some more data */
207 data2 = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
209 GOTO_FAIL("Cannot append data");
210 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
211 GOTO_FAIL("Bad pkt length");
212 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
213 GOTO_FAIL("Bad data length");
214 if (!rte_pktmbuf_is_contiguous(m))
215 GOTO_FAIL("Buffer should be continuous");
217 /* trim data at the end of mbuf */
219 if (rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN2) < 0)
220 GOTO_FAIL("Cannot trim data");
221 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
222 GOTO_FAIL("Bad pkt length");
223 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
224 GOTO_FAIL("Bad data length");
225 if (!rte_pktmbuf_is_contiguous(m))
226 GOTO_FAIL("Buffer should be continuous");
228 /* this trim should fail: one byte more than the current data length */
230 if (rte_pktmbuf_trim(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) == 0)
231 GOTO_FAIL("trim should not succeed");
233 /* prepend one header */
235 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN);
237 GOTO_FAIL("Cannot prepend");
/* prepend must move the data start back by exactly the header length */
238 if (data - hdr != MBUF_TEST_HDR1_LEN)
239 GOTO_FAIL("Prepend failed");
240 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
241 GOTO_FAIL("Bad pkt length");
242 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
243 GOTO_FAIL("Bad data length");
244 if (!rte_pktmbuf_is_contiguous(m))
245 GOTO_FAIL("Buffer should be continuous");
246 memset(hdr, 0x55, MBUF_TEST_HDR1_LEN);
248 /* prepend another header */
250 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR2_LEN);
252 GOTO_FAIL("Cannot prepend");
253 if (data - hdr != MBUF_TEST_ALL_HDRS_LEN)
254 GOTO_FAIL("Prepend failed");
255 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
256 GOTO_FAIL("Bad pkt length");
257 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
258 GOTO_FAIL("Bad data length");
259 if (!rte_pktmbuf_is_contiguous(m))
260 GOTO_FAIL("Buffer should be continuous");
261 memset(hdr, 0x55, MBUF_TEST_HDR2_LEN);
/* run sanity check in both header (1) and non-header (0) modes */
263 rte_mbuf_sanity_check(m, 1);
264 rte_mbuf_sanity_check(m, 0);
265 rte_pktmbuf_dump(stdout, m, 0);
267 /* this prepend should fail: one byte more than the remaining headroom */
269 hdr = rte_pktmbuf_prepend(m, (uint16_t)(rte_pktmbuf_headroom(m) + 1));
271 GOTO_FAIL("prepend should not succeed");
273 /* remove data at beginning of mbuf (adj) */
275 if (data != rte_pktmbuf_adj(m, MBUF_TEST_ALL_HDRS_LEN))
276 GOTO_FAIL("rte_pktmbuf_adj failed");
277 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
278 GOTO_FAIL("Bad pkt length");
279 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
280 GOTO_FAIL("Bad data length");
281 if (!rte_pktmbuf_is_contiguous(m))
282 GOTO_FAIL("Buffer should be continuous");
284 /* this adj should fail: one byte more than the current data length */
286 if (rte_pktmbuf_adj(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) != NULL)
287 GOTO_FAIL("rte_pktmbuf_adj should not succeed");
291 if (!rte_pktmbuf_is_contiguous(m))
292 GOTO_FAIL("Buffer should be continuous");
/* verify the 0x66 payload survived all of the above manipulations */
294 for (i=0; i<MBUF_TEST_DATA_LEN; i++) {
296 GOTO_FAIL("Data corrupted at offset %u", i);
/*
 * Verify rte_pktmbuf_clone(): clone a single mbuf, then a two-segment
 * chain, then clone the clone, checking the MAGIC_DATA payload and the
 * refcounts of the original segments at each step.  NOTE(review): NULL
 * checks, data stores and the fail label are outside the visible chunk.
 */
312 testclone_testupdate_testdetach(struct rte_mempool *pktmbuf_pool)
314 struct rte_mbuf *m = NULL;
315 struct rte_mbuf *clone = NULL;
316 struct rte_mbuf *clone2 = NULL;
317 unaligned_uint32_t *data;
320 m = rte_pktmbuf_alloc(pktmbuf_pool);
322 GOTO_FAIL("ooops not allocating mbuf");
324 if (rte_pktmbuf_pkt_len(m) != 0)
325 GOTO_FAIL("Bad length");
/* store one MAGIC_DATA word in the original mbuf */
327 rte_pktmbuf_append(m, sizeof(uint32_t));
328 data = rte_pktmbuf_mtod(m, unaligned_uint32_t *);
331 /* clone the allocated mbuf */
332 clone = rte_pktmbuf_clone(m, pktmbuf_pool);
334 GOTO_FAIL("cannot clone data\n");
336 data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *);
337 if (*data != MAGIC_DATA)
338 GOTO_FAIL("invalid data in clone\n");
/* cloning must bump the original's refcnt to 2 */
340 if (rte_mbuf_refcnt_read(m) != 2)
341 GOTO_FAIL("invalid refcnt in m\n");
344 rte_pktmbuf_free(clone);
347 /* same test with a chained mbuf */
348 m->next = rte_pktmbuf_alloc(pktmbuf_pool);
350 GOTO_FAIL("Next Pkt Null\n");
353 rte_pktmbuf_append(m->next, sizeof(uint32_t));
/* pkt_len is maintained by hand here since the chain was built manually */
354 m->pkt_len = 2 * sizeof(uint32_t);
356 data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
359 clone = rte_pktmbuf_clone(m, pktmbuf_pool);
361 GOTO_FAIL("cannot clone data\n");
363 data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *);
364 if (*data != MAGIC_DATA)
365 GOTO_FAIL("invalid data in clone\n");
367 data = rte_pktmbuf_mtod(clone->next, unaligned_uint32_t *);
368 if (*data != MAGIC_DATA)
369 GOTO_FAIL("invalid data in clone->next\n");
371 if (rte_mbuf_refcnt_read(m) != 2)
372 GOTO_FAIL("invalid refcnt in m\n");
374 if (rte_mbuf_refcnt_read(m->next) != 2)
375 GOTO_FAIL("invalid refcnt in m->next\n");
377 /* try to clone the clone */
379 clone2 = rte_pktmbuf_clone(clone, pktmbuf_pool);
381 GOTO_FAIL("cannot clone the clone\n");
383 data = rte_pktmbuf_mtod(clone2, unaligned_uint32_t *);
384 if (*data != MAGIC_DATA)
385 GOTO_FAIL("invalid data in clone2\n");
387 data = rte_pktmbuf_mtod(clone2->next, unaligned_uint32_t *);
388 if (*data != MAGIC_DATA)
389 GOTO_FAIL("invalid data in clone2->next\n");
/* two clones alive -> original segments are referenced 3 times */
391 if (rte_mbuf_refcnt_read(m) != 3)
392 GOTO_FAIL("invalid refcnt in m\n");
394 if (rte_mbuf_refcnt_read(m->next) != 3)
395 GOTO_FAIL("invalid refcnt in m->next\n");
399 rte_pktmbuf_free(clone);
400 rte_pktmbuf_free(clone2);
405 printf("%s ok\n", __func__);
/* failure path: release whatever was allocated */
412 rte_pktmbuf_free(clone);
414 rte_pktmbuf_free(clone2);
/*
 * Verify rte_pktmbuf_copy(): full copy of a simple mbuf, copy of a
 * cloned mbuf (result must be a private copy, not a clone), copy of a
 * two-segment chain (result must be linearized into one segment), and
 * offset/truncation variants.  NOTE(review): NULL checks, data stores
 * and the fail label are outside the visible chunk.
 */
419 test_pktmbuf_copy(struct rte_mempool *pktmbuf_pool)
421 struct rte_mbuf *m = NULL;
422 struct rte_mbuf *copy = NULL;
423 struct rte_mbuf *copy2 = NULL;
424 struct rte_mbuf *clone = NULL;
425 unaligned_uint32_t *data;
428 m = rte_pktmbuf_alloc(pktmbuf_pool);
430 GOTO_FAIL("ooops not allocating mbuf");
432 if (rte_pktmbuf_pkt_len(m) != 0)
433 GOTO_FAIL("Bad length");
435 rte_pktmbuf_append(m, sizeof(uint32_t));
436 data = rte_pktmbuf_mtod(m, unaligned_uint32_t *);
439 /* copy the allocated mbuf; UINT32_MAX length = copy everything */
440 copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX);
442 GOTO_FAIL("cannot copy data\n");
444 if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t))
445 GOTO_FAIL("copy length incorrect\n");
447 if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t))
448 GOTO_FAIL("copy data length incorrect\n");
450 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
451 if (*data != MAGIC_DATA)
452 GOTO_FAIL("invalid data in copy\n");
455 rte_pktmbuf_free(copy);
458 /* same test with a cloned mbuf */
459 clone = rte_pktmbuf_clone(m, pktmbuf_pool);
461 GOTO_FAIL("cannot clone data\n");
463 if (!RTE_MBUF_CLONED(clone))
464 GOTO_FAIL("clone did not give a cloned mbuf\n");
466 copy = rte_pktmbuf_copy(clone, pktmbuf_pool, 0, UINT32_MAX);
468 GOTO_FAIL("cannot copy cloned mbuf\n");
/* a copy must own its data even when the source was a clone */
470 if (RTE_MBUF_CLONED(copy))
471 GOTO_FAIL("copy of clone is cloned?\n");
473 if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t))
474 GOTO_FAIL("copy clone length incorrect\n");
476 if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t))
477 GOTO_FAIL("copy clone data length incorrect\n");
479 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
480 if (*data != MAGIC_DATA)
481 GOTO_FAIL("invalid data in clone copy\n");
482 rte_pktmbuf_free(clone);
483 rte_pktmbuf_free(copy);
488 /* same test with a chained mbuf */
489 m->next = rte_pktmbuf_alloc(pktmbuf_pool);
491 GOTO_FAIL("Next Pkt Null\n");
494 rte_pktmbuf_append(m->next, sizeof(uint32_t));
495 m->pkt_len = 2 * sizeof(uint32_t);
496 data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
/* second segment carries a distinct marker to detect ordering */
497 *data = MAGIC_DATA + 1;
499 copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX);
501 GOTO_FAIL("cannot copy data\n");
503 if (rte_pktmbuf_pkt_len(copy) != 2 * sizeof(uint32_t))
504 GOTO_FAIL("chain copy length incorrect\n");
/* data_len == pkt_len proves the chain was coalesced into one segment */
506 if (rte_pktmbuf_data_len(copy) != 2 * sizeof(uint32_t))
507 GOTO_FAIL("chain copy data length incorrect\n");
509 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
510 if (data[0] != MAGIC_DATA || data[1] != MAGIC_DATA + 1)
511 GOTO_FAIL("invalid data in copy\n");
513 rte_pktmbuf_free(copy2);
515 /* test offset copy: skip the first word, keep the second */
516 copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool,
517 sizeof(uint32_t), UINT32_MAX);
519 GOTO_FAIL("cannot copy the copy\n");
521 if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t))
522 GOTO_FAIL("copy with offset, length incorrect\n");
524 if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t))
525 GOTO_FAIL("copy with offset, data length incorrect\n");
527 data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *);
528 if (data[0] != MAGIC_DATA + 1)
529 GOTO_FAIL("copy with offset, invalid data\n");
531 rte_pktmbuf_free(copy2);
533 /* test truncation copy: keep only the first word */
534 copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool,
535 0, sizeof(uint32_t));
537 GOTO_FAIL("cannot copy the copy\n");
539 if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t))
540 GOTO_FAIL("copy with truncate, length incorrect\n");
542 if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t))
543 GOTO_FAIL("copy with truncate, data length incorrect\n");
545 data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *);
546 if (data[0] != MAGIC_DATA)
547 GOTO_FAIL("copy with truncate, invalid data\n");
551 rte_pktmbuf_free(copy);
552 rte_pktmbuf_free(copy2);
557 printf("%s ok\n", __func__);
/* failure path: release whatever was allocated */
564 rte_pktmbuf_free(copy);
566 rte_pktmbuf_free(copy2);
/*
 * Attach mbufs from a second pool (0 data room, MBUF2_PRIV_SIZE private
 * area) to an mbuf from the default pool, chaining attach->attach, then
 * detach both and verify data pointers, headroom and refcounts restore
 * correctly.  NOTE(review): NULL checks and the fail label are outside
 * the visible chunk.
 */
571 test_attach_from_different_pool(struct rte_mempool *pktmbuf_pool,
572 struct rte_mempool *pktmbuf_pool2)
574 struct rte_mbuf *m = NULL;
575 struct rte_mbuf *clone = NULL;
576 struct rte_mbuf *clone2 = NULL;
577 char *data, *c_data, *c_data2;
580 m = rte_pktmbuf_alloc(pktmbuf_pool);
582 GOTO_FAIL("cannot allocate mbuf");
584 if (rte_pktmbuf_pkt_len(m) != 0)
585 GOTO_FAIL("Bad length");
587 data = rte_pktmbuf_mtod(m, char *);
589 /* allocate a new mbuf from the second pool, and attach it to the first
591 clone = rte_pktmbuf_alloc(pktmbuf_pool2);
593 GOTO_FAIL("cannot allocate mbuf from second pool\n");
595 /* check data room size and priv size, and erase priv */
596 if (rte_pktmbuf_data_room_size(clone->pool) != 0)
597 GOTO_FAIL("data room size should be 0\n");
598 if (rte_pktmbuf_priv_size(clone->pool) != MBUF2_PRIV_SIZE)
599 GOTO_FAIL("data room size should be %d\n", MBUF2_PRIV_SIZE);
600 memset(clone + 1, 0, MBUF2_PRIV_SIZE);
602 /* save data pointer to compare it after detach() */
603 c_data = rte_pktmbuf_mtod(clone, char *);
/* in this pool the data buffer sits right after the private area */
604 if (c_data != (char *)clone + sizeof(*clone) + MBUF2_PRIV_SIZE)
605 GOTO_FAIL("bad data pointer in clone");
606 if (rte_pktmbuf_headroom(clone) != 0)
607 GOTO_FAIL("bad headroom in clone");
609 rte_pktmbuf_attach(clone, m);
/* after attach, the clone must point into m's data buffer */
611 if (rte_pktmbuf_mtod(clone, char *) != data)
612 GOTO_FAIL("clone was not attached properly\n");
613 if (rte_pktmbuf_headroom(clone) != RTE_PKTMBUF_HEADROOM)
614 GOTO_FAIL("bad headroom in clone after attach");
615 if (rte_mbuf_refcnt_read(m) != 2)
616 GOTO_FAIL("invalid refcnt in m\n");
618 /* allocate a new mbuf from the second pool, and attach it to the first
620 clone2 = rte_pktmbuf_alloc(pktmbuf_pool2);
622 GOTO_FAIL("cannot allocate clone2 from second pool\n");
624 /* check data room size and priv size, and erase priv */
625 if (rte_pktmbuf_data_room_size(clone2->pool) != 0)
626 GOTO_FAIL("data room size should be 0\n");
627 if (rte_pktmbuf_priv_size(clone2->pool) != MBUF2_PRIV_SIZE)
628 GOTO_FAIL("data room size should be %d\n", MBUF2_PRIV_SIZE);
629 memset(clone2 + 1, 0, MBUF2_PRIV_SIZE);
631 /* save data pointer to compare it after detach() */
632 c_data2 = rte_pktmbuf_mtod(clone2, char *);
633 if (c_data2 != (char *)clone2 + sizeof(*clone2) + MBUF2_PRIV_SIZE)
634 GOTO_FAIL("bad data pointer in clone2");
635 if (rte_pktmbuf_headroom(clone2) != 0)
636 GOTO_FAIL("bad headroom in clone2");
/* attaching to a clone must resolve to the underlying direct mbuf m */
638 rte_pktmbuf_attach(clone2, clone);
640 if (rte_pktmbuf_mtod(clone2, char *) != data)
641 GOTO_FAIL("clone2 was not attached properly\n");
642 if (rte_pktmbuf_headroom(clone2) != RTE_PKTMBUF_HEADROOM)
643 GOTO_FAIL("bad headroom in clone2 after attach");
644 if (rte_mbuf_refcnt_read(m) != 3)
645 GOTO_FAIL("invalid refcnt in m\n");
647 /* detach the clones */
648 rte_pktmbuf_detach(clone);
/* detach must restore the clone's own buffer pointer saved above */
649 if (c_data != rte_pktmbuf_mtod(clone, char *))
650 GOTO_FAIL("clone was not detached properly\n");
651 if (rte_mbuf_refcnt_read(m) != 2)
652 GOTO_FAIL("invalid refcnt in m\n");
654 rte_pktmbuf_detach(clone2);
655 if (c_data2 != rte_pktmbuf_mtod(clone2, char *))
656 GOTO_FAIL("clone2 was not detached properly\n");
657 if (rte_mbuf_refcnt_read(m) != 1)
658 GOTO_FAIL("invalid refcnt in m\n");
660 /* free the clones and the initial mbuf */
661 rte_pktmbuf_free(clone2);
662 rte_pktmbuf_free(clone);
664 printf("%s ok\n", __func__);
/* failure path: release whatever was allocated */
671 rte_pktmbuf_free(clone);
673 rte_pktmbuf_free(clone2);
678 * test allocation and free of mbufs
/*
 * Drain the whole pool by allocating NB_MBUF mbufs, verify that further
 * alloc/clone attempts fail on the empty pool, then free everything.
 * NOTE(review): NULL checks, return statement and loop bodies are
 * partially outside the visible chunk.
 */
681 test_pktmbuf_pool(struct rte_mempool *pktmbuf_pool)
684 struct rte_mbuf *m[NB_MBUF];
687 for (i=0; i<NB_MBUF; i++)
690 /* alloc NB_MBUF mbufs */
691 for (i=0; i<NB_MBUF; i++) {
692 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
694 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
/* the pool is now exhausted: one more alloc must fail */
698 struct rte_mbuf *extra = NULL;
699 extra = rte_pktmbuf_alloc(pktmbuf_pool);
701 printf("Error pool not empty");
/* clone also needs a fresh mbuf, so it must fail too */
704 extra = rte_pktmbuf_clone(m[0], pktmbuf_pool);
706 printf("Error pool not empty");
710 for (i=0; i<NB_MBUF; i++) {
712 rte_pktmbuf_free(m[i]);
719 * test bulk allocation and bulk free of mbufs
/*
 * Exercise rte_pktmbuf_alloc_bulk()/rte_pktmbuf_free_bulk() with two
 * private cache-less pools: single alloc + stepped free, stepped alloc
 * + single free, bulk free of one long chain, and bulk free of chains
 * that mix segments from both pools.  NOTE(review): error-handling
 * branches and goto labels are partially outside the visible chunk.
 */
722 test_pktmbuf_pool_bulk(void)
724 struct rte_mempool *pool = NULL;
725 struct rte_mempool *pool2 = NULL;
728 struct rte_mbuf *mbufs[NB_MBUF];
731 /* We cannot use the preallocated mbuf pools because their caches
732 * prevent us from bulk allocating all objects in them.
733 * So we create our own mbuf pools without caches.
735 printf("Create mbuf pools for bulk allocation.\n");
736 pool = rte_pktmbuf_pool_create("test_pktmbuf_bulk",
737 NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
739 printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n",
743 pool2 = rte_pktmbuf_pool_create("test_pktmbuf_bulk2",
744 NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
746 printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n",
751 /* Preconditions: Mempools must be full. */
752 if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) {
753 printf("Test precondition failed: mempools not full\n");
756 if (!(rte_mempool_avail_count(pool) == NB_MBUF &&
757 rte_mempool_avail_count(pool2) == NB_MBUF)) {
758 printf("Test precondition failed: mempools: %u+%u != %u+%u",
759 rte_mempool_avail_count(pool),
760 rte_mempool_avail_count(pool2),
765 printf("Test single bulk alloc, followed by multiple bulk free.\n");
767 /* Bulk allocate all mbufs in the pool, in one go. */
768 ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF);
770 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
773 /* Test that they have been removed from the pool. */
774 if (!rte_mempool_empty(pool)) {
775 printf("mempool not empty\n");
778 /* Bulk free all mbufs, in four steps. */
779 RTE_BUILD_BUG_ON(NB_MBUF % 4 != 0);
780 for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) {
781 rte_pktmbuf_free_bulk(&mbufs[i], NB_MBUF / 4);
782 /* Test that they have been returned to the pool. */
783 if (rte_mempool_avail_count(pool) != i + NB_MBUF / 4) {
784 printf("mempool avail count incorrect\n");
789 printf("Test multiple bulk alloc, followed by single bulk free.\n");
791 /* Bulk allocate all mbufs in the pool, in four steps. */
792 for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) {
793 ret = rte_pktmbuf_alloc_bulk(pool, &mbufs[i], NB_MBUF / 4);
795 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
799 /* Test that they have been removed from the pool. */
800 if (!rte_mempool_empty(pool)) {
801 printf("mempool not empty\n");
804 /* Bulk free all mbufs, in one go. */
805 rte_pktmbuf_free_bulk(mbufs, NB_MBUF);
806 /* Test that they have been returned to the pool. */
807 if (!rte_mempool_full(pool)) {
808 printf("mempool not full\n");
812 printf("Test bulk free of single long chain.\n");
814 /* Bulk allocate all mbufs in the pool, in one go. */
815 ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF);
817 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
820 /* Create a long mbuf chain: every mbuf is linked off mbufs[0]. */
821 for (i = 1; i < NB_MBUF; i++) {
822 ret = rte_pktmbuf_chain(mbufs[0], mbufs[i]);
824 printf("rte_pktmbuf_chain() failed: %d\n", ret);
829 /* Free the mbuf chain containing all the mbufs. */
830 rte_pktmbuf_free_bulk(mbufs, 1);
831 /* Test that they have been returned to the pool. */
832 if (!rte_mempool_full(pool)) {
833 printf("mempool not full\n");
837 printf("Test bulk free of multiple chains using multiple pools.\n");
839 /* Create mbuf chains containing mbufs from different pools. */
840 RTE_BUILD_BUG_ON(CHAIN_LEN % 2 != 0);
841 RTE_BUILD_BUG_ON(NB_MBUF % (CHAIN_LEN / 2) != 0);
842 for (i = 0; i < NB_MBUF * 2; i++) {
/* NOTE(review): (i & 4) alternates pools in runs of 4, giving each
 * pool half of the segments overall — confirm this matches CHAIN_LEN */
843 m = rte_pktmbuf_alloc((i & 4) ? pool2 : pool);
845 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
848 if ((i % CHAIN_LEN) == 0)
849 mbufs[i / CHAIN_LEN] = m;
851 rte_pktmbuf_chain(mbufs[i / CHAIN_LEN], m);
853 /* Test that both pools have been emptied. */
854 if (!(rte_mempool_empty(pool) && rte_mempool_empty(pool2))) {
855 printf("mempools not empty\n");
858 /* Free one mbuf chain. */
859 rte_pktmbuf_free_bulk(mbufs, 1);
860 /* Test that the segments have been returned to the pools. */
861 if (!(rte_mempool_avail_count(pool) == CHAIN_LEN / 2 &&
862 rte_mempool_avail_count(pool2) == CHAIN_LEN / 2)) {
863 printf("all segments of first mbuf have not been returned\n");
866 /* Free the remaining mbuf chains. */
867 rte_pktmbuf_free_bulk(&mbufs[1], NB_MBUF * 2 / CHAIN_LEN - 1);
868 /* Test that they have been returned to the pools. */
869 if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) {
870 printf("mempools not full\n");
881 printf("Free mbuf pools for bulk allocation.\n");
882 rte_mempool_free(pool);
883 rte_mempool_free(pool2);
888 * test that the pointer to the data on a packet mbuf is set properly
/*
 * Verify that rte_pktmbuf_alloc() resets data_off: allocate all mbufs,
 * shift each data_off by 64, free them, then re-allocate and check that
 * data_off is back at RTE_PKTMBUF_HEADROOM.  NOTE(review): NULL checks
 * and the return statement are outside the visible chunk.
 */
891 test_pktmbuf_pool_ptr(struct rte_mempool *pktmbuf_pool)
894 struct rte_mbuf *m[NB_MBUF];
897 for (i=0; i<NB_MBUF; i++)
900 /* alloc NB_MBUF mbufs */
901 for (i=0; i<NB_MBUF; i++) {
902 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
904 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
/* deliberately pollute the data offset before freeing */
908 m[i]->data_off += 64;
912 for (i=0; i<NB_MBUF; i++) {
914 rte_pktmbuf_free(m[i]);
917 for (i=0; i<NB_MBUF; i++)
920 /* alloc NB_MBUF mbufs */
921 for (i=0; i<NB_MBUF; i++) {
922 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
924 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
/* alloc must have reset the offset polluted above */
928 if (m[i]->data_off != RTE_PKTMBUF_HEADROOM) {
929 printf("invalid data_off\n");
935 for (i=0; i<NB_MBUF; i++) {
937 rte_pktmbuf_free(m[i]);
/*
 * Allocate NB_MBUF mbufs and release them segment by segment with
 * rte_pktmbuf_free_seg(), walking each chain via the mt iterator.
 * NOTE(review): the chain-walk loop body and return path are mostly
 * outside the visible chunk.
 */
944 test_pktmbuf_free_segment(struct rte_mempool *pktmbuf_pool)
947 struct rte_mbuf *m[NB_MBUF];
950 for (i=0; i<NB_MBUF; i++)
953 /* alloc NB_MBUF mbufs */
954 for (i=0; i<NB_MBUF; i++) {
955 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
957 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
963 for (i=0; i<NB_MBUF; i++) {
965 struct rte_mbuf *mb, *mt;
/* free one segment at a time rather than the whole chain */
971 rte_pktmbuf_free_seg(mt);
980 * Stress test for rte_mbuf atomic refcnt.
981 * Implies that RTE_MBUF_REFCNT_ATOMIC is defined.
982 * For more efficiency, recommended to run with RTE_LIBRTE_MBUF_DEBUG defined.
985 #ifdef RTE_MBUF_REFCNT_ATOMIC
/*
 * Slave-lcore worker for the refcnt stress test: dequeue mbuf pointers
 * from the shared ring and free them (each free drops one reference)
 * until the master raises refcnt_stop_slaves, then record the count.
 * @arg: the shared rte_ring of mbuf pointers.
 */
988 test_refcnt_slave(void *arg)
990 unsigned lcore, free;
992 struct rte_ring *refcnt_mbuf_ring = arg;
994 lcore = rte_lcore_id();
995 printf("%s started at lcore %u\n", __func__, lcore);
/* poll until the master signals completion of all iterations */
998 while (refcnt_stop_slaves == 0) {
999 if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) {
1001 rte_pktmbuf_free(mp);
1005 refcnt_lcore[lcore] += free;
1006 printf("%s finished at lcore %u, "
1007 "number of freed mbufs: %u\n",
1008 __func__, lcore, free);
/*
 * One master-side iteration of the refcnt stress test: for every mbuf
 * in the pool take N extra references (in one update or one-by-one) and
 * enqueue the mbuf N times for slaves to free; then wait until the ring
 * drains and the pool refills, or panic on timeout.
 */
1013 test_refcnt_iter(unsigned int lcore, unsigned int iter,
1014 struct rte_mempool *refcnt_pool,
1015 struct rte_ring *refcnt_mbuf_ring)
1018 unsigned i, n, tref, wn;
1023 /* For each mbuf in the pool:
1025 * - increment its reference up to N+1,
1026 * - enqueue it N times into the ring for slave cores to free.
1028 for (i = 0, n = rte_mempool_avail_count(refcnt_pool);
1029 i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
/* at least one reference per mbuf, at most REFCNT_MAX_REF-1 */
1031 ref = RTE_MAX(rte_rand() % REFCNT_MAX_REF, 1UL);
/* randomly exercise both the bulk and the incremental update paths */
1033 if ((ref & 1) != 0) {
1034 rte_pktmbuf_refcnt_update(m, ref);
1036 rte_ring_enqueue(refcnt_mbuf_ring, m);
1038 while (ref-- != 0) {
1039 rte_pktmbuf_refcnt_update(m, 1);
1040 rte_ring_enqueue(refcnt_mbuf_ring, m);
/* drop the allocation reference; slaves drop the rest */
1043 rte_pktmbuf_free(m);
1047 rte_panic("(lcore=%u, iter=%u): was able to allocate only "
1048 "%u from %u mbufs\n", lcore, iter, i, n);
1050 /* wait till slave lcores will consume all mbufs */
1051 while (!rte_ring_empty(refcnt_mbuf_ring))
1054 /* check that all mbufs are back into mempool by now */
1055 for (wn = 0; wn != REFCNT_MAX_TIMEOUT; wn++) {
1056 if ((i = rte_mempool_avail_count(refcnt_pool)) == n) {
1057 refcnt_lcore[lcore] += tref;
1058 printf("%s(lcore=%u, iter=%u) completed, "
1059 "%u references processed\n",
1060 __func__, lcore, iter, tref);
/* mbufs still missing after REFCNT_MAX_TIMEOUT waits: hard failure */
1066 rte_panic("(lcore=%u, iter=%u): after %us only "
1067 "%u of %u mbufs left free\n", lcore, iter, wn, i, n);
/*
 * Master-lcore driver: run REFCNT_MAX_ITER iterations of the refcnt
 * stress test, then signal the slave lcores to stop.
 */
1071 test_refcnt_master(struct rte_mempool *refcnt_pool,
1072 struct rte_ring *refcnt_mbuf_ring)
1076 lcore = rte_lcore_id();
1077 printf("%s started at lcore %u\n", __func__, lcore);
1079 for (i = 0; i != REFCNT_MAX_ITER; i++)
1080 test_refcnt_iter(lcore, i, refcnt_pool, refcnt_mbuf_ring);
/* tell test_refcnt_slave() workers to exit their polling loop */
1082 refcnt_stop_slaves = 1;
1085 printf("%s finished at lcore %u\n", __func__, lcore);
/*
 * Entry point for the atomic-refcnt stress test: create a pool and a
 * ring, launch test_refcnt_slave() on all other lcores, run the master
 * loop, then cross-check that the references issued by the master equal
 * the mbufs freed by the slaves.  Skipped when fewer than 2 lcores.
 * NOTE(review): error-handling branches and the return value lines are
 * outside the visible chunk.
 */
1092 test_refcnt_mbuf(void)
1094 #ifdef RTE_MBUF_REFCNT_ATOMIC
1095 unsigned int master, slave, tref;
1097 struct rte_mempool *refcnt_pool = NULL;
1098 struct rte_ring *refcnt_mbuf_ring = NULL;
1100 if (rte_lcore_count() < 2) {
1101 printf("Not enough cores for test_refcnt_mbuf, expecting at least 2\n");
1102 return TEST_SKIPPED;
1105 printf("starting %s, at %u lcores\n", __func__, rte_lcore_count());
1107 /* create refcnt pool & ring if they don't exist */
1109 refcnt_pool = rte_pktmbuf_pool_create(MAKE_STRING(refcnt_pool),
1110 REFCNT_MBUF_NUM, 0, 0, 0,
1112 if (refcnt_pool == NULL) {
1113 printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
/* ring sized to hold the worst case: every mbuf at max refcount */
1118 refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
1119 rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
1121 if (refcnt_mbuf_ring == NULL) {
1122 printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
1127 refcnt_stop_slaves = 0;
1128 memset(refcnt_lcore, 0, sizeof (refcnt_lcore));
1130 rte_eal_mp_remote_launch(test_refcnt_slave, refcnt_mbuf_ring,
1133 test_refcnt_master(refcnt_pool, refcnt_mbuf_ring);
1135 rte_eal_mp_wait_lcore();
1137 /* check that we processed all references */
1139 master = rte_get_master_lcore();
1141 RTE_LCORE_FOREACH_SLAVE(slave)
1142 tref += refcnt_lcore[slave];
/* slaves' total frees must match the master's issued references */
1144 if (tref != refcnt_lcore[master])
1145 rte_panic("refernced mbufs: %u, freed mbufs: %u\n",
1146 tref, refcnt_lcore[master]);
1148 rte_mempool_dump(stdout, refcnt_pool);
1149 rte_ring_dump(stdout, refcnt_mbuf_ring);
1154 rte_mempool_free(refcnt_pool);
1155 rte_ring_free(refcnt_mbuf_ring);
1163 #include <sys/wait.h>
1165 /* use fork() to test mbuf errors panic */
/*
 * Run rte_mbuf_sanity_check(buf, 1) in a forked child so an expected
 * panic kills only the child.  The child exits 0 if no panic occurred;
 * the parent (outside this view) inspects the exit status.
 * NOTE(review): the fork() call, waitpid handling and return values are
 * outside the visible chunk.
 */
1167 verify_mbuf_check_panics(struct rte_mbuf *buf)
1175 rte_mbuf_sanity_check(buf, 1); /* should panic */
1176 exit(0); /* return normally if it doesn't panic */
1177 } else if (pid < 0){
1178 printf("Fork Failed\n");
/*
 * Drive verify_mbuf_check_panics() through a series of deliberately
 * corrupted mbufs (NULL, bad pool, bad iova, bad addr, refcnt 0 and
 * refcnt UINT16_MAX) and confirm rte_mbuf_sanity_check() panics on
 * each, after first confirming it accepts a good mbuf.  NOTE(review):
 * the badbuf re-initialization between cases and the return lines are
 * outside the visible chunk.
 */
1189 test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool)
1191 struct rte_mbuf *buf;
1192 struct rte_mbuf badbuf;
1194 printf("Checking rte_mbuf_sanity_check for failure conditions\n");
1196 /* get a good mbuf to use to make copies */
1197 buf = rte_pktmbuf_alloc(pktmbuf_pool);
/* the good mbuf must NOT panic (child exits normally -> -1 expected) */
1200 printf("Checking good mbuf initially\n");
1201 if (verify_mbuf_check_panics(buf) != -1)
1204 printf("Now checking for error conditions\n");
1206 if (verify_mbuf_check_panics(NULL)) {
1207 printf("Error with NULL mbuf test\n");
1213 if (verify_mbuf_check_panics(&badbuf)) {
1214 printf("Error with bad-pool mbuf test\n");
1219 badbuf.buf_iova = 0;
1220 if (verify_mbuf_check_panics(&badbuf)) {
1221 printf("Error with bad-physaddr mbuf test\n");
1226 badbuf.buf_addr = NULL;
1227 if (verify_mbuf_check_panics(&badbuf)) {
1228 printf("Error with bad-addr mbuf test\n");
1234 if (verify_mbuf_check_panics(&badbuf)) {
1235 printf("Error with bad-refcnt(0) mbuf test\n");
1240 badbuf.refcnt = UINT16_MAX;
1241 if (verify_mbuf_check_panics(&badbuf)) {
1242 printf("Error with bad-refcnt(MAX) mbuf test\n");
/*
 * Build a chained mbuf of pkt_len bytes split over nb_segs segments,
 * fill it with a deterministic byte pattern, linearize it with
 * rte_pktmbuf_linearize() and verify the result is contiguous with the
 * pattern intact.  NOTE(review): several interior lines (remain/seg_len
 * bookkeeping, error returns) are outside the visible chunk.
 */
1250 test_mbuf_linearize(struct rte_mempool *pktmbuf_pool, int pkt_len,
1254 struct rte_mbuf *m = NULL, *mbuf = NULL;
1262 printf("Packet size must be 1 or more (is %d)\n", pkt_len);
1267 printf("Number of segments must be 1 or more (is %d)\n",
1272 seg_len = pkt_len / nb_segs;
1278 /* Create chained mbuf_src and fill it generated data */
1279 for (seg = 0; remain > 0; seg++) {
1281 m = rte_pktmbuf_alloc(pktmbuf_pool);
1283 printf("Cannot create segment for source mbuf");
1287 /* Make sure the tailroom is zeroed */
1288 memset(rte_pktmbuf_mtod(m, uint8_t *), 0,
1289 rte_pktmbuf_tailroom(m));
1292 if (data_len > seg_len)
1295 data = (uint8_t *)rte_pktmbuf_append(m, data_len);
1297 printf("Cannot append %d bytes to the mbuf\n",
/* pattern: byte value = global offset modulo 0xff */
1302 for (i = 0; i < data_len; i++)
1303 data[i] = (seg * seg_len + i) % 0x0ff;
1308 rte_pktmbuf_chain(mbuf, m);
1313 /* Create destination buffer to store coalesced data */
1314 if (rte_pktmbuf_linearize(mbuf)) {
1315 printf("Mbuf linearization failed\n");
1319 if (!rte_pktmbuf_is_contiguous(mbuf)) {
1320 printf("Source buffer should be contiguous after "
1325 data = rte_pktmbuf_mtod(mbuf, uint8_t *);
/* verify the pattern survived linearization byte-for-byte */
1327 for (i = 0; i < pkt_len; i++)
1328 if (data[i] != (i % 0x0ff)) {
1329 printf("Incorrect data in linearized mbuf\n");
1333 rte_pktmbuf_free(mbuf);
/* failure path */
1338 rte_pktmbuf_free(mbuf);
/*
 * Table-driven wrapper: run test_mbuf_linearize() for every
 * (size, nb_segs) combination in mbuf_array.  NOTE(review): the table
 * contents and return lines are outside the visible chunk.
 */
1343 test_mbuf_linearize_check(struct rte_mempool *pktmbuf_pool)
1345 struct test_mbuf_array {
1357 printf("Test mbuf linearize API\n");
1359 for (i = 0; i < RTE_DIM(mbuf_array); i++)
1360 if (test_mbuf_linearize(pktmbuf_pool, mbuf_array[i].size,
1361 mbuf_array[i].nb_segs)) {
1362 printf("Test failed for %d, %d\n", mbuf_array[i].size,
1363 mbuf_array[i].nb_segs);
1371 * Helper function for test_tx_ofload
/*
 * Helper for test_tx_offload(): fill mb's tx_offload bit-fields one by
 * one (used as the baseline against the raw rte_mbuf_tx_offload()
 * form).  NOTE(review): the l2/l3/l4 field assignments are outside the
 * visible chunk.
 */
1374 set_tx_offload(struct rte_mbuf *mb, uint64_t il2, uint64_t il3, uint64_t il4,
1375 uint64_t tso, uint64_t ol3, uint64_t ol2)
1380 mb->tso_segsz = tso;
1381 mb->outer_l3_len = ol3;
1382 mb->outer_l2_len = ol2;
/*
 * Benchmark and cross-check the two ways of filling mbuf tx_offload:
 * per-bit-field assignment (set_tx_offload) vs the composed raw value
 * (rte_mbuf_tx_offload).  Random field values are generated within each
 * field's bit width; both methods run over `num` mbufs, timings are
 * printed, and a sample from each pass must compare equal.
 * Returns 0 when the two encodings agree, -EINVAL otherwise.
 */
1386 test_tx_offload(void)
1388 struct rte_mbuf *mb;
1389 uint64_t tm, v1, v2;
1393 static volatile struct {
1400 const uint32_t num = 0x10000;
/* draw each field within its bit-field width so no value truncates */
1402 txof.l2 = rte_rand() % (1 << RTE_MBUF_L2_LEN_BITS);
1403 txof.l3 = rte_rand() % (1 << RTE_MBUF_L3_LEN_BITS);
1404 txof.l4 = rte_rand() % (1 << RTE_MBUF_L4_LEN_BITS);
1405 txof.tso = rte_rand() % (1 << RTE_MBUF_TSO_SEGSZ_BITS);
1407 printf("%s started, tx_offload = {\n"
1411 "\ttso_segsz=%#hx,\n"
1412 "\touter_l3_len=%#x,\n"
1413 "\touter_l2_len=%#x,\n"
1416 txof.l2, txof.l3, txof.l4, txof.tso, txof.l3, txof.l2);
1418 sz = sizeof(*mb) * num;
1419 mb = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
1421 printf("%s failed, out of memory\n", __func__);
/* pass 1: bit-field writes */
1426 tm = rte_rdtsc_precise();
1428 for (i = 0; i != num; i++)
1429 set_tx_offload(mb + i, txof.l2, txof.l3, txof.l4,
1430 txof.tso, txof.l3, txof.l2);
1432 tm = rte_rdtsc_precise() - tm;
1433 printf("%s set tx_offload by bit-fields: %u iterations, %"
1434 PRIu64 " cycles, %#Lf cycles/iter\n",
1435 __func__, num, tm, (long double)tm / num);
/* sample one result from pass 1 (all entries are identical) */
1437 v1 = mb[rte_rand() % num].tx_offload;
/* pass 2: single raw-value write */
1440 tm = rte_rdtsc_precise();
1442 for (i = 0; i != num; i++)
1443 mb[i].tx_offload = rte_mbuf_tx_offload(txof.l2, txof.l3,
1444 txof.l4, txof.tso, txof.l3, txof.l2, 0);
1446 tm = rte_rdtsc_precise() - tm;
1447 printf("%s set raw tx_offload: %u iterations, %"
1448 PRIu64 " cycles, %#Lf cycles/iter\n",
1449 __func__, num, tm, (long double)tm / num);
1451 v2 = mb[rte_rand() % num].tx_offload;
1455 printf("%s finished\n"
1456 "expected tx_offload value: 0x%" PRIx64 ";\n"
1457 "rte_mbuf_tx_offload value: 0x%" PRIx64 ";\n",
1460 return (v1 == v2) ? 0 : -EINVAL;
/*
 * Helper: allocate one mbuf, apply @ol_flags and @segsize, then check that
 * rte_validate_tx_offload() returns @expected_retval.  @test_name is only
 * used in the failure message.  Frees the mbuf on both success and failure
 * paths.  NOTE(review): the ol_flags/segsize parameter declarations and the
 * return statements are on elided lines.
 */
1464 test_mbuf_validate_tx_offload(const char *test_name,
1465 struct rte_mempool *pktmbuf_pool,
1468 int expected_retval)
1470 struct rte_mbuf *m = NULL;
1473 /* alloc a mbuf and do sanity check */
1474 m = rte_pktmbuf_alloc(pktmbuf_pool);
1476 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1477 if (rte_pktmbuf_pkt_len(m) != 0)
1478 GOTO_FAIL("%s: Bad packet length\n", __func__);
1479 rte_mbuf_sanity_check(m, 0);
/* install the offload request under test */
1480 m->ol_flags = ol_flags;
1481 m->tso_segsz = segsize;
1482 ret = rte_validate_tx_offload(m);
1483 if (ret != expected_retval)
1484 GOTO_FAIL("%s(%s): expected ret val: %d; received: %d\n",
1485 __func__, test_name, expected_retval, ret);
1486 rte_pktmbuf_free(m);
/* fail path: release the mbuf before returning the error */
1491 rte_pktmbuf_free(m);
/*
 * Table of positive and negative cases for rte_validate_tx_offload(), each
 * run through the test_mbuf_validate_tx_offload() helper.  Covers: IP cksum
 * vs IPv6, missing IP type, zero TSO segsz, TSO without IP cksum, outer IP
 * cksum without outer IPv4, and the no-offload fast path.
 * NOTE(review): the "ol_flags = 0" resets between cases sit on elided lines
 * (e.g. 1512, 1528, 1537...); each case below assumes a fresh flag set.
 */
1498 test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
1500 /* test to validate tx offload flags */
1501 uint64_t ol_flags = 0;
1503 /* test to validate if IP checksum is counted only for IPV4 packet */
1504 /* set both IP checksum and IPV6 flags */
1505 ol_flags |= PKT_TX_IP_CKSUM;
1506 ol_flags |= PKT_TX_IPV6;
1507 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_CKSUM_IPV6_SET",
1509 ol_flags, 0, -EINVAL) < 0)
1510 GOTO_FAIL("%s failed: IP cksum is set incorrect.\n", __func__);
1511 /* resetting ol_flags for next testcase */
1514 /* test to validate if IP type is set when required */
1515 ol_flags |= PKT_TX_L4_MASK;
1516 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
1518 ol_flags, 0, -EINVAL) < 0)
1519 GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
1521 /* test if IP type is set when TCP SEG is on */
1522 ol_flags |= PKT_TX_TCP_SEG;
1523 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
1525 ol_flags, 0, -EINVAL) < 0)
1526 GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
1529 /* test to confirm IP type (IPV4/IPV6) is set */
1530 ol_flags = PKT_TX_L4_MASK;
1531 ol_flags |= PKT_TX_IPV6;
1532 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_SET",
1534 ol_flags, 0, 0) < 0)
1535 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1538 /* test to check TSO segment size is non-zero */
1539 ol_flags |= PKT_TX_IPV4;
1540 ol_flags |= PKT_TX_TCP_SEG;
1541 /* set 0 tso segment size */
1542 if (test_mbuf_validate_tx_offload("MBUF_TEST_NULL_TSO_SEGSZ",
1544 ol_flags, 0, -EINVAL) < 0)
1545 GOTO_FAIL("%s failed: tso segment size is null.\n", __func__);
1547 /* retain IPV4 and PKT_TX_TCP_SEG mask */
1548 /* set valid tso segment size but IP CKSUM not set */
1549 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_NOT_SET",
1551 ol_flags, 512, -EINVAL) < 0)
1552 GOTO_FAIL("%s failed: IP CKSUM is not set.\n", __func__);
1554 /* test to validate if IP checksum is set for TSO capability */
1555 /* retain IPV4, TCP_SEG, tso_seg size */
1556 ol_flags |= PKT_TX_IP_CKSUM;
1557 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_SET",
1559 ol_flags, 512, 0) < 0)
1560 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1562 /* test to confirm TSO for IPV6 type */
1564 ol_flags |= PKT_TX_IPV6;
1565 ol_flags |= PKT_TX_TCP_SEG;
1566 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IPV6_SET",
1568 ol_flags, 512, 0) < 0)
1569 GOTO_FAIL("%s failed: TSO req not met.\n", __func__);
1572 /* test if outer IP checksum set for non outer IPv4 packet */
1573 ol_flags |= PKT_TX_IPV6;
1574 ol_flags |= PKT_TX_OUTER_IP_CKSUM;
1575 if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_NOT_SET",
1577 ol_flags, 512, -EINVAL) < 0)
1578 GOTO_FAIL("%s failed: Outer IP cksum set.\n", __func__);
1581 /* test to confirm outer IP checksum is set for outer IPV4 packet */
1582 ol_flags |= PKT_TX_OUTER_IP_CKSUM;
1583 ol_flags |= PKT_TX_OUTER_IPV4;
1584 if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_SET",
1586 ol_flags, 512, 0) < 0)
1587 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1590 /* test to confirm if packets with no TX_OFFLOAD_MASK are skipped */
1591 if (test_mbuf_validate_tx_offload("MBUF_TEST_OL_MASK_NOT_SET",
1593 ol_flags, 512, 0) < 0)
1594 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1601 * Test for allocating a bulk of mbufs:
1602 * define an array of positive sizes for mbuf allocations.
/*
 * Positive bulk-allocation test: for each count around the mempool cache
 * size, rte_pktmbuf_alloc_bulk() must succeed; all mbufs are then freed.
 * NOTE(review): the "0" and MEMPOOL_CACHE_SIZE entries of alloc_counts and
 * the success/return path are on elided lines.
 */
1605 test_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
1608 unsigned int idx, loop;
1609 unsigned int alloc_counts[] = {
1611 MEMPOOL_CACHE_SIZE - 1,
1612 MEMPOOL_CACHE_SIZE + 1,
/* 1.5 is exact in floating point for a power-of-two cache size */
1613 MEMPOOL_CACHE_SIZE * 1.5,
1614 MEMPOOL_CACHE_SIZE * 2,
1615 MEMPOOL_CACHE_SIZE * 2 - 1,
1616 MEMPOOL_CACHE_SIZE * 2 + 1,
1620 /* allocate a large array of mbuf pointers */
1621 struct rte_mbuf *mbufs[NB_MBUF] = { 0 };
1622 for (idx = 0; idx < RTE_DIM(alloc_counts); idx++) {
1623 ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
/* on success, free every mbuf that was handed out */
1626 for (loop = 0; loop < alloc_counts[idx] &&
1627 mbufs[loop] != NULL; loop++)
1628 rte_pktmbuf_free(mbufs[loop]);
1629 } else if (ret != 0) {
1630 printf("%s: Bulk alloc failed count(%u); ret val(%d)\n",
1631 __func__, alloc_counts[idx], ret);
1639 * Negative testing for allocating a bulk of mbufs
/*
 * Negative bulk-allocation test: each count exceeds what the pool can
 * provide, so rte_pktmbuf_alloc_bulk() must fail (non-zero return).
 * Note MEMPOOL_CACHE_SIZE - NB_MBUF deliberately underflows to a huge
 * unsigned value -- that is the point of the negative test.
 */
1642 test_neg_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
1645 unsigned int idx, loop;
1646 unsigned int neg_alloc_counts[] = {
1647 MEMPOOL_CACHE_SIZE - NB_MBUF,
1652 struct rte_mbuf *mbufs[NB_MBUF * 8] = { 0 };
1654 for (idx = 0; idx < RTE_DIM(neg_alloc_counts); idx++) {
1655 ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
1656 neg_alloc_counts[idx]);
/* success here is a test failure: release whatever was allocated */
1658 printf("%s: Bulk alloc must fail! count(%u); ret(%d)\n",
1659 __func__, neg_alloc_counts[idx], ret);
1660 for (loop = 0; loop < neg_alloc_counts[idx] &&
1661 mbufs[loop] != NULL; loop++)
1662 rte_pktmbuf_free(mbufs[loop]);
1670 * Test to read mbuf packet using rte_pktmbuf_read
/*
 * Basic rte_pktmbuf_read() test: append MBUF_TEST_DATA_LEN2 bytes of 0xfe,
 * read them back from offset 0 with a NULL scratch buffer (so the returned
 * pointer is into the mbuf itself for a contiguous mbuf), and verify the
 * contents.  Frees the mbuf on both paths.
 */
1673 test_pktmbuf_read(struct rte_mempool *pktmbuf_pool)
1675 struct rte_mbuf *m = NULL;
1677 const char *data_copy = NULL;
1681 m = rte_pktmbuf_alloc(pktmbuf_pool);
1683 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1684 if (rte_pktmbuf_pkt_len(m) != 0)
1685 GOTO_FAIL("%s: Bad packet length\n", __func__);
1686 rte_mbuf_sanity_check(m, 0);
1688 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
1690 GOTO_FAIL("%s: Cannot append data\n", __func__);
1691 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN2)
1692 GOTO_FAIL("%s: Bad packet length\n", __func__);
1693 memset(data, 0xfe, MBUF_TEST_DATA_LEN2);
1695 /* read the data from mbuf */
1696 data_copy = rte_pktmbuf_read(m, 0, MBUF_TEST_DATA_LEN2, NULL);
1697 if (data_copy == NULL)
1698 GOTO_FAIL("%s: Error in reading data!\n", __func__);
1699 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
/* cast: char signedness is implementation-defined */
1700 if (data_copy[off] != (char)0xfe)
1701 GOTO_FAIL("Data corrupted at offset %u", off);
1703 rte_pktmbuf_free(m);
/* fail path: release the mbuf before returning the error */
1709 rte_pktmbuf_free(m);
1716 * Test to read mbuf packet data from offset
/*
 * Exercise rte_pktmbuf_read() at various offsets/lengths on a single mbuf
 * that carries a dummy ethernet header (0xde) followed by payload (0xcc):
 * full header read, payload read, partial read, over-length reads (must
 * return NULL), zero-length reads, and max offset/length negative cases.
 * NOTE(review): this function mixes the legacy `struct ether_hdr` name with
 * `sizeof(struct rte_ether_hdr)` -- confirm both names refer to the same
 * layout in this DPDK version (compat aliases).
 */
1719 test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool)
1721 struct rte_mbuf *m = NULL;
1722 struct ether_hdr *hdr = NULL;
1724 const char *data_copy = NULL;
1726 unsigned int hdr_len = sizeof(struct rte_ether_hdr);
1729 m = rte_pktmbuf_alloc(pktmbuf_pool);
1731 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1733 if (rte_pktmbuf_pkt_len(m) != 0)
1734 GOTO_FAIL("%s: Bad packet length\n", __func__);
1735 rte_mbuf_sanity_check(m, 0);
1737 /* prepend an ethernet header */
1738 hdr = (struct ether_hdr *)rte_pktmbuf_prepend(m, hdr_len);
1740 GOTO_FAIL("%s: Cannot prepend header\n", __func__);
1741 if (rte_pktmbuf_pkt_len(m) != hdr_len)
1742 GOTO_FAIL("%s: Bad pkt length", __func__);
1743 if (rte_pktmbuf_data_len(m) != hdr_len)
1744 GOTO_FAIL("%s: Bad data length", __func__);
1745 memset(hdr, 0xde, hdr_len);
1747 /* read mbuf header info from 0 offset */
1748 data_copy = rte_pktmbuf_read(m, 0, hdr_len, NULL);
1749 if (data_copy == NULL)
1750 GOTO_FAIL("%s: Error in reading header!\n", __func__);
1751 for (off = 0; off < hdr_len; off++) {
1752 if (data_copy[off] != (char)0xde)
1753 GOTO_FAIL("Header info corrupted at offset %u", off);
1756 /* append sample data after ethernet header */
1757 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
1759 GOTO_FAIL("%s: Cannot append data\n", __func__);
1760 if (rte_pktmbuf_pkt_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
1761 GOTO_FAIL("%s: Bad packet length\n", __func__);
1762 if (rte_pktmbuf_data_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
1763 GOTO_FAIL("%s: Bad data length\n", __func__);
1764 memset(data, 0xcc, MBUF_TEST_DATA_LEN2);
1766 /* read mbuf data after header info */
1767 data_copy = rte_pktmbuf_read(m, hdr_len, MBUF_TEST_DATA_LEN2, NULL);
1768 if (data_copy == NULL)
1769 GOTO_FAIL("%s: Error in reading header data!\n", __func__);
1770 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
1771 if (data_copy[off] != (char)0xcc)
1772 GOTO_FAIL("Data corrupted at offset %u", off);
1775 /* partial reading of mbuf data */
1776 data_copy = rte_pktmbuf_read(m, hdr_len + 5, MBUF_TEST_DATA_LEN2 - 5,
1778 if (data_copy == NULL)
1779 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
/* NOTE(review): strlen() on packet data relies on a zero byte right after
 * the 0xcc payload in the mbuf data room -- fragile but test-only. */
1780 if (strlen(data_copy) != MBUF_TEST_DATA_LEN2 - 5)
1781 GOTO_FAIL("%s: Incorrect data length!\n", __func__);
1782 for (off = 0; off < MBUF_TEST_DATA_LEN2 - 5; off++) {
1783 if (data_copy[off] != (char)0xcc)
1784 GOTO_FAIL("Data corrupted at offset %u", off);
1787 /* read length greater than mbuf data_len */
1788 if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_data_len(m) + 1,
1790 GOTO_FAIL("%s: Requested len is larger than mbuf data len!\n",
1793 /* read length greater than mbuf pkt_len */
1794 if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_pkt_len(m) + 1,
1796 GOTO_FAIL("%s: Requested len is larger than mbuf pkt len!\n",
1799 /* read data of zero len from valid offset */
1800 data_copy = rte_pktmbuf_read(m, hdr_len, 0, NULL);
1801 if (data_copy == NULL)
1802 GOTO_FAIL("%s: Error in reading packet data!\n", __func__)
1803 if (strlen(data_copy) != MBUF_TEST_DATA_LEN2)
1804 GOTO_FAIL("%s: Corrupted data content!\n", __func__);
1805 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
1806 if (data_copy[off] != (char)0xcc)
1807 GOTO_FAIL("Data corrupted at offset %u", off);
1810 /* read data of zero length from zero offset */
1811 data_copy = rte_pktmbuf_read(m, 0, 0, NULL);
1812 if (data_copy == NULL)
1813 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
1814 /* check if the received address is the beginning of header info */
1815 if (hdr != (const struct ether_hdr *)data_copy)
1816 GOTO_FAIL("%s: Corrupted data address!\n", __func__);
1818 /* read data of max length from valid offset */
1819 data_copy = rte_pktmbuf_read(m, hdr_len, UINT_MAX, NULL);
1820 if (data_copy == NULL)
1821 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
1822 /* check if the received address is the beginning of data segment */
1823 if (data_copy != data)
1824 GOTO_FAIL("%s: Corrupted data address!\n", __func__);
1826 /* try to read from mbuf with max size offset */
1827 data_copy = rte_pktmbuf_read(m, UINT_MAX, 0, NULL);
1828 if (data_copy != NULL)
1829 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
1831 /* try to read from mbuf with max size offset and len */
1832 data_copy = rte_pktmbuf_read(m, UINT_MAX, UINT_MAX, NULL);
1833 if (data_copy != NULL)
1834 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
1836 rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
1838 rte_pktmbuf_free(m);
/* fail path: release the mbuf before returning the error */
1844 rte_pktmbuf_free(m);
1851 unsigned int seg_count; /* number of segments in the chained mbuf */
1855 unsigned int seg_lengths[MBUF_MAX_SEG]; /* per-segment data lengths */
1858 /* create a mbuf with different sized segments
1859 * and fill with data [0x00 0x01 0x02 ...]
1858 /* create a mbuf with different sized segments
1859  * and fill with data [0x00 0x01 0x02 ...]
 *
 * Builds a chained mbuf from test_data->seg_lengths; when
 * test_data->flags == MBUF_HEADER an ethernet-sized header is prepended to
 * the first segment only.  Payload bytes continue the running counter
 * last_index across header and segments so readers can verify continuity.
 * Returns the head mbuf, or NULL on failure (all segments freed).
 * NOTE(review): hdr_len's zero-initialization for the no-header case and
 * the chain-head assignment are on elided lines.
 */
1861 static struct rte_mbuf *
1862 create_packet(struct rte_mempool *pktmbuf_pool,
1863 struct test_case *test_data)
1865 uint16_t i, ret, seg, seg_len = 0;
1866 uint32_t last_index = 0;
1867 unsigned int seg_lengths[MBUF_MAX_SEG];
1868 unsigned int hdr_len;
1869 struct rte_mbuf *pkt = NULL;
1870 struct rte_mbuf *pkt_seg = NULL;
1874 memcpy(seg_lengths, test_data->seg_lengths,
1875 sizeof(unsigned int)*test_data->seg_count);
1876 for (seg = 0; seg < test_data->seg_count; seg++) {
1878 seg_len = seg_lengths[seg];
1879 pkt_seg = rte_pktmbuf_alloc(pktmbuf_pool);
1880 if (pkt_seg == NULL)
1881 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1882 if (rte_pktmbuf_pkt_len(pkt_seg) != 0)
1883 GOTO_FAIL("%s: Bad packet length\n", __func__);
1884 rte_mbuf_sanity_check(pkt_seg, 0);
1885 /* Add header only for the first segment */
1886 if (test_data->flags == MBUF_HEADER && seg == 0) {
1887 hdr_len = sizeof(struct rte_ether_hdr);
1888 /* prepend a header and fill with dummy data */
1889 hdr = (char *)rte_pktmbuf_prepend(pkt_seg, hdr_len);
1891 GOTO_FAIL("%s: Cannot prepend header\n",
1893 if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len)
1894 GOTO_FAIL("%s: Bad pkt length", __func__);
1895 if (rte_pktmbuf_data_len(pkt_seg) != hdr_len)
1896 GOTO_FAIL("%s: Bad data length", __func__);
/* header bytes continue the global counter pattern */
1897 for (i = 0; i < hdr_len; i++)
1898 hdr[i] = (last_index + i) % 0xffff;
1899 last_index += hdr_len;
1901 /* skip appending segment with 0 length */
1904 data = rte_pktmbuf_append(pkt_seg, seg_len);
1906 GOTO_FAIL("%s: Cannot append data segment\n", __func__);
1907 if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len + seg_len)
1908 GOTO_FAIL("%s: Bad packet segment length: %d\n",
1909 __func__, rte_pktmbuf_pkt_len(pkt_seg));
1910 if (rte_pktmbuf_data_len(pkt_seg) != hdr_len + seg_len)
1911 GOTO_FAIL("%s: Bad data length\n", __func__);
1912 for (i = 0; i < seg_len; i++)
1913 data[i] = (last_index + i) % 0xffff;
1914 /* to fill continuous data from one seg to another */
1916 /* create chained mbufs */
1920 ret = rte_pktmbuf_chain(pkt, pkt_seg);
1922 GOTO_FAIL("%s:FAIL: Chained mbuf creation %d\n",
1926 pkt_seg = pkt_seg->next;
/* fail path: free the chain head and any dangling segment */
1931 rte_pktmbuf_free(pkt);
1934 if (pkt_seg != NULL) {
1935 rte_pktmbuf_free(pkt_seg);
/*
 * Read data out of chained mbufs built by create_packet(): each case gives
 * segment lengths, a read offset/length and flags.  MBUF_NEG_TEST_READ
 * cases must make rte_pktmbuf_read() return NULL; positive cases verify
 * the counter byte pattern laid down by create_packet().
 * NOTE(review): several per-case fields (nb_segs, read_off/read_len of most
 * cases) are on elided lines.
 */
1942 test_pktmbuf_read_from_chain(struct rte_mempool *pktmbuf_pool)
1945 struct test_case test_cases[] = {
1947 .seg_lengths = { 100, 100, 100 },
1949 .flags = MBUF_NO_HEADER,
1954 .seg_lengths = { 100, 125, 150 },
1956 .flags = MBUF_NO_HEADER,
1961 .seg_lengths = { 100, 100 },
1963 .flags = MBUF_NO_HEADER,
1968 .seg_lengths = { 100, 200 },
1970 .flags = MBUF_HEADER,
1971 .read_off = sizeof(struct rte_ether_hdr),
1975 .seg_lengths = { 1000, 100 },
1977 .flags = MBUF_NO_HEADER,
1982 .seg_lengths = { 1024, 0, 100 },
1984 .flags = MBUF_NO_HEADER,
1989 .seg_lengths = { 1000, 1, 1000 },
1991 .flags = MBUF_NO_HEADER,
1996 .seg_lengths = { MBUF_TEST_DATA_LEN,
1997 MBUF_TEST_DATA_LEN2,
1998 MBUF_TEST_DATA_LEN3, 800, 10 },
2000 .flags = MBUF_NEG_TEST_READ,
2002 .read_len = MBUF_DATA_SIZE
2007 const char *data_copy = NULL;
2008 char data_buf[MBUF_DATA_SIZE];
2010 memset(data_buf, 0, MBUF_DATA_SIZE);
2012 for (i = 0; i < RTE_DIM(test_cases); i++) {
2013 m = create_packet(pktmbuf_pool, &test_cases[i]);
2015 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
/* chained reads copy into data_buf and return its address */
2017 data_copy = rte_pktmbuf_read(m, test_cases[i].read_off,
2018 test_cases[i].read_len, data_buf);
2019 if (test_cases[i].flags == MBUF_NEG_TEST_READ) {
2020 if (data_copy != NULL)
2021 GOTO_FAIL("%s: mbuf data read should fail!\n",
2024 rte_pktmbuf_free(m);
2029 if (data_copy == NULL)
2030 GOTO_FAIL("%s: Error in reading packet data!\n",
/* verify the running-counter pattern from create_packet() */
2032 for (pos = 0; pos < test_cases[i].read_len; pos++) {
2033 if (data_copy[pos] !=
2034 (char)((test_cases[i].read_off + pos)
2036 GOTO_FAIL("Data corrupted at offset %u is %2X",
2037 pos, data_copy[pos]);
2039 rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
2040 rte_pktmbuf_free(m);
/* fail path: release the current chain before returning */
2047 rte_pktmbuf_free(m);
2053 /* Define a free callback function to be used for the external buffer */
/*
 * Free callback registered with the ext-buf shared info: releases the
 * rte_malloc'd external buffer passed through @opaque when the last mbuf
 * reference is dropped.  @addr (the buffer address) is unused because the
 * same pointer is carried in @opaque.
 */
2055 ext_buf_free_callback_fn(void *addr __rte_unused, void *opaque)
2057 void *ext_buf_addr = opaque;
2059 if (ext_buf_addr == NULL) {
2060 printf("External buffer address is invalid\n");
2063 rte_free(ext_buf_addr);
/* NOTE(review): only clears the local copy of the pointer -- a no-op */
2064 ext_buf_addr = NULL;
2065 printf("External buffer freed via callback\n");
2069 * Test to initialize shared data in external buffer before attaching to mbuf
2070 * - Allocate mbuf with no data.
2071 * - Allocate an external buffer whose size is large enough to accommodate
2072 * rte_mbuf_ext_shared_info.
2073 * - Invoke pktmbuf_ext_shinfo_init_helper to initialize shared data.
2074 * - Invoke rte_pktmbuf_attach_extbuf to attach external buffer to the mbuf.
2075 * - Clone another mbuf and attach the same external buffer to it.
2076 * - Invoke rte_pktmbuf_detach_extbuf to detach the external buffer from mbuf.
/*
 * Validate rte_pktmbuf_ext_shinfo_init_helper() and the ext-buf attach/
 * detach reference counting: attach one rte_malloc'd external buffer to an
 * mbuf and its clone, manipulate the shared refcount, then detach from both
 * (the registered callback frees the buffer on the last detach).
 *
 * FIX(review): the buffer IOVA was obtained with rte_mempool_virt2iova(),
 * which is only valid for mempool elements -- it dereferences the
 * rte_mempool_objhdr stored immediately *before* the element, so on
 * rte_malloc() memory it reads out of bounds and yields a bogus IOVA.
 * Use rte_malloc_virt2iova() (rte_malloc.h, already included), which is
 * the correct translation for rte_malloc'd memory.
 */
2079 test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool)
2081 struct rte_mbuf *m = NULL;
2082 struct rte_mbuf *clone = NULL;
2083 struct rte_mbuf_ext_shared_info *ret_shinfo = NULL;
2084 rte_iova_t buf_iova;
2085 void *ext_buf_addr = NULL;
/* room for the data area plus the shared info placed at the buffer end */
2086 uint16_t buf_len = EXT_BUF_TEST_DATA_LEN +
2087 sizeof(struct rte_mbuf_ext_shared_info);
2090 m = rte_pktmbuf_alloc(pktmbuf_pool);
2092 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2093 if (rte_pktmbuf_pkt_len(m) != 0)
2094 GOTO_FAIL("%s: Bad packet length\n", __func__);
2095 rte_mbuf_sanity_check(m, 0);
2097 ext_buf_addr = rte_malloc("External buffer", buf_len,
2098 RTE_CACHE_LINE_SIZE);
2099 if (ext_buf_addr == NULL)
2100 GOTO_FAIL("%s: External buffer allocation failed\n", __func__);
/* helper carves the shared info out of the buffer tail and trims buf_len */
2102 ret_shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf_addr, &buf_len,
2103 ext_buf_free_callback_fn, ext_buf_addr);
2104 if (ret_shinfo == NULL)
2105 GOTO_FAIL("%s: Shared info initialization failed!\n", __func__);
2107 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
2108 GOTO_FAIL("%s: External refcount is not 1\n", __func__);
2110 if (rte_mbuf_refcnt_read(m) != 1)
2111 GOTO_FAIL("%s: Invalid refcnt in mbuf\n", __func__);
/* rte_malloc_virt2iova() is the correct IOVA lookup for rte_malloc memory;
 * rte_mempool_virt2iova() assumes a mempool element header precedes the
 * address and would read out of bounds here. */
2113 buf_iova = rte_malloc_virt2iova(ext_buf_addr);
2114 rte_pktmbuf_attach_extbuf(m, ext_buf_addr, buf_iova, buf_len,
2116 if (m->ol_flags != EXT_ATTACHED_MBUF)
2117 GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
2120 /* allocate one more mbuf */
2121 clone = rte_pktmbuf_clone(m, pktmbuf_pool);
2123 GOTO_FAIL("%s: mbuf clone allocation failed!\n", __func__);
2124 if (rte_pktmbuf_pkt_len(clone) != 0)
2125 GOTO_FAIL("%s: Bad packet length\n", __func__);
2127 /* attach the same external buffer to the cloned mbuf */
2128 rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
2130 if (clone->ol_flags != EXT_ATTACHED_MBUF)
2131 GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
/* two mbufs now share the buffer -> shared refcount must be 2 */
2134 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
2135 GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
2137 /* test to manually update ext_buf_ref_cnt from 2 to 3*/
2138 rte_mbuf_ext_refcnt_update(ret_shinfo, 1);
2139 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 3)
2140 GOTO_FAIL("%s: Update ext_buf ref_cnt failed\n", __func__);
2142 /* reset the ext_refcnt before freeing the external buffer */
2143 rte_mbuf_ext_refcnt_set(ret_shinfo, 2);
2144 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
2145 GOTO_FAIL("%s: set ext_buf ref_cnt failed\n", __func__);
2147 /* detach the external buffer from mbufs */
2148 rte_pktmbuf_detach_extbuf(m);
2149 /* check if ref cnt is decremented */
2150 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
2151 GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
/* last detach drops the refcount to 0 and triggers the free callback */
2153 rte_pktmbuf_detach_extbuf(clone);
2154 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 0)
2155 GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
2157 rte_pktmbuf_free(m);
2159 rte_pktmbuf_free(clone);
/* fail path: release mbufs and the buffer if not yet freed by callback */
2166 rte_pktmbuf_free(m);
2170 rte_pktmbuf_free(clone);
2173 if (ext_buf_addr != NULL) {
2174 rte_free(ext_buf_addr);
2175 ext_buf_addr = NULL;
/*
 * Top-level autotest entry body: build the two mbuf pools (a standard one
 * and one with MBUF2_PRIV_SIZE private area and no data room), then run
 * every sub-test in sequence.  NOTE(review): the function signature, the
 * `ret` initialization, the goto-err branches and the final return are on
 * elided lines.
 */
2184 struct rte_mempool *pktmbuf_pool = NULL;
2185 struct rte_mempool *pktmbuf_pool2 = NULL;
/* the mbuf layout is ABI: exactly two min cache lines */
2188 RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != RTE_CACHE_LINE_MIN_SIZE * 2);
2190 /* create pktmbuf pool if it does not exist */
2191 pktmbuf_pool = rte_pktmbuf_pool_create("test_pktmbuf_pool",
2192 NB_MBUF, MEMPOOL_CACHE_SIZE, 0, MBUF_DATA_SIZE,
2195 if (pktmbuf_pool == NULL) {
2196 printf("cannot allocate mbuf pool\n");
2200 /* create a specific pktmbuf pool with a priv_size != 0 and no data
2202 pktmbuf_pool2 = rte_pktmbuf_pool_create("test_pktmbuf_pool2",
2203 NB_MBUF, MEMPOOL_CACHE_SIZE, MBUF2_PRIV_SIZE, 0,
2206 if (pktmbuf_pool2 == NULL) {
2207 printf("cannot allocate mbuf pool\n");
2211 /* test multiple mbuf alloc */
2212 if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
2213 printf("test_mbuf_pool() failed\n");
2217 /* do it another time to check that all mbufs were freed */
2218 if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
2219 printf("test_mbuf_pool() failed (2)\n");
2223 /* test bulk mbuf alloc and free */
2224 if (test_pktmbuf_pool_bulk() < 0) {
2225 printf("test_pktmbuf_pool_bulk() failed\n");
2229 /* test that the pointer to the data on a packet mbuf is set properly */
2230 if (test_pktmbuf_pool_ptr(pktmbuf_pool) < 0) {
2231 printf("test_pktmbuf_pool_ptr() failed\n");
2235 /* test data manipulation in mbuf */
2236 if (test_one_pktmbuf(pktmbuf_pool) < 0) {
2237 printf("test_one_mbuf() failed\n");
2243 * do it another time, to check that allocation reinitialize
2244 * the mbuf correctly
2246 if (test_one_pktmbuf(pktmbuf_pool) < 0) {
2247 printf("test_one_mbuf() failed (2)\n");
2251 if (test_pktmbuf_with_non_ascii_data(pktmbuf_pool) < 0) {
2252 printf("test_pktmbuf_with_non_ascii_data() failed\n");
2256 /* test free pktmbuf segment one by one */
2257 if (test_pktmbuf_free_segment(pktmbuf_pool) < 0) {
2258 printf("test_pktmbuf_free_segment() failed.\n");
2262 if (testclone_testupdate_testdetach(pktmbuf_pool) < 0) {
2263 printf("testclone_and_testupdate() failed \n");
2267 if (test_pktmbuf_copy(pktmbuf_pool) < 0) {
2268 printf("test_pktmbuf_copy() failed\n");
2272 if (test_attach_from_different_pool(pktmbuf_pool, pktmbuf_pool2) < 0) {
2273 printf("test_attach_from_different_pool() failed\n");
2277 if (test_refcnt_mbuf() < 0) {
2278 printf("test_refcnt_mbuf() failed \n");
2282 if (test_failing_mbuf_sanity_check(pktmbuf_pool) < 0) {
2283 printf("test_failing_mbuf_sanity_check() failed\n");
2287 if (test_mbuf_linearize_check(pktmbuf_pool) < 0) {
2288 printf("test_mbuf_linearize_check() failed\n");
2292 if (test_tx_offload() < 0) {
2293 printf("test_tx_offload() failed\n");
2297 if (test_mbuf_validate_tx_offload_one(pktmbuf_pool) < 0) {
2298 printf("test_mbuf_validate_tx_offload_one() failed\n");
2302 /* test for allocating a bulk of mbufs with various sizes */
2303 if (test_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
2304 printf("test_rte_pktmbuf_alloc_bulk() failed\n");
2308 /* test for allocating a bulk of mbufs with various sizes */
2309 if (test_neg_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
2310 printf("test_neg_rte_pktmbuf_alloc_bulk() failed\n");
2314 /* test to read mbuf packet */
2315 if (test_pktmbuf_read(pktmbuf_pool) < 0) {
2316 printf("test_rte_pktmbuf_read() failed\n");
2320 /* test to read mbuf packet from offset */
2321 if (test_pktmbuf_read_from_offset(pktmbuf_pool) < 0) {
2322 printf("test_rte_pktmbuf_read_from_offset() failed\n");
2326 /* test to read data from chain of mbufs with data segments */
2327 if (test_pktmbuf_read_from_chain(pktmbuf_pool) < 0) {
2328 printf("test_rte_pktmbuf_read_from_chain() failed\n");
2332 /* test to initialize shared info. at the end of external buffer */
2333 if (test_pktmbuf_ext_shinfo_init_helper(pktmbuf_pool) < 0) {
2334 printf("test_pktmbuf_ext_shinfo_init_helper() failed\n");
/* cleanup: rte_mempool_free(NULL) is a safe no-op */
2340 rte_mempool_free(pktmbuf_pool);
2341 rte_mempool_free(pktmbuf_pool2);
2346 REGISTER_TEST_COMMAND(mbuf_autotest, test_mbuf);