1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
12 #include <sys/queue.h>
14 #include <rte_common.h>
15 #include <rte_errno.h>
16 #include <rte_debug.h>
18 #include <rte_memory.h>
19 #include <rte_memcpy.h>
20 #include <rte_launch.h>
22 #include <rte_per_lcore.h>
23 #include <rte_lcore.h>
24 #include <rte_atomic.h>
25 #include <rte_branch_prediction.h>
27 #include <rte_mempool.h>
29 #include <rte_random.h>
30 #include <rte_cycles.h>
31 #include <rte_malloc.h>
32 #include <rte_ether.h>
38 #define MEMPOOL_CACHE_SIZE 32
39 #define MBUF_DATA_SIZE 2048
41 #define MBUF_TEST_DATA_LEN 1464
42 #define MBUF_TEST_DATA_LEN2 50
43 #define MBUF_TEST_DATA_LEN3 256
44 #define MBUF_TEST_HDR1_LEN 20
45 #define MBUF_TEST_HDR2_LEN 30
46 #define MBUF_TEST_ALL_HDRS_LEN (MBUF_TEST_HDR1_LEN+MBUF_TEST_HDR2_LEN)
47 #define MBUF_TEST_SEG_SIZE 64
48 #define MBUF_TEST_BURST 8
49 #define EXT_BUF_TEST_DATA_LEN 1024
50 #define MBUF_MAX_SEG 16
51 #define MBUF_NO_HEADER 0
53 #define MBUF_NEG_TEST_READ 2
54 #define VAL_NAME(flag) { flag, #flag }
56 /* chain length in bulk test */
59 /* size of private data for mbuf in pktmbuf_pool2 */
60 #define MBUF2_PRIV_SIZE 128
62 #define REFCNT_MAX_ITER 64
63 #define REFCNT_MAX_TIMEOUT 10
64 #define REFCNT_MAX_REF (RTE_MAX_LCORE)
65 #define REFCNT_MBUF_NUM 64
66 #define REFCNT_RING_SIZE (REFCNT_MBUF_NUM * REFCNT_MAX_REF)
68 #define MAGIC_DATA 0x42424242
70 #define MAKE_STRING(x) # x
72 #ifdef RTE_MBUF_REFCNT_ATOMIC
74 static volatile uint32_t refcnt_stop_slaves;
75 static unsigned refcnt_lcore[RTE_MAX_LCORE];
83 * #. Allocate a mbuf pool.
85 * - The pool contains NB_MBUF elements, where each mbuf is MBUF_SIZE
88 * #. Test multiple allocations of mbufs from this pool.
90 * - Allocate NB_MBUF and store pointers in a table.
91 * - If an allocation fails, return an error.
92 * - Free all these mbufs.
93 * - Repeat the same test to check that mbufs were freed correctly.
95 * #. Test data manipulation in pktmbuf.
98 * - Append data using rte_pktmbuf_append().
99 * - Test for error in rte_pktmbuf_append() when len is too large.
100 * - Trim data at the end of mbuf using rte_pktmbuf_trim().
101 * - Test for error in rte_pktmbuf_trim() when len is too large.
102 * - Prepend a header using rte_pktmbuf_prepend().
103 * - Test for error in rte_pktmbuf_prepend() when len is too large.
104 * - Remove data at the beginning of mbuf using rte_pktmbuf_adj().
105 * - Test for error in rte_pktmbuf_adj() when len is too large.
106 * - Check that appended data is not corrupt.
108 * - Between all these tests, check data_len and pkt_len, and
109 * that the mbuf is contiguous.
110 * - Repeat the test to check that allocation operations
111 * reinitialize the mbuf correctly.
113 * #. Test packet cloning
114 * - Clone a mbuf and verify the data
115 * - Clone the cloned mbuf and verify the data
116 * - Attach a mbuf to another that does not have the same priv_size.
/*
 * Failure helper used by every test below: print the failing source line
 * and a formatted message, then (in the full source) jump to the local
 * "fail" label.  NOTE(review): the macro's tail (the goto and the
 * do/while(0) terminator) is missing from this listing — embedded line
 * numbers jump from 121 to 126.  Code kept byte-identical.
 */
119 #define GOTO_FAIL(str, ...) do { \
120 printf("mbuf test FAILED (l.%d): <" str ">\n", \
121 __LINE__, ##__VA_ARGS__); \
126 * test data manipulation in mbuf with non-ascii data
/*
 * Fill a freshly allocated mbuf with non-ascii (0xff) bytes and exercise
 * append + dump on it.
 * NOTE(review): this listing is missing interleaved source lines (the
 * embedded line numbers jump), e.g. the NULL checks that guard each
 * GOTO_FAIL and the function epilogue.  Code kept byte-identical.
 */
129 test_pktmbuf_with_non_ascii_data(struct rte_mempool *pktmbuf_pool)
131 struct rte_mbuf *m = NULL;
/* allocate one mbuf; a fresh pktmbuf must start with zero packet length */
134 m = rte_pktmbuf_alloc(pktmbuf_pool);
136 GOTO_FAIL("Cannot allocate mbuf");
137 if (rte_pktmbuf_pkt_len(m) != 0)
138 GOTO_FAIL("Bad length");
/* append MBUF_TEST_DATA_LEN bytes; both pkt_len and data_len must match */
140 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
142 GOTO_FAIL("Cannot append data");
143 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
144 GOTO_FAIL("Bad pkt length");
145 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
146 GOTO_FAIL("Bad data length");
/* fill the data area with 0xff and dump it for visual inspection */
147 memset(data, 0xff, rte_pktmbuf_pkt_len(m));
148 if (!rte_pktmbuf_is_contiguous(m))
149 GOTO_FAIL("Buffer should be continuous");
150 rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN);
164 * test data manipulation in mbuf
/*
 * Exercise the core single-mbuf data-manipulation API on one mbuf:
 * append, trim, prepend, adj — including the failure paths where the
 * requested length exceeds tailroom/headroom/data_len — and verify
 * pkt_len, data_len and contiguity after each step.
 * NOTE(review): interleaved source lines are missing from this listing
 * (embedded line numbers jump): the NULL checks before each GOTO_FAIL,
 * the data-verification loop body, and the epilogue.  Code kept
 * byte-identical.
 */
167 test_one_pktmbuf(struct rte_mempool *pktmbuf_pool)
169 struct rte_mbuf *m = NULL;
170 char *data, *data2, *hdr;
173 printf("Test pktmbuf API\n");
/* alloc a fresh mbuf; it must start empty */
177 m = rte_pktmbuf_alloc(pktmbuf_pool);
179 GOTO_FAIL("Cannot allocate mbuf");
180 if (rte_pktmbuf_pkt_len(m) != 0)
181 GOTO_FAIL("Bad length");
183 rte_pktmbuf_dump(stdout, m, 0);
/* append data and check both length accessors */
187 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
189 GOTO_FAIL("Cannot append data");
190 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
191 GOTO_FAIL("Bad pkt length");
192 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
193 GOTO_FAIL("Bad data length");
194 memset(data, 0x66, rte_pktmbuf_pkt_len(m));
195 if (!rte_pktmbuf_is_contiguous(m))
196 GOTO_FAIL("Buffer should be continuous");
197 rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN);
/* dump length larger than the data on purpose — must be clamped safely */
198 rte_pktmbuf_dump(stdout, m, 2*MBUF_TEST_DATA_LEN);
200 /* this append should fail */
202 data2 = rte_pktmbuf_append(m, (uint16_t)(rte_pktmbuf_tailroom(m) + 1));
204 GOTO_FAIL("Append should not succeed");
206 /* append some more data */
208 data2 = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
210 GOTO_FAIL("Cannot append data");
211 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
212 GOTO_FAIL("Bad pkt length");
213 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
214 GOTO_FAIL("Bad data length");
215 if (!rte_pktmbuf_is_contiguous(m))
216 GOTO_FAIL("Buffer should be continuous");
218 /* trim data at the end of mbuf */
220 if (rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN2) < 0)
221 GOTO_FAIL("Cannot trim data");
222 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
223 GOTO_FAIL("Bad pkt length");
224 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
225 GOTO_FAIL("Bad data length");
226 if (!rte_pktmbuf_is_contiguous(m))
227 GOTO_FAIL("Buffer should be continuous");
229 /* this trim should fail */
231 if (rte_pktmbuf_trim(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) == 0)
232 GOTO_FAIL("trim should not succeed");
234 /* prepend one header */
236 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN);
238 GOTO_FAIL("Cannot prepend");
/* prepend must have moved the data pointer back by exactly the header len */
239 if (data - hdr != MBUF_TEST_HDR1_LEN)
240 GOTO_FAIL("Prepend failed");
241 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
242 GOTO_FAIL("Bad pkt length");
243 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
244 GOTO_FAIL("Bad data length");
245 if (!rte_pktmbuf_is_contiguous(m))
246 GOTO_FAIL("Buffer should be continuous");
247 memset(hdr, 0x55, MBUF_TEST_HDR1_LEN);
249 /* prepend another header */
251 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR2_LEN);
253 GOTO_FAIL("Cannot prepend");
254 if (data - hdr != MBUF_TEST_ALL_HDRS_LEN)
255 GOTO_FAIL("Prepend failed");
256 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
257 GOTO_FAIL("Bad pkt length");
258 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
259 GOTO_FAIL("Bad data length");
260 if (!rte_pktmbuf_is_contiguous(m))
261 GOTO_FAIL("Buffer should be continuous");
262 memset(hdr, 0x55, MBUF_TEST_HDR2_LEN);
/* sanity-check both as head of a packet (1) and as a segment (0) */
264 rte_mbuf_sanity_check(m, 1);
265 rte_mbuf_sanity_check(m, 0);
266 rte_pktmbuf_dump(stdout, m, 0);
268 /* this prepend should fail */
270 hdr = rte_pktmbuf_prepend(m, (uint16_t)(rte_pktmbuf_headroom(m) + 1));
272 GOTO_FAIL("prepend should not succeed");
274 /* remove data at beginning of mbuf (adj) */
276 if (data != rte_pktmbuf_adj(m, MBUF_TEST_ALL_HDRS_LEN))
277 GOTO_FAIL("rte_pktmbuf_adj failed");
278 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
279 GOTO_FAIL("Bad pkt length");
280 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
281 GOTO_FAIL("Bad data length");
282 if (!rte_pktmbuf_is_contiguous(m))
283 GOTO_FAIL("Buffer should be continuous");
285 /* this adj should fail */
287 if (rte_pktmbuf_adj(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) != NULL)
288 GOTO_FAIL("rte_pktmbuf_adj should not succeed");
292 if (!rte_pktmbuf_is_contiguous(m))
293 GOTO_FAIL("Buffer should be continuous");
/* verify the 0x66 payload survived all the header manipulation above */
295 for (i=0; i<MBUF_TEST_DATA_LEN; i++) {
297 GOTO_FAIL("Data corrupted at offset %u", i);
/*
 * Clone tests: clone a single mbuf, then a two-segment chain, then clone
 * the clone, verifying the MAGIC_DATA payload and the reference count of
 * the original segments at each step (refcnt 2 after first clone, 3 after
 * clone-of-clone).
 * NOTE(review): interleaved lines are missing from this listing (embedded
 * line numbers jump): NULL checks, the *data = MAGIC_DATA stores, nb_segs
 * updates, and the fail: label.  Code kept byte-identical.
 */
313 testclone_testupdate_testdetach(struct rte_mempool *pktmbuf_pool)
315 struct rte_mbuf *m = NULL;
316 struct rte_mbuf *clone = NULL;
317 struct rte_mbuf *clone2 = NULL;
318 unaligned_uint32_t *data;
321 m = rte_pktmbuf_alloc(pktmbuf_pool);
323 GOTO_FAIL("ooops not allocating mbuf");
325 if (rte_pktmbuf_pkt_len(m) != 0)
326 GOTO_FAIL("Bad length");
/* write one 32-bit magic word into the source mbuf */
328 rte_pktmbuf_append(m, sizeof(uint32_t));
329 data = rte_pktmbuf_mtod(m, unaligned_uint32_t *);
332 /* clone the allocated mbuf */
333 clone = rte_pktmbuf_clone(m, pktmbuf_pool);
335 GOTO_FAIL("cannot clone data\n");
337 data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *);
338 if (*data != MAGIC_DATA)
339 GOTO_FAIL("invalid data in clone\n");
/* cloning must bump the original's refcnt to 2 */
341 if (rte_mbuf_refcnt_read(m) != 2)
342 GOTO_FAIL("invalid refcnt in m\n");
345 rte_pktmbuf_free(clone);
348 /* same test with a chained mbuf */
349 m->next = rte_pktmbuf_alloc(pktmbuf_pool);
351 GOTO_FAIL("Next Pkt Null\n");
354 rte_pktmbuf_append(m->next, sizeof(uint32_t));
355 m->pkt_len = 2 * sizeof(uint32_t);
357 data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
360 clone = rte_pktmbuf_clone(m, pktmbuf_pool);
362 GOTO_FAIL("cannot clone data\n");
/* both segments of the clone must see the magic word */
364 data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *);
365 if (*data != MAGIC_DATA)
366 GOTO_FAIL("invalid data in clone\n");
368 data = rte_pktmbuf_mtod(clone->next, unaligned_uint32_t *);
369 if (*data != MAGIC_DATA)
370 GOTO_FAIL("invalid data in clone->next\n");
372 if (rte_mbuf_refcnt_read(m) != 2)
373 GOTO_FAIL("invalid refcnt in m\n");
375 if (rte_mbuf_refcnt_read(m->next) != 2)
376 GOTO_FAIL("invalid refcnt in m->next\n");
378 /* try to clone the clone */
380 clone2 = rte_pktmbuf_clone(clone, pktmbuf_pool);
382 GOTO_FAIL("cannot clone the clone\n");
384 data = rte_pktmbuf_mtod(clone2, unaligned_uint32_t *);
385 if (*data != MAGIC_DATA)
386 GOTO_FAIL("invalid data in clone2\n");
388 data = rte_pktmbuf_mtod(clone2->next, unaligned_uint32_t *);
389 if (*data != MAGIC_DATA)
390 GOTO_FAIL("invalid data in clone2->next\n");
/* two live clones -> original segments must each have refcnt 3 */
392 if (rte_mbuf_refcnt_read(m) != 3)
393 GOTO_FAIL("invalid refcnt in m\n");
395 if (rte_mbuf_refcnt_read(m->next) != 3)
396 GOTO_FAIL("invalid refcnt in m->next\n");
400 rte_pktmbuf_free(clone);
401 rte_pktmbuf_free(clone2);
406 printf("%s ok\n", __func__);
/* failure path cleanup (under the fail: label in the full source) */
413 rte_pktmbuf_free(clone);
415 rte_pktmbuf_free(clone2);
/*
 * rte_pktmbuf_copy() tests: deep-copy a plain mbuf, a cloned mbuf (copy
 * must NOT itself be a clone), and a two-segment chain; then copy with a
 * byte offset and with a truncating length, verifying lengths and payload
 * each time.
 * NOTE(review): interleaved lines are missing from this listing (embedded
 * line numbers jump): NULL checks, MAGIC_DATA stores, nb_segs updates and
 * the fail: label.  Code kept byte-identical.
 */
420 test_pktmbuf_copy(struct rte_mempool *pktmbuf_pool)
422 struct rte_mbuf *m = NULL;
423 struct rte_mbuf *copy = NULL;
424 struct rte_mbuf *copy2 = NULL;
425 struct rte_mbuf *clone = NULL;
426 unaligned_uint32_t *data;
429 m = rte_pktmbuf_alloc(pktmbuf_pool);
431 GOTO_FAIL("ooops not allocating mbuf");
433 if (rte_pktmbuf_pkt_len(m) != 0)
434 GOTO_FAIL("Bad length");
436 rte_pktmbuf_append(m, sizeof(uint32_t));
437 data = rte_pktmbuf_mtod(m, unaligned_uint32_t *);
440 /* copy the allocated mbuf */
441 copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX);
443 GOTO_FAIL("cannot copy data\n");
445 if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t))
446 GOTO_FAIL("copy length incorrect\n");
448 if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t))
449 GOTO_FAIL("copy data length incorrect\n");
451 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
452 if (*data != MAGIC_DATA)
453 GOTO_FAIL("invalid data in copy\n");
456 rte_pktmbuf_free(copy);
459 /* same test with a cloned mbuf */
460 clone = rte_pktmbuf_clone(m, pktmbuf_pool);
462 GOTO_FAIL("cannot clone data\n");
464 if (!RTE_MBUF_CLONED(clone))
465 GOTO_FAIL("clone did not give a cloned mbuf\n");
/* a copy of a clone must be an independent (non-cloned) mbuf */
467 copy = rte_pktmbuf_copy(clone, pktmbuf_pool, 0, UINT32_MAX);
469 GOTO_FAIL("cannot copy cloned mbuf\n");
471 if (RTE_MBUF_CLONED(copy))
472 GOTO_FAIL("copy of clone is cloned?\n");
474 if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t))
475 GOTO_FAIL("copy clone length incorrect\n");
477 if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t))
478 GOTO_FAIL("copy clone data length incorrect\n");
480 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
481 if (*data != MAGIC_DATA)
482 GOTO_FAIL("invalid data in clone copy\n");
483 rte_pktmbuf_free(clone);
484 rte_pktmbuf_free(copy);
489 /* same test with a chained mbuf */
490 m->next = rte_pktmbuf_alloc(pktmbuf_pool);
492 GOTO_FAIL("Next Pkt Null\n");
495 rte_pktmbuf_append(m->next, sizeof(uint32_t));
496 m->pkt_len = 2 * sizeof(uint32_t);
497 data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
/* second segment carries MAGIC_DATA+1 so the two segments are distinguishable */
498 *data = MAGIC_DATA + 1;
500 copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX);
502 GOTO_FAIL("cannot copy data\n");
/* copy of a chain must come back linearized into one segment */
504 if (rte_pktmbuf_pkt_len(copy) != 2 * sizeof(uint32_t))
505 GOTO_FAIL("chain copy length incorrect\n");
507 if (rte_pktmbuf_data_len(copy) != 2 * sizeof(uint32_t))
508 GOTO_FAIL("chain copy data length incorrect\n");
510 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
511 if (data[0] != MAGIC_DATA || data[1] != MAGIC_DATA + 1)
512 GOTO_FAIL("invalid data in copy\n");
514 rte_pktmbuf_free(copy2);
516 /* test offset copy */
517 copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool,
518 sizeof(uint32_t), UINT32_MAX);
520 GOTO_FAIL("cannot copy the copy\n");
522 if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t))
523 GOTO_FAIL("copy with offset, length incorrect\n");
525 if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t))
526 GOTO_FAIL("copy with offset, data length incorrect\n");
/* skipping the first word must leave only the second segment's value */
528 data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *);
529 if (data[0] != MAGIC_DATA + 1)
530 GOTO_FAIL("copy with offset, invalid data\n");
532 rte_pktmbuf_free(copy2);
534 /* test truncation copy */
535 copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool,
536 0, sizeof(uint32_t));
538 GOTO_FAIL("cannot copy the copy\n");
540 if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t))
541 GOTO_FAIL("copy with truncate, length incorrect\n");
543 if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t))
544 GOTO_FAIL("copy with truncate, data length incorrect\n");
546 data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *);
547 if (data[0] != MAGIC_DATA)
548 GOTO_FAIL("copy with truncate, invalid data\n");
552 rte_pktmbuf_free(copy);
553 rte_pktmbuf_free(copy2);
558 printf("%s ok\n", __func__);
/* failure path cleanup (under the fail: label in the full source) */
565 rte_pktmbuf_free(copy);
567 rte_pktmbuf_free(copy2);
/*
 * Attach/detach across pools: allocate small indirect mbufs from a second
 * pool (0-byte data room, MBUF2_PRIV_SIZE private area), attach them to a
 * data mbuf from the first pool, verify the data pointer / headroom /
 * refcnt after each attach, then detach and confirm the original data
 * pointers are restored.
 * NOTE(review): interleaved lines are missing from this listing (embedded
 * line numbers jump): NULL checks and the fail: label.  Code kept
 * byte-identical.
 */
572 test_attach_from_different_pool(struct rte_mempool *pktmbuf_pool,
573 struct rte_mempool *pktmbuf_pool2)
575 struct rte_mbuf *m = NULL;
576 struct rte_mbuf *clone = NULL;
577 struct rte_mbuf *clone2 = NULL;
578 char *data, *c_data, *c_data2;
581 m = rte_pktmbuf_alloc(pktmbuf_pool);
583 GOTO_FAIL("cannot allocate mbuf");
585 if (rte_pktmbuf_pkt_len(m) != 0)
586 GOTO_FAIL("Bad length");
588 data = rte_pktmbuf_mtod(m, char *);
590 /* allocate a new mbuf from the second pool, and attach it to the first
592 clone = rte_pktmbuf_alloc(pktmbuf_pool2);
594 GOTO_FAIL("cannot allocate mbuf from second pool\n");
596 /* check data room size and priv size, and erase priv */
597 if (rte_pktmbuf_data_room_size(clone->pool) != 0)
598 GOTO_FAIL("data room size should be 0\n");
599 if (rte_pktmbuf_priv_size(clone->pool) != MBUF2_PRIV_SIZE)
600 GOTO_FAIL("data room size should be %d\n", MBUF2_PRIV_SIZE);
/* priv area sits directly after the mbuf header, hence "clone + 1" */
601 memset(clone + 1, 0, MBUF2_PRIV_SIZE);
603 /* save data pointer to compare it after detach() */
604 c_data = rte_pktmbuf_mtod(clone, char *);
605 if (c_data != (char *)clone + sizeof(*clone) + MBUF2_PRIV_SIZE)
606 GOTO_FAIL("bad data pointer in clone");
607 if (rte_pktmbuf_headroom(clone) != 0)
608 GOTO_FAIL("bad headroom in clone");
610 rte_pktmbuf_attach(clone, m);
/* after attach, the clone must point into m's data and inherit its headroom */
612 if (rte_pktmbuf_mtod(clone, char *) != data)
613 GOTO_FAIL("clone was not attached properly\n");
614 if (rte_pktmbuf_headroom(clone) != RTE_PKTMBUF_HEADROOM)
615 GOTO_FAIL("bad headroom in clone after attach");
616 if (rte_mbuf_refcnt_read(m) != 2)
617 GOTO_FAIL("invalid refcnt in m\n");
619 /* allocate a new mbuf from the second pool, and attach it to the first
621 clone2 = rte_pktmbuf_alloc(pktmbuf_pool2);
623 GOTO_FAIL("cannot allocate clone2 from second pool\n");
625 /* check data room size and priv size, and erase priv */
626 if (rte_pktmbuf_data_room_size(clone2->pool) != 0)
627 GOTO_FAIL("data room size should be 0\n");
628 if (rte_pktmbuf_priv_size(clone2->pool) != MBUF2_PRIV_SIZE)
629 GOTO_FAIL("data room size should be %d\n", MBUF2_PRIV_SIZE);
630 memset(clone2 + 1, 0, MBUF2_PRIV_SIZE);
632 /* save data pointer to compare it after detach() */
633 c_data2 = rte_pktmbuf_mtod(clone2, char *);
634 if (c_data2 != (char *)clone2 + sizeof(*clone2) + MBUF2_PRIV_SIZE)
635 GOTO_FAIL("bad data pointer in clone2");
636 if (rte_pktmbuf_headroom(clone2) != 0)
637 GOTO_FAIL("bad headroom in clone2");
/* attaching to a clone attaches to the underlying direct mbuf m */
639 rte_pktmbuf_attach(clone2, clone);
641 if (rte_pktmbuf_mtod(clone2, char *) != data)
642 GOTO_FAIL("clone2 was not attached properly\n");
643 if (rte_pktmbuf_headroom(clone2) != RTE_PKTMBUF_HEADROOM)
644 GOTO_FAIL("bad headroom in clone2 after attach");
645 if (rte_mbuf_refcnt_read(m) != 3)
646 GOTO_FAIL("invalid refcnt in m\n");
648 /* detach the clones */
649 rte_pktmbuf_detach(clone);
650 if (c_data != rte_pktmbuf_mtod(clone, char *))
651 GOTO_FAIL("clone was not detached properly\n");
652 if (rte_mbuf_refcnt_read(m) != 2)
653 GOTO_FAIL("invalid refcnt in m\n");
655 rte_pktmbuf_detach(clone2);
656 if (c_data2 != rte_pktmbuf_mtod(clone2, char *))
657 GOTO_FAIL("clone2 was not detached properly\n");
658 if (rte_mbuf_refcnt_read(m) != 1)
659 GOTO_FAIL("invalid refcnt in m\n");
661 /* free the clones and the initial mbuf */
662 rte_pktmbuf_free(clone2);
663 rte_pktmbuf_free(clone);
665 printf("%s ok\n", __func__);
/* failure path cleanup (under the fail: label in the full source) */
672 rte_pktmbuf_free(clone);
674 rte_pktmbuf_free(clone2);
679 * test allocation and free of mbufs
/*
 * Drain the pool by allocating NB_MBUF mbufs, verify further alloc and
 * clone attempts fail on the empty pool, then free everything.
 * NOTE(review): interleaved lines are missing from this listing (embedded
 * line numbers jump): loop bodies, NULL checks and the return.  Code kept
 * byte-identical.
 */
682 test_pktmbuf_pool(struct rte_mempool *pktmbuf_pool)
685 struct rte_mbuf *m[NB_MBUF];
688 for (i=0; i<NB_MBUF; i++)
691 /* alloc NB_MBUF mbufs */
692 for (i=0; i<NB_MBUF; i++) {
693 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
695 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
/* pool is now exhausted: both alloc and clone must fail here */
699 struct rte_mbuf *extra = NULL;
700 extra = rte_pktmbuf_alloc(pktmbuf_pool);
702 printf("Error pool not empty");
705 extra = rte_pktmbuf_clone(m[0], pktmbuf_pool);
707 printf("Error pool not empty");
711 for (i=0; i<NB_MBUF; i++) {
713 rte_pktmbuf_free(m[i]);
720 * test bulk allocation and bulk free of mbufs
/*
 * Bulk alloc/free tests using two dedicated cache-less pools (caches would
 * prevent bulk-allocating every object): single bulk alloc + stepped bulk
 * free, stepped bulk alloc + single bulk free, bulk free of one long
 * chain, and bulk free of multiple chains whose segments alternate
 * between the two pools.
 * NOTE(review): interleaved lines are missing from this listing (embedded
 * line numbers jump): error-path gotos, the err: cleanup label and the
 * return.  Code kept byte-identical.
 */
723 test_pktmbuf_pool_bulk(void)
725 struct rte_mempool *pool = NULL;
726 struct rte_mempool *pool2 = NULL;
729 struct rte_mbuf *mbufs[NB_MBUF];
732 /* We cannot use the preallocated mbuf pools because their caches
733 * prevent us from bulk allocating all objects in them.
734 * So we create our own mbuf pools without caches.
736 printf("Create mbuf pools for bulk allocation.\n");
737 pool = rte_pktmbuf_pool_create("test_pktmbuf_bulk",
738 NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
740 printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n",
744 pool2 = rte_pktmbuf_pool_create("test_pktmbuf_bulk2",
745 NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
747 printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n",
752 /* Preconditions: Mempools must be full. */
753 if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) {
754 printf("Test precondition failed: mempools not full\n");
757 if (!(rte_mempool_avail_count(pool) == NB_MBUF &&
758 rte_mempool_avail_count(pool2) == NB_MBUF)) {
759 printf("Test precondition failed: mempools: %u+%u != %u+%u",
760 rte_mempool_avail_count(pool),
761 rte_mempool_avail_count(pool2),
766 printf("Test single bulk alloc, followed by multiple bulk free.\n");
768 /* Bulk allocate all mbufs in the pool, in one go. */
769 ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF);
771 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
774 /* Test that they have been removed from the pool. */
775 if (!rte_mempool_empty(pool)) {
776 printf("mempool not empty\n");
779 /* Bulk free all mbufs, in four steps. */
780 RTE_BUILD_BUG_ON(NB_MBUF % 4 != 0);
781 for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) {
782 rte_pktmbuf_free_bulk(&mbufs[i], NB_MBUF / 4);
783 /* Test that they have been returned to the pool. */
784 if (rte_mempool_avail_count(pool) != i + NB_MBUF / 4) {
785 printf("mempool avail count incorrect\n");
790 printf("Test multiple bulk alloc, followed by single bulk free.\n");
792 /* Bulk allocate all mbufs in the pool, in four steps. */
793 for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) {
794 ret = rte_pktmbuf_alloc_bulk(pool, &mbufs[i], NB_MBUF / 4);
796 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
800 /* Test that they have been removed from the pool. */
801 if (!rte_mempool_empty(pool)) {
802 printf("mempool not empty\n");
805 /* Bulk free all mbufs, in one go. */
806 rte_pktmbuf_free_bulk(mbufs, NB_MBUF);
807 /* Test that they have been returned to the pool. */
808 if (!rte_mempool_full(pool)) {
809 printf("mempool not full\n");
813 printf("Test bulk free of single long chain.\n");
815 /* Bulk allocate all mbufs in the pool, in one go. */
816 ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF);
818 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
821 /* Create a long mbuf chain. */
822 for (i = 1; i < NB_MBUF; i++) {
823 ret = rte_pktmbuf_chain(mbufs[0], mbufs[i]);
825 printf("rte_pktmbuf_chain() failed: %d\n", ret);
/* freeing the chain head must release every segment back to the pool */
830 /* Free the mbuf chain containing all the mbufs. */
831 rte_pktmbuf_free_bulk(mbufs, 1);
832 /* Test that they have been returned to the pool. */
833 if (!rte_mempool_full(pool)) {
834 printf("mempool not full\n");
838 printf("Test bulk free of multiple chains using multiple pools.\n");
840 /* Create mbuf chains containing mbufs from different pools. */
841 RTE_BUILD_BUG_ON(CHAIN_LEN % 2 != 0);
842 RTE_BUILD_BUG_ON(NB_MBUF % (CHAIN_LEN / 2) != 0);
843 for (i = 0; i < NB_MBUF * 2; i++) {
844 m = rte_pktmbuf_alloc((i & 4) ? pool2 : pool);
846 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
849 if ((i % CHAIN_LEN) == 0)
850 mbufs[i / CHAIN_LEN] = m;
852 rte_pktmbuf_chain(mbufs[i / CHAIN_LEN], m);
854 /* Test that both pools have been emptied. */
855 if (!(rte_mempool_empty(pool) && rte_mempool_empty(pool2))) {
856 printf("mempools not empty\n");
859 /* Free one mbuf chain. */
860 rte_pktmbuf_free_bulk(mbufs, 1);
861 /* Test that the segments have been returned to the pools. */
862 if (!(rte_mempool_avail_count(pool) == CHAIN_LEN / 2 &&
863 rte_mempool_avail_count(pool2) == CHAIN_LEN / 2)) {
864 printf("all segments of first mbuf have not been returned\n");
867 /* Free the remaining mbuf chains. */
868 rte_pktmbuf_free_bulk(&mbufs[1], NB_MBUF * 2 / CHAIN_LEN - 1);
869 /* Test that they have been returned to the pools. */
870 if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) {
871 printf("mempools not full\n");
/* cleanup path (reached on both success and error in the full source) */
882 printf("Free mbuf pools for bulk allocation.\n");
883 rte_mempool_free(pool);
884 rte_mempool_free(pool2);
889 * test that the pointer to the data on a packet mbuf is set properly
/*
 * Verify that freeing an mbuf resets data_off: allocate all mbufs, shift
 * each data_off by 64, free them, then re-allocate and check data_off is
 * back to RTE_PKTMBUF_HEADROOM.
 * NOTE(review): interleaved lines are missing from this listing (embedded
 * line numbers jump): loop bodies, NULL checks and the return.  Code kept
 * byte-identical.
 */
892 test_pktmbuf_pool_ptr(struct rte_mempool *pktmbuf_pool)
895 struct rte_mbuf *m[NB_MBUF];
898 for (i=0; i<NB_MBUF; i++)
901 /* alloc NB_MBUF mbufs */
902 for (i=0; i<NB_MBUF; i++) {
903 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
905 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
/* deliberately perturb data_off; free() must restore it */
909 m[i]->data_off += 64;
913 for (i=0; i<NB_MBUF; i++) {
915 rte_pktmbuf_free(m[i]);
918 for (i=0; i<NB_MBUF; i++)
921 /* alloc NB_MBUF mbufs */
922 for (i=0; i<NB_MBUF; i++) {
923 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
925 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
929 if (m[i]->data_off != RTE_PKTMBUF_HEADROOM) {
930 printf("invalid data_off\n");
936 for (i=0; i<NB_MBUF; i++) {
938 rte_pktmbuf_free(m[i]);
/*
 * Allocate NB_MBUF mbufs and free them segment by segment with
 * rte_pktmbuf_free_seg() (walking each chain via the mt/mb cursors).
 * NOTE(review): interleaved lines are missing from this listing (embedded
 * line numbers jump): the chain-walk loop body and the return.  Code kept
 * byte-identical.
 */
945 test_pktmbuf_free_segment(struct rte_mempool *pktmbuf_pool)
948 struct rte_mbuf *m[NB_MBUF];
951 for (i=0; i<NB_MBUF; i++)
954 /* alloc NB_MBUF mbufs */
955 for (i=0; i<NB_MBUF; i++) {
956 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
958 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
964 for (i=0; i<NB_MBUF; i++) {
966 struct rte_mbuf *mb, *mt;
/* free one segment at a time rather than the whole packet at once */
972 rte_pktmbuf_free_seg(mt);
981 * Stress test for rte_mbuf atomic refcnt.
982 * Implies that RTE_MBUF_REFCNT_ATOMIC is defined.
983 * For more efficiency, recommended to run with RTE_LIBRTE_MBUF_DEBUG defined.
986 #ifdef RTE_MBUF_REFCNT_ATOMIC
/*
 * Per-lcore consumer for the refcnt stress test: dequeue mbufs from the
 * shared ring and free them (each free drops one reference) until the
 * master sets refcnt_stop_slaves; record the number freed in
 * refcnt_lcore[] for the final accounting.
 * NOTE(review): interleaved lines are missing from this listing (embedded
 * line numbers jump), e.g. the declarations of 'mp'/'free' initialization
 * and the return.  Code kept byte-identical.
 */
989 test_refcnt_slave(void *arg)
991 unsigned lcore, free;
993 struct rte_ring *refcnt_mbuf_ring = arg;
995 lcore = rte_lcore_id();
996 printf("%s started at lcore %u\n", __func__, lcore);
/* spin until the master signals shutdown, freeing one mbuf per dequeue */
999 while (refcnt_stop_slaves == 0) {
1000 if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) {
1002 rte_pktmbuf_free(mp);
1006 refcnt_lcore[lcore] += free;
1007 printf("%s finished at lcore %u, "
1008 "number of freed mbufs: %u\n",
1009 __func__, lcore, free);
/*
 * One master iteration of the atomic-refcnt stress test: for every mbuf
 * in the pool, raise its refcnt by a random amount and enqueue it that
 * many times for the slave lcores to free; then wait until the ring
 * drains and all mbufs are back in the mempool, or panic after
 * REFCNT_MAX_TIMEOUT polls.
 * NOTE(review): interleaved lines are missing from this listing (embedded
 * line numbers jump): the loop increment, tref accumulation and the
 * sleep between polls.  Code kept byte-identical.
 */
1014 test_refcnt_iter(unsigned int lcore, unsigned int iter,
1015 struct rte_mempool *refcnt_pool,
1016 struct rte_ring *refcnt_mbuf_ring)
1019 unsigned i, n, tref, wn;
1024 /* For each mbuf in the pool:
1026 * - increment its reference up to N+1,
1027 * - enqueue it N times into the ring for slave cores to free.
1029 for (i = 0, n = rte_mempool_avail_count(refcnt_pool);
1030 i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
/* random reference count in [1, REFCNT_MAX_REF) */
1032 ref = RTE_MAX(rte_rand() % REFCNT_MAX_REF, 1UL);
/* odd ref: bump refcnt in one update; even ref: bump one at a time */
1034 if ((ref & 1) != 0) {
1035 rte_pktmbuf_refcnt_update(m, ref);
1037 rte_ring_enqueue(refcnt_mbuf_ring, m);
1039 while (ref-- != 0) {
1040 rte_pktmbuf_refcnt_update(m, 1);
1041 rte_ring_enqueue(refcnt_mbuf_ring, m);
/* drop the allocation's own reference; slaves hold the rest */
1044 rte_pktmbuf_free(m);
1048 rte_panic("(lcore=%u, iter=%u): was able to allocate only "
1049 "%u from %u mbufs\n", lcore, iter, i, n);
1051 /* wait till slave lcores will consume all mbufs */
1052 while (!rte_ring_empty(refcnt_mbuf_ring))
1055 /* check that all mbufs are back into mempool by now */
1056 for (wn = 0; wn != REFCNT_MAX_TIMEOUT; wn++) {
1057 if ((i = rte_mempool_avail_count(refcnt_pool)) == n) {
1058 refcnt_lcore[lcore] += tref;
1059 printf("%s(lcore=%u, iter=%u) completed, "
1060 "%u references processed\n",
1061 __func__, lcore, iter, tref);
/* timeout: some references were leaked or never freed */
1067 rte_panic("(lcore=%u, iter=%u): after %us only "
1068 "%u of %u mbufs left free\n", lcore, iter, wn, i, n);
/*
 * Master side of the refcnt stress test: run REFCNT_MAX_ITER iterations
 * of test_refcnt_iter(), then raise refcnt_stop_slaves so the slave
 * lcores exit their dequeue loops.
 * NOTE(review): interleaved lines are missing from this listing (embedded
 * line numbers jump), e.g. the declarations of i/lcore and the return.
 * Code kept byte-identical.
 */
1072 test_refcnt_master(struct rte_mempool *refcnt_pool,
1073 struct rte_ring *refcnt_mbuf_ring)
1077 lcore = rte_lcore_id();
1078 printf("%s started at lcore %u\n", __func__, lcore);
1080 for (i = 0; i != REFCNT_MAX_ITER; i++)
1081 test_refcnt_iter(lcore, i, refcnt_pool, refcnt_mbuf_ring);
/* signal all slave lcores to stop consuming from the ring */
1083 refcnt_stop_slaves = 1;
1086 printf("%s finished at lcore %u\n", __func__, lcore);
/*
 * Top-level refcnt stress test (compiled only with RTE_MBUF_REFCNT_ATOMIC):
 * create a dedicated pool and ring, launch test_refcnt_slave on all other
 * lcores, run the master iterations, then verify that the total number of
 * references handed out equals the total number of mbufs the slaves freed.
 * Requires at least two lcores.
 * NOTE(review): interleaved lines are missing from this listing (embedded
 * line numbers jump): socket-id arguments, error gotos, the tref reset
 * and the final return/cleanup label.  Code kept byte-identical.
 */
1093 test_refcnt_mbuf(void)
1095 #ifdef RTE_MBUF_REFCNT_ATOMIC
1096 unsigned int master, slave, tref;
1098 struct rte_mempool *refcnt_pool = NULL;
1099 struct rte_ring *refcnt_mbuf_ring = NULL;
1101 if (rte_lcore_count() < 2) {
1102 printf("Not enough cores for test_refcnt_mbuf, expecting at least 2\n");
1103 return TEST_SKIPPED;
1106 printf("starting %s, at %u lcores\n", __func__, rte_lcore_count());
1108 /* create refcnt pool & ring if they don't exist */
1110 refcnt_pool = rte_pktmbuf_pool_create(MAKE_STRING(refcnt_pool),
1111 REFCNT_MBUF_NUM, 0, 0, 0,
1113 if (refcnt_pool == NULL) {
1114 printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
/* ring must hold the worst case: every mbuf enqueued REFCNT_MAX_REF times */
1119 refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
1120 rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
1122 if (refcnt_mbuf_ring == NULL) {
1123 printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
1128 refcnt_stop_slaves = 0;
1129 memset(refcnt_lcore, 0, sizeof (refcnt_lcore));
/* slaves consume on all other lcores while the master drives iterations */
1131 rte_eal_mp_remote_launch(test_refcnt_slave, refcnt_mbuf_ring,
1134 test_refcnt_master(refcnt_pool, refcnt_mbuf_ring);
1136 rte_eal_mp_wait_lcore();
1138 /* check that we processed all references */
1140 master = rte_get_master_lcore();
1142 RTE_LCORE_FOREACH_SLAVE(slave)
1143 tref += refcnt_lcore[slave];
1145 if (tref != refcnt_lcore[master])
1146 rte_panic("refernced mbufs: %u, freed mbufs: %u\n",
1147 tref, refcnt_lcore[master]);
1149 rte_mempool_dump(stdout, refcnt_pool);
1150 rte_ring_dump(stdout, refcnt_mbuf_ring);
/* cleanup (under the err: label in the full source) */
1155 rte_mempool_free(refcnt_pool);
1156 rte_ring_free(refcnt_mbuf_ring);
1164 #include <sys/wait.h>
1166 /* use fork() to test mbuf errors panic */
/*
 * Run rte_mbuf_sanity_check(buf, 1) in a forked child so an expected
 * panic kills only the child; the parent (code missing from this listing)
 * waits and reports whether the child exited abnormally.
 * NOTE(review): the fork() call, the parent's wait/status logic and the
 * return values are missing from this listing (embedded line numbers
 * jump).  Code kept byte-identical.
 */
1168 verify_mbuf_check_panics(struct rte_mbuf *buf)
1176 rte_mbuf_sanity_check(buf, 1); /* should panic */
1177 exit(0); /* return normally if it doesn't panic */
1178 } else if (pid < 0){
1179 printf("Fork Failed\n");
/*
 * Negative tests for rte_mbuf_sanity_check(): confirm a good mbuf does
 * NOT panic, then corrupt one field at a time in a stack copy (pool,
 * buf_iova, buf_addr, refcnt 0 and UINT16_MAX) and confirm each corrupt
 * variant panics in the forked child.
 * NOTE(review): interleaved lines are missing from this listing (embedded
 * line numbers jump): the badbuf = *buf re-initializations before each
 * corruption and the return statements.  Code kept byte-identical.
 */
1190 test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool)
1192 struct rte_mbuf *buf;
1193 struct rte_mbuf badbuf;
1195 printf("Checking rte_mbuf_sanity_check for failure conditions\n");
1197 /* get a good mbuf to use to make copies */
1198 buf = rte_pktmbuf_alloc(pktmbuf_pool);
1201 printf("Checking good mbuf initially\n");
/* a valid mbuf must NOT panic (verify_mbuf_check_panics returns -1) */
1202 if (verify_mbuf_check_panics(buf) != -1)
1205 printf("Now checking for error conditions\n");
1207 if (verify_mbuf_check_panics(NULL)) {
1208 printf("Error with NULL mbuf test\n");
/* each corrupted copy below must make the sanity check panic */
1214 if (verify_mbuf_check_panics(&badbuf)) {
1215 printf("Error with bad-pool mbuf test\n");
1220 badbuf.buf_iova = 0;
1221 if (verify_mbuf_check_panics(&badbuf)) {
1222 printf("Error with bad-physaddr mbuf test\n");
1227 badbuf.buf_addr = NULL;
1228 if (verify_mbuf_check_panics(&badbuf)) {
1229 printf("Error with bad-addr mbuf test\n");
1235 if (verify_mbuf_check_panics(&badbuf)) {
1236 printf("Error with bad-refcnt(0) mbuf test\n");
1241 badbuf.refcnt = UINT16_MAX;
1242 if (verify_mbuf_check_panics(&badbuf)) {
1243 printf("Error with bad-refcnt(MAX) mbuf test\n");
/*
 * Build a chained mbuf of pkt_len bytes split across nb_segs segments,
 * fill it with a deterministic pattern ((offset) % 0xff), linearize it
 * with rte_pktmbuf_linearize(), and verify the result is contiguous and
 * the pattern intact.
 * NOTE(review): interleaved lines are missing from this listing (embedded
 * line numbers jump): declarations of seg/remain/data_len, the 'remain'
 * bookkeeping, the first-segment assignment to 'mbuf' and the error
 * gotos/returns.  Code kept byte-identical.
 */
1251 test_mbuf_linearize(struct rte_mempool *pktmbuf_pool, int pkt_len,
1255 struct rte_mbuf *m = NULL, *mbuf = NULL;
1263 printf("Packet size must be 1 or more (is %d)\n", pkt_len);
1268 printf("Number of segments must be 1 or more (is %d)\n",
1273 seg_len = pkt_len / nb_segs;
1279 /* Create chained mbuf_src and fill it generated data */
1280 for (seg = 0; remain > 0; seg++) {
1282 m = rte_pktmbuf_alloc(pktmbuf_pool);
1284 printf("Cannot create segment for source mbuf");
1288 /* Make sure if tailroom is zeroed */
1289 memset(rte_pktmbuf_mtod(m, uint8_t *), 0,
1290 rte_pktmbuf_tailroom(m));
/* last segment may carry less than seg_len bytes */
1293 if (data_len > seg_len)
1296 data = (uint8_t *)rte_pktmbuf_append(m, data_len);
1298 printf("Cannot append %d bytes to the mbuf\n",
/* deterministic pattern keyed on the absolute byte offset */
1303 for (i = 0; i < data_len; i++)
1304 data[i] = (seg * seg_len + i) % 0x0ff;
1309 rte_pktmbuf_chain(mbuf, m);
1314 /* Create destination buffer to store coalesced data */
1315 if (rte_pktmbuf_linearize(mbuf)) {
1316 printf("Mbuf linearization failed\n");
1320 if (!rte_pktmbuf_is_contiguous(mbuf)) {
1321 printf("Source buffer should be contiguous after "
1326 data = rte_pktmbuf_mtod(mbuf, uint8_t *);
1328 for (i = 0; i < pkt_len; i++)
1329 if (data[i] != (i % 0x0ff)) {
1330 printf("Incorrect data in linearized mbuf\n");
1334 rte_pktmbuf_free(mbuf);
/* failure path cleanup (under the fail label in the full source) */
1339 rte_pktmbuf_free(mbuf);
/*
 * Driver for test_mbuf_linearize(): iterate over a table of
 * (size, nb_segs) combinations and fail on the first combination that
 * does not linearize correctly.
 * NOTE(review): the mbuf_array table initializer, the loop variable
 * declaration and the returns are missing from this listing (embedded
 * line numbers jump).  Code kept byte-identical.
 */
1344 test_mbuf_linearize_check(struct rte_mempool *pktmbuf_pool)
1346 struct test_mbuf_array {
1358 printf("Test mbuf linearize API\n");
1360 for (i = 0; i < RTE_DIM(mbuf_array); i++)
1361 if (test_mbuf_linearize(pktmbuf_pool, mbuf_array[i].size,
1362 mbuf_array[i].nb_segs)) {
1363 printf("Test failed for %d, %d\n", mbuf_array[i].size,
1364 mbuf_array[i].nb_segs);
1372 * Helper function for test_tx_ofload
/*
 * Helper for test_tx_offload(): set the mbuf tx_offload fields one
 * bit-field at a time (the baseline the raw rte_mbuf_tx_offload() path
 * is benchmarked against).
 * NOTE(review): the l2_len/l3_len/l4_len assignments are missing from
 * this listing (embedded line numbers jump from 1376 to 1381).  Code
 * kept byte-identical.
 */
1375 set_tx_offload(struct rte_mbuf *mb, uint64_t il2, uint64_t il3, uint64_t il4,
1376 uint64_t tso, uint64_t ol3, uint64_t ol2)
1381 mb->tso_segsz = tso;
1382 mb->outer_l3_len = ol3;
1383 mb->outer_l2_len = ol2;
/*
 * Benchmark and cross-check the two ways of filling mbuf->tx_offload:
 * per-bit-field assignment (set_tx_offload) vs the single raw write via
 * rte_mbuf_tx_offload().  Runs each over 0x10000 zeroed mbufs with random
 * in-range field values, times both with rte_rdtsc_precise(), and returns
 * 0 only if a sampled tx_offload value from each pass is identical.
 * NOTE(review): interleaved lines are missing from this listing (embedded
 * line numbers jump): the struct field list of 'txof', the memset between
 * passes and rte_free(mb).  Code kept byte-identical.
 */
1387 test_tx_offload(void)
1389 struct rte_mbuf *mb;
1390 uint64_t tm, v1, v2;
1394 static volatile struct {
1401 const uint32_t num = 0x10000;
/* random values constrained to each bit-field's width */
1403 txof.l2 = rte_rand() % (1 << RTE_MBUF_L2_LEN_BITS);
1404 txof.l3 = rte_rand() % (1 << RTE_MBUF_L3_LEN_BITS);
1405 txof.l4 = rte_rand() % (1 << RTE_MBUF_L4_LEN_BITS);
1406 txof.tso = rte_rand() % (1 << RTE_MBUF_TSO_SEGSZ_BITS);
1408 printf("%s started, tx_offload = {\n"
1412 "\ttso_segsz=%#hx,\n"
1413 "\touter_l3_len=%#x,\n"
1414 "\touter_l2_len=%#x,\n"
1417 txof.l2, txof.l3, txof.l4, txof.tso, txof.l3, txof.l2);
1419 sz = sizeof(*mb) * num;
1420 mb = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
1422 printf("%s failed, out of memory\n", __func__);
/* pass 1: per-field assignment, timed */
1427 tm = rte_rdtsc_precise();
1429 for (i = 0; i != num; i++)
1430 set_tx_offload(mb + i, txof.l2, txof.l3, txof.l4,
1431 txof.tso, txof.l3, txof.l2);
1433 tm = rte_rdtsc_precise() - tm;
1434 printf("%s set tx_offload by bit-fields: %u iterations, %"
1435 PRIu64 " cycles, %#Lf cycles/iter\n",
1436 __func__, num, tm, (long double)tm / num);
1438 v1 = mb[rte_rand() % num].tx_offload;
/* pass 2: single raw 64-bit write, timed */
1441 tm = rte_rdtsc_precise();
1443 for (i = 0; i != num; i++)
1444 mb[i].tx_offload = rte_mbuf_tx_offload(txof.l2, txof.l3,
1445 txof.l4, txof.tso, txof.l3, txof.l2, 0);
1447 tm = rte_rdtsc_precise() - tm;
1448 printf("%s set raw tx_offload: %u iterations, %"
1449 PRIu64 " cycles, %#Lf cycles/iter\n",
1450 __func__, num, tm, (long double)tm / num);
1452 v2 = mb[rte_rand() % num].tx_offload;
1456 printf("%s finished\n"
1457 "expected tx_offload value: 0x%" PRIx64 ";\n"
1458 "rte_mbuf_tx_offload value: 0x%" PRIx64 ";\n",
/* the two construction methods must agree bit-for-bit */
1461 return (v1 == v2) ? 0 : -EINVAL;
/*
 * Validate rte_get_rx_ol_flag_list() argument handling and output:
 * NULL buffer and zero-length buffer must return -1; a too-small buffer
 * must produce a truncated (len - 1 chars + NUL) string; zero and valid
 * masks must return 0 with a non-empty description in the buffer.
 */
1465 test_get_rx_ol_flag_list(void)
1467 int len = 6, ret = 0;
1471 /* Test case to check with null buffer */
1472 ret = rte_get_rx_ol_flag_list(0, NULL, 0);
1474 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1476 /* Test case to check with zero buffer len */
1477 ret = rte_get_rx_ol_flag_list(PKT_RX_L4_CKSUM_MASK, buf, 0);
1479 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* with length 0 nothing may have been written into buf */
1481 buflen = strlen(buf);
1483 GOTO_FAIL("%s buffer should be empty, received = %d\n",
1486 /* Test case to check with reduced buffer len */
1487 ret = rte_get_rx_ol_flag_list(0, buf, len);
1489 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* truncated output: exactly len - 1 characters before the NUL */
1491 buflen = strlen(buf);
1492 if (buflen != (len - 1))
1493 GOTO_FAIL("%s invalid buffer length retrieved, expected: %d,"
1494 "received = %d\n", __func__,
1497 /* Test case to check with zero mask value */
1498 ret = rte_get_rx_ol_flag_list(0, buf, sizeof(buf));
1500 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1502 buflen = strlen(buf);
1504 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1505 "non-zero, buffer should not be empty");
1507 /* Test case to check with valid mask value */
1508 ret = rte_get_rx_ol_flag_list(PKT_RX_SEC_OFFLOAD, buf, sizeof(buf));
1510 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1512 buflen = strlen(buf);
1514 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1515 "non-zero, buffer should not be empty");
/*
 * TX-side twin of test_get_rx_ol_flag_list(): exercises
 * rte_get_tx_ol_flag_list() with NULL buffer, zero buffer length,
 * truncated buffer, zero mask and a valid mask (PKT_TX_UDP_CKSUM),
 * checking the return code and the resulting string length each time.
 */
1523 test_get_tx_ol_flag_list(void)
1525 int len = 6, ret = 0;
1529 /* Test case to check with null buffer */
1530 ret = rte_get_tx_ol_flag_list(0, NULL, 0);
1532 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1534 /* Test case to check with zero buffer len */
1535 ret = rte_get_tx_ol_flag_list(PKT_TX_IP_CKSUM, buf, 0);
1537 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* with length 0 nothing may have been written into buf */
1539 buflen = strlen(buf);
1541 GOTO_FAIL("%s buffer should be empty, received = %d\n",
1545 /* Test case to check with reduced buffer len */
1546 ret = rte_get_tx_ol_flag_list(0, buf, len);
1548 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* truncated output: exactly len - 1 characters before the NUL */
1550 buflen = strlen(buf);
1551 if (buflen != (len - 1))
1552 GOTO_FAIL("%s invalid buffer length retrieved, expected: %d,"
1553 "received = %d\n", __func__,
1556 /* Test case to check with zero mask value */
1557 ret = rte_get_tx_ol_flag_list(0, buf, sizeof(buf));
1559 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1561 buflen = strlen(buf);
1563 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1564 "non-zero, buffer should not be empty");
1566 /* Test case to check with valid mask value */
1567 ret = rte_get_tx_ol_flag_list(PKT_TX_UDP_CKSUM, buf, sizeof(buf));
1569 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1571 buflen = strlen(buf);
1573 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1574 "non-zero, buffer should not be empty");
/*
 * Check rte_get_rx_ol_flag_name(): for every known RX offload flag the
 * returned string must equal the flag's macro name (VAL_NAME pairs each
 * flag value with its stringified identifier), and an invalid flag (0)
 * must yield NULL.
 */
1588 test_get_rx_ol_flag_name(void)
1591 const char *flag_str = NULL;
/* table of {flag value, "flag name"} built via the VAL_NAME macro */
1592 const struct flag_name rx_flags[] = {
1593 VAL_NAME(PKT_RX_VLAN),
1594 VAL_NAME(PKT_RX_RSS_HASH),
1595 VAL_NAME(PKT_RX_FDIR),
1596 VAL_NAME(PKT_RX_L4_CKSUM_BAD),
1597 VAL_NAME(PKT_RX_L4_CKSUM_GOOD),
1598 VAL_NAME(PKT_RX_L4_CKSUM_NONE),
1599 VAL_NAME(PKT_RX_IP_CKSUM_BAD),
1600 VAL_NAME(PKT_RX_IP_CKSUM_GOOD),
1601 VAL_NAME(PKT_RX_IP_CKSUM_NONE),
1602 VAL_NAME(PKT_RX_EIP_CKSUM_BAD),
1603 VAL_NAME(PKT_RX_VLAN_STRIPPED),
1604 VAL_NAME(PKT_RX_IEEE1588_PTP),
1605 VAL_NAME(PKT_RX_IEEE1588_TMST),
1606 VAL_NAME(PKT_RX_FDIR_ID),
1607 VAL_NAME(PKT_RX_FDIR_FLX),
1608 VAL_NAME(PKT_RX_QINQ_STRIPPED),
1609 VAL_NAME(PKT_RX_LRO),
1610 VAL_NAME(PKT_RX_TIMESTAMP),
1611 VAL_NAME(PKT_RX_SEC_OFFLOAD),
1612 VAL_NAME(PKT_RX_SEC_OFFLOAD_FAILED),
1613 VAL_NAME(PKT_RX_OUTER_L4_CKSUM_BAD),
1614 VAL_NAME(PKT_RX_OUTER_L4_CKSUM_GOOD),
1615 VAL_NAME(PKT_RX_OUTER_L4_CKSUM_INVALID),
1618 /* Test case to check with valid flag */
1619 for (i = 0; i < RTE_DIM(rx_flags); i++) {
1620 flag_str = rte_get_rx_ol_flag_name(rx_flags[i].flag);
1621 if (flag_str == NULL)
1622 GOTO_FAIL("%s: Expected flagname = %s; received null\n",
1623 __func__, rx_flags[i].name);
1624 if (strcmp(flag_str, rx_flags[i].name) != 0)
1625 GOTO_FAIL("%s: Expected flagname = %s; received = %s\n",
1626 __func__, rx_flags[i].name, flag_str);
1628 /* Test case to check with invalid flag */
1629 flag_str = rte_get_rx_ol_flag_name(0);
1630 if (flag_str != NULL) {
1631 GOTO_FAIL("%s: Expected flag name = null; received = %s\n",
1632 __func__, flag_str);
/*
 * Check rte_get_tx_ol_flag_name(): for every known TX offload flag the
 * returned string must equal the flag's macro name, and an invalid flag
 * (0) must yield NULL.  Mirrors test_get_rx_ol_flag_name().
 */
1641 test_get_tx_ol_flag_name(void)
1644 const char *flag_str = NULL;
/* table of {flag value, "flag name"} built via the VAL_NAME macro */
1645 const struct flag_name tx_flags[] = {
1646 VAL_NAME(PKT_TX_VLAN),
1647 VAL_NAME(PKT_TX_IP_CKSUM),
1648 VAL_NAME(PKT_TX_TCP_CKSUM),
1649 VAL_NAME(PKT_TX_SCTP_CKSUM),
1650 VAL_NAME(PKT_TX_UDP_CKSUM),
1651 VAL_NAME(PKT_TX_IEEE1588_TMST),
1652 VAL_NAME(PKT_TX_TCP_SEG),
1653 VAL_NAME(PKT_TX_IPV4),
1654 VAL_NAME(PKT_TX_IPV6),
1655 VAL_NAME(PKT_TX_OUTER_IP_CKSUM),
1656 VAL_NAME(PKT_TX_OUTER_IPV4),
1657 VAL_NAME(PKT_TX_OUTER_IPV6),
1658 VAL_NAME(PKT_TX_TUNNEL_VXLAN),
1659 VAL_NAME(PKT_TX_TUNNEL_GRE),
1660 VAL_NAME(PKT_TX_TUNNEL_IPIP),
1661 VAL_NAME(PKT_TX_TUNNEL_GENEVE),
1662 VAL_NAME(PKT_TX_TUNNEL_MPLSINUDP),
1663 VAL_NAME(PKT_TX_TUNNEL_VXLAN_GPE),
1664 VAL_NAME(PKT_TX_TUNNEL_IP),
1665 VAL_NAME(PKT_TX_TUNNEL_UDP),
1666 VAL_NAME(PKT_TX_QINQ),
1667 VAL_NAME(PKT_TX_MACSEC),
1668 VAL_NAME(PKT_TX_SEC_OFFLOAD),
1669 VAL_NAME(PKT_TX_UDP_SEG),
1670 VAL_NAME(PKT_TX_OUTER_UDP_CKSUM),
1671 VAL_NAME(PKT_TX_METADATA),
1674 /* Test case to check with valid flag */
1675 for (i = 0; i < RTE_DIM(tx_flags); i++) {
1676 flag_str = rte_get_tx_ol_flag_name(tx_flags[i].flag);
1677 if (flag_str == NULL)
1678 GOTO_FAIL("%s: Expected flagname = %s; received null\n",
1679 __func__, tx_flags[i].name);
1680 if (strcmp(flag_str, tx_flags[i].name) != 0)
1681 GOTO_FAIL("%s: Expected flagname = %s; received = %s\n",
1682 __func__, tx_flags[i].name, flag_str);
1684 /* Test case to check with invalid flag */
1685 flag_str = rte_get_tx_ol_flag_name(0);
1686 if (flag_str != NULL) {
1687 GOTO_FAIL("%s: Expected flag name = null; received = %s\n",
1688 __func__, flag_str);
/*
 * Shared helper for the rte_validate_tx_offload() scenarios: allocate one
 * mbuf, apply the caller's ol_flags and TSO segment size, and check that
 * rte_validate_tx_offload() returns exactly expected_retval.
 * test_name is only used to label failure messages.  The mbuf is freed on
 * both the success path and the (elided) fail path.
 */
1698 test_mbuf_validate_tx_offload(const char *test_name,
1699 struct rte_mempool *pktmbuf_pool,
1702 int expected_retval)
1704 struct rte_mbuf *m = NULL;
1707 /* alloc a mbuf and do sanity check */
1708 m = rte_pktmbuf_alloc(pktmbuf_pool);
1710 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1711 if (rte_pktmbuf_pkt_len(m) != 0)
1712 GOTO_FAIL("%s: Bad packet length\n", __func__);
1713 rte_mbuf_sanity_check(m, 0);
/* install the scenario under test, then validate */
1714 m->ol_flags = ol_flags;
1715 m->tso_segsz = segsize;
1716 ret = rte_validate_tx_offload(m);
1717 if (ret != expected_retval)
1718 GOTO_FAIL("%s(%s): expected ret val: %d; received: %d\n",
1719 __func__, test_name, expected_retval, ret);
1720 rte_pktmbuf_free(m);
1725 rte_pktmbuf_free(m);
/*
 * Drive test_mbuf_validate_tx_offload() through a sequence of positive
 * and negative rte_validate_tx_offload() scenarios: IP checksum on an
 * IPv6 packet, L4 checksum without an IP type, TSO with a zero or valid
 * segment size, outer IP checksum with and without outer IPv4, and the
 * trivially-accepted no-offload case.  Expected result is -EINVAL for
 * the invalid combinations and 0 for the valid ones.
 */
1732 test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
1734 /* test to validate tx offload flags */
1735 uint64_t ol_flags = 0;
1737 /* test to validate if IP checksum is counted only for IPV4 packet */
1738 /* set both IP checksum and IPV6 flags */
1739 ol_flags |= PKT_TX_IP_CKSUM;
1740 ol_flags |= PKT_TX_IPV6;
1741 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_CKSUM_IPV6_SET",
1743 ol_flags, 0, -EINVAL) < 0)
1744 GOTO_FAIL("%s failed: IP cksum is set incorrect.\n", __func__);
1745 /* resetting ol_flags for next testcase */
1748 /* test to validate if IP type is set when required */
1749 ol_flags |= PKT_TX_L4_MASK;
1750 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
1752 ol_flags, 0, -EINVAL) < 0)
1753 GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
1755 /* test if IP type is set when TCP SEG is on */
1756 ol_flags |= PKT_TX_TCP_SEG;
1757 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
1759 ol_flags, 0, -EINVAL) < 0)
1760 GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
1763 /* test to confirm IP type (IPV4/IPV6) is set */
1764 ol_flags = PKT_TX_L4_MASK;
1765 ol_flags |= PKT_TX_IPV6;
1766 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_SET",
1768 ol_flags, 0, 0) < 0)
1769 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1772 /* test to check TSO segment size is non-zero */
1773 ol_flags |= PKT_TX_IPV4;
1774 ol_flags |= PKT_TX_TCP_SEG;
1775 /* set 0 tso segment size */
1776 if (test_mbuf_validate_tx_offload("MBUF_TEST_NULL_TSO_SEGSZ",
1778 ol_flags, 0, -EINVAL) < 0)
1779 GOTO_FAIL("%s failed: tso segment size is null.\n", __func__);
1781 /* retain IPV4 and PKT_TX_TCP_SEG mask */
1782 /* set valid tso segment size but IP CKSUM not set */
1783 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_NOT_SET",
1785 ol_flags, 512, -EINVAL) < 0)
1786 GOTO_FAIL("%s failed: IP CKSUM is not set.\n", __func__);
1788 /* test to validate if IP checksum is set for TSO capability */
1789 /* retain IPV4, TCP_SEG, tso_seg size */
1790 ol_flags |= PKT_TX_IP_CKSUM;
1791 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_SET",
1793 ol_flags, 512, 0) < 0)
1794 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1796 /* test to confirm TSO for IPV6 type */
1798 ol_flags |= PKT_TX_IPV6;
1799 ol_flags |= PKT_TX_TCP_SEG;
1800 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IPV6_SET",
1802 ol_flags, 512, 0) < 0)
1803 GOTO_FAIL("%s failed: TSO req not met.\n", __func__);
1806 /* test if outer IP checksum set for non outer IPv4 packet */
1807 ol_flags |= PKT_TX_IPV6;
1808 ol_flags |= PKT_TX_OUTER_IP_CKSUM;
1809 if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_NOT_SET",
1811 ol_flags, 512, -EINVAL) < 0)
1812 GOTO_FAIL("%s failed: Outer IP cksum set.\n", __func__);
1815 /* test to confirm outer IP checksum is set for outer IPV4 packet */
1816 ol_flags |= PKT_TX_OUTER_IP_CKSUM;
1817 ol_flags |= PKT_TX_OUTER_IPV4;
1818 if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_SET",
1820 ol_flags, 512, 0) < 0)
1821 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1824 /* test to confirm if packets with no TX_OFFLOAD_MASK are skipped */
1825 if (test_mbuf_validate_tx_offload("MBUF_TEST_OL_MASK_NOT_SET",
1827 ol_flags, 512, 0) < 0)
1828 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1835 * Test for allocating a bulk of mbufs:
1836 * defines an array of positive counts for the mbuf allocations.
/*
 * Bulk-allocate mbufs with counts chosen around the mempool cache size
 * (just below, equal, just above, and multiples of it) and free whatever
 * was handed back.  Any non-zero return from rte_pktmbuf_alloc_bulk()
 * for these valid counts is a failure.
 */
1839 test_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
1842 unsigned int idx, loop;
1843 unsigned int alloc_counts[] = {
1845 MEMPOOL_CACHE_SIZE - 1,
1846 MEMPOOL_CACHE_SIZE + 1,
/* 1.5x cache size: the float product is truncated to an unsigned count */
1847 MEMPOOL_CACHE_SIZE * 1.5,
1848 MEMPOOL_CACHE_SIZE * 2,
1849 MEMPOOL_CACHE_SIZE * 2 - 1,
1850 MEMPOOL_CACHE_SIZE * 2 + 1,
1854 /* allocate a large array of mbuf pointers */
1855 struct rte_mbuf *mbufs[NB_MBUF] = { 0 };
1856 for (idx = 0; idx < RTE_DIM(alloc_counts); idx++) {
1857 ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
/* return any mbufs obtained so later iterations see a full pool */
1860 for (loop = 0; loop < alloc_counts[idx] &&
1861 mbufs[loop] != NULL; loop++)
1862 rte_pktmbuf_free(mbufs[loop]);
1863 } else if (ret != 0) {
1864 printf("%s: Bulk alloc failed count(%u); ret val(%d)\n",
1865 __func__, alloc_counts[idx], ret);
1873 * Negative testing for allocating a bulk of mbufs
1876 test_neg_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
1879 unsigned int idx, loop;
1880 unsigned int neg_alloc_counts[] = {
1881 MEMPOOL_CACHE_SIZE - NB_MBUF,
1886 struct rte_mbuf *mbufs[NB_MBUF * 8] = { 0 };
1888 for (idx = 0; idx < RTE_DIM(neg_alloc_counts); idx++) {
1889 ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
1890 neg_alloc_counts[idx]);
1892 printf("%s: Bulk alloc must fail! count(%u); ret(%d)\n",
1893 __func__, neg_alloc_counts[idx], ret);
1894 for (loop = 0; loop < neg_alloc_counts[idx] &&
1895 mbufs[loop] != NULL; loop++)
1896 rte_pktmbuf_free(mbufs[loop]);
1904 * Test to read mbuf packet using rte_pktmbuf_read
/*
 * rte_pktmbuf_read() on a single-segment mbuf: append
 * MBUF_TEST_DATA_LEN2 bytes of 0xfe and verify that a read from offset 0
 * returns exactly those bytes (single segment, so the read is zero-copy
 * and the NULL scratch buffer is never needed).
 */
1907 test_pktmbuf_read(struct rte_mempool *pktmbuf_pool)
1909 struct rte_mbuf *m = NULL;
1911 const char *data_copy = NULL;
1915 m = rte_pktmbuf_alloc(pktmbuf_pool);
1917 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1918 if (rte_pktmbuf_pkt_len(m) != 0)
1919 GOTO_FAIL("%s: Bad packet length\n", __func__);
1920 rte_mbuf_sanity_check(m, 0);
/* fill the payload with a known pattern */
1922 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
1924 GOTO_FAIL("%s: Cannot append data\n", __func__);
1925 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN2)
1926 GOTO_FAIL("%s: Bad packet length\n", __func__);
1927 memset(data, 0xfe, MBUF_TEST_DATA_LEN2);
1929 /* read the data from mbuf */
1930 data_copy = rte_pktmbuf_read(m, 0, MBUF_TEST_DATA_LEN2, NULL);
1931 if (data_copy == NULL)
1932 GOTO_FAIL("%s: Error in reading data!\n", __func__);
1933 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
1934 if (data_copy[off] != (char)0xfe)
1935 GOTO_FAIL("Data corrupted at offset %u", off);
1937 rte_pktmbuf_free(m);
1943 rte_pktmbuf_free(m);
1950 * Test to read mbuf packet data from offset
/*
 * rte_pktmbuf_read() at non-zero offsets: prepend an ethernet-header-sized
 * region (0xde), append payload (0xcc), then read the header, the payload,
 * a partial payload, zero-length spans, and out-of-range offsets/lengths,
 * checking both the returned bytes and the returned addresses.
 * NOTE(review): declares legacy 'struct ether_hdr' but sizes it with
 * sizeof(struct rte_ether_hdr) — presumably compatible via the rename
 * compat aliases; confirm both name the same layout.
 */
1953 test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool)
1955 struct rte_mbuf *m = NULL;
1956 struct ether_hdr *hdr = NULL;
1958 const char *data_copy = NULL;
1960 unsigned int hdr_len = sizeof(struct rte_ether_hdr);
1963 m = rte_pktmbuf_alloc(pktmbuf_pool);
1965 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1967 if (rte_pktmbuf_pkt_len(m) != 0)
1968 GOTO_FAIL("%s: Bad packet length\n", __func__);
1969 rte_mbuf_sanity_check(m, 0);
1971 /* prepend an ethernet header */
1972 hdr = (struct ether_hdr *)rte_pktmbuf_prepend(m, hdr_len);
1974 GOTO_FAIL("%s: Cannot prepend header\n", __func__);
1975 if (rte_pktmbuf_pkt_len(m) != hdr_len)
1976 GOTO_FAIL("%s: Bad pkt length", __func__);
1977 if (rte_pktmbuf_data_len(m) != hdr_len)
1978 GOTO_FAIL("%s: Bad data length", __func__);
/* mark the header region with 0xde so reads can be distinguished */
1979 memset(hdr, 0xde, hdr_len);
1981 /* read mbuf header info from 0 offset */
1982 data_copy = rte_pktmbuf_read(m, 0, hdr_len, NULL);
1983 if (data_copy == NULL)
1984 GOTO_FAIL("%s: Error in reading header!\n", __func__);
1985 for (off = 0; off < hdr_len; off++) {
1986 if (data_copy[off] != (char)0xde)
1987 GOTO_FAIL("Header info corrupted at offset %u", off);
1990 /* append sample data after ethernet header */
1991 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
1993 GOTO_FAIL("%s: Cannot append data\n", __func__);
1994 if (rte_pktmbuf_pkt_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
1995 GOTO_FAIL("%s: Bad packet length\n", __func__);
1996 if (rte_pktmbuf_data_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
1997 GOTO_FAIL("%s: Bad data length\n", __func__);
/* payload region is marked 0xcc, distinct from the 0xde header */
1998 memset(data, 0xcc, MBUF_TEST_DATA_LEN2);
2000 /* read mbuf data after header info */
2001 data_copy = rte_pktmbuf_read(m, hdr_len, MBUF_TEST_DATA_LEN2, NULL);
2002 if (data_copy == NULL)
2003 GOTO_FAIL("%s: Error in reading header data!\n", __func__);
2004 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
2005 if (data_copy[off] != (char)0xcc)
2006 GOTO_FAIL("Data corrupted at offset %u", off);
2009 /* partial reading of mbuf data */
2010 data_copy = rte_pktmbuf_read(m, hdr_len + 5, MBUF_TEST_DATA_LEN2 - 5,
2012 if (data_copy == NULL)
2013 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2014 if (strlen(data_copy) != MBUF_TEST_DATA_LEN2 - 5)
2015 GOTO_FAIL("%s: Incorrect data length!\n", __func__);
2016 for (off = 0; off < MBUF_TEST_DATA_LEN2 - 5; off++) {
2017 if (data_copy[off] != (char)0xcc)
2018 GOTO_FAIL("Data corrupted at offset %u", off);
2021 /* read length greater than mbuf data_len */
2022 if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_data_len(m) + 1,
2024 GOTO_FAIL("%s: Requested len is larger than mbuf data len!\n",
2027 /* read length greater than mbuf pkt_len */
2028 if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_pkt_len(m) + 1,
2030 GOTO_FAIL("%s: Requested len is larger than mbuf pkt len!\n",
2033 /* read data of zero len from valid offset */
2034 data_copy = rte_pktmbuf_read(m, hdr_len, 0, NULL);
2035 if (data_copy == NULL)
2036 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2037 if (strlen(data_copy) != MBUF_TEST_DATA_LEN2)
2038 GOTO_FAIL("%s: Corrupted data content!\n", __func__);
2039 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
2040 if (data_copy[off] != (char)0xcc)
2041 GOTO_FAIL("Data corrupted at offset %u", off);
2044 /* read data of zero length from zero offset */
2045 data_copy = rte_pktmbuf_read(m, 0, 0, NULL);
2046 if (data_copy == NULL)
2047 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2048 /* check if the received address is the beginning of header info */
2049 if (hdr != (const struct ether_hdr *)data_copy)
2050 GOTO_FAIL("%s: Corrupted data address!\n", __func__);
2052 /* read data of max length from valid offset */
2053 data_copy = rte_pktmbuf_read(m, hdr_len, UINT_MAX, NULL);
2054 if (data_copy == NULL)
2055 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2056 /* check if the received address is the beginning of data segment */
2057 if (data_copy != data)
2058 GOTO_FAIL("%s: Corrupted data address!\n", __func__);
2060 /* try to read from mbuf with max size offset */
2061 data_copy = rte_pktmbuf_read(m, UINT_MAX, 0, NULL);
2062 if (data_copy != NULL)
2063 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2065 /* try to read from mbuf with max size offset and len */
2066 data_copy = rte_pktmbuf_read(m, UINT_MAX, UINT_MAX, NULL);
2067 if (data_copy != NULL)
2068 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2070 rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
2072 rte_pktmbuf_free(m);
2078 rte_pktmbuf_free(m);
/* number of segments to build for this chained-mbuf test case */
2085 unsigned int seg_count;
/* per-segment payload lengths; at most MBUF_MAX_SEG entries are used */
2089 unsigned int seg_lengths[MBUF_MAX_SEG];
2092 /* create a mbuf with different sized segments
2093 * and fill with data [0x00 0x01 0x02 ...]
/*
 * Build a chained mbuf from test_data: allocate seg_count segments of the
 * requested lengths, optionally (MBUF_HEADER, first segment only) prepend
 * an ethernet-header-sized region, and fill everything with a continuous
 * incrementing byte pattern (tracked by last_index) so reads can later be
 * verified by absolute offset.  Returns the head mbuf, or NULL (via the
 * elided fail path) with all segments freed.
 */
2095 static struct rte_mbuf *
2096 create_packet(struct rte_mempool *pktmbuf_pool,
2097 struct test_case *test_data)
2099 uint16_t i, ret, seg, seg_len = 0;
2100 uint32_t last_index = 0;
2101 unsigned int seg_lengths[MBUF_MAX_SEG];
2102 unsigned int hdr_len;
2103 struct rte_mbuf *pkt = NULL;
2104 struct rte_mbuf *pkt_seg = NULL;
/* work on a local copy of the caller's segment-length table */
2108 memcpy(seg_lengths, test_data->seg_lengths,
2109 sizeof(unsigned int)*test_data->seg_count);
2110 for (seg = 0; seg < test_data->seg_count; seg++) {
2112 seg_len = seg_lengths[seg];
2113 pkt_seg = rte_pktmbuf_alloc(pktmbuf_pool);
2114 if (pkt_seg == NULL)
2115 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2116 if (rte_pktmbuf_pkt_len(pkt_seg) != 0)
2117 GOTO_FAIL("%s: Bad packet length\n", __func__);
2118 rte_mbuf_sanity_check(pkt_seg, 0);
2119 /* Add header only for the first segment */
2120 if (test_data->flags == MBUF_HEADER && seg == 0) {
2121 hdr_len = sizeof(struct rte_ether_hdr);
2122 /* prepend a header and fill with dummy data */
2123 hdr = (char *)rte_pktmbuf_prepend(pkt_seg, hdr_len);
2125 GOTO_FAIL("%s: Cannot prepend header\n",
2127 if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len)
2128 GOTO_FAIL("%s: Bad pkt length", __func__);
2129 if (rte_pktmbuf_data_len(pkt_seg) != hdr_len)
2130 GOTO_FAIL("%s: Bad data length", __func__);
2131 for (i = 0; i < hdr_len; i++)
2132 hdr[i] = (last_index + i) % 0xffff;
2133 last_index += hdr_len;
2135 /* skip appending segment with 0 length */
2138 data = rte_pktmbuf_append(pkt_seg, seg_len);
2140 GOTO_FAIL("%s: Cannot append data segment\n", __func__);
2141 if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len + seg_len)
2142 GOTO_FAIL("%s: Bad packet segment length: %d\n",
2143 __func__, rte_pktmbuf_pkt_len(pkt_seg));
2144 if (rte_pktmbuf_data_len(pkt_seg) != hdr_len + seg_len)
2145 GOTO_FAIL("%s: Bad data length\n", __func__);
2146 for (i = 0; i < seg_len; i++)
2147 data[i] = (last_index + i) % 0xffff;
2148 /* to fill continuous data from one seg to another */
2150 /* create chained mbufs */
2154 ret = rte_pktmbuf_chain(pkt, pkt_seg);
2156 GOTO_FAIL("%s:FAIL: Chained mbuf creation %d\n",
2160 pkt_seg = pkt_seg->next;
/* fail path: release the chain built so far, then any loose segment */
2165 rte_pktmbuf_free(pkt);
2168 if (pkt_seg != NULL) {
2169 rte_pktmbuf_free(pkt_seg);
/*
 * rte_pktmbuf_read() across segment boundaries: build chained mbufs via
 * create_packet() for a table of segment layouts (including a zero-length
 * middle segment, a header case, and one deliberately-failing oversized
 * read flagged MBUF_NEG_TEST_READ), then verify the copied-out bytes
 * match the continuous pattern written by create_packet().
 */
2176 test_pktmbuf_read_from_chain(struct rte_mempool *pktmbuf_pool)
2179 struct test_case test_cases[] = {
2181 .seg_lengths = { 100, 100, 100 },
2183 .flags = MBUF_NO_HEADER,
2188 .seg_lengths = { 100, 125, 150 },
2190 .flags = MBUF_NO_HEADER,
2195 .seg_lengths = { 100, 100 },
2197 .flags = MBUF_NO_HEADER,
2202 .seg_lengths = { 100, 200 },
2204 .flags = MBUF_HEADER,
/* skip over the prepended header when reading this case */
2205 .read_off = sizeof(struct rte_ether_hdr),
2209 .seg_lengths = { 1000, 100 },
2211 .flags = MBUF_NO_HEADER,
2216 .seg_lengths = { 1024, 0, 100 },
2218 .flags = MBUF_NO_HEADER,
2223 .seg_lengths = { 1000, 1, 1000 },
2225 .flags = MBUF_NO_HEADER,
2230 .seg_lengths = { MBUF_TEST_DATA_LEN,
2231 MBUF_TEST_DATA_LEN2,
2232 MBUF_TEST_DATA_LEN3, 800, 10 },
/* negative case: read_len exceeds the packet, read must return NULL */
2234 .flags = MBUF_NEG_TEST_READ,
2236 .read_len = MBUF_DATA_SIZE
2241 const char *data_copy = NULL;
2242 char data_buf[MBUF_DATA_SIZE];
2244 memset(data_buf, 0, MBUF_DATA_SIZE);
2246 for (i = 0; i < RTE_DIM(test_cases); i++) {
2247 m = create_packet(pktmbuf_pool, &test_cases[i]);
2249 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
/* multi-segment read copies into data_buf */
2251 data_copy = rte_pktmbuf_read(m, test_cases[i].read_off,
2252 test_cases[i].read_len, data_buf);
2253 if (test_cases[i].flags == MBUF_NEG_TEST_READ) {
2254 if (data_copy != NULL)
2255 GOTO_FAIL("%s: mbuf data read should fail!\n",
2258 rte_pktmbuf_free(m);
2263 if (data_copy == NULL)
2264 GOTO_FAIL("%s: Error in reading packet data!\n",
/* verify the continuous pattern laid down by create_packet() */
2266 for (pos = 0; pos < test_cases[i].read_len; pos++) {
2267 if (data_copy[pos] !=
2268 (char)((test_cases[i].read_off + pos)
2270 GOTO_FAIL("Data corrupted at offset %u is %2X",
2271 pos, data_copy[pos]);
2273 rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
2274 rte_pktmbuf_free(m);
2281 rte_pktmbuf_free(m);
2287 /* Define a free call back function to be used for external buffer */
/*
 * Free callback registered with rte_pktmbuf_attach_extbuf(): releases the
 * rte_malloc()'d external buffer that was passed through 'opaque'.
 */
2289 ext_buf_free_callback_fn(void *addr __rte_unused, void *opaque)
2291 void *ext_buf_addr = opaque;
2293 if (ext_buf_addr == NULL) {
2294 printf("External buffer address is invalid\n");
2297 rte_free(ext_buf_addr);
/* NOTE(review): dead store — NULLing this local has no effect outside
 * the callback; the caller's pointer is untouched. */
2298 ext_buf_addr = NULL;
2299 printf("External buffer freed via callback\n");
2303 * Test to initialize shared data in external buffer before attaching to mbuf
2304 * - Allocate mbuf with no data.
2305 * - Allocate an external buffer whose size is large enough to accommodate
2306 * rte_mbuf_ext_shared_info.
2307 * - Invoke pktmbuf_ext_shinfo_init_helper to initialize shared data.
2308 * - Invoke rte_pktmbuf_attach_extbuf to attach external buffer to the mbuf.
2309 * - Clone another mbuf and attach the same external buffer to it.
2310 * - Invoke rte_pktmbuf_detach_extbuf to detach the external buffer from mbuf.
/*
 * External-buffer attach/detach lifecycle: initialize shared info at the
 * tail of an rte_malloc()'d buffer, attach it to an mbuf and to a clone,
 * and verify the shared-info refcount at every step (1 after init, 2 after
 * the second attach, manual update to 3 and back to 2, then 1 and 0 as the
 * two mbufs detach).  The last detach triggers ext_buf_free_callback_fn.
 */
2313 test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool)
2315 struct rte_mbuf *m = NULL;
2316 struct rte_mbuf *clone = NULL;
2317 struct rte_mbuf_ext_shared_info *ret_shinfo = NULL;
2318 rte_iova_t buf_iova;
2319 void *ext_buf_addr = NULL;
/* room for the test data plus the trailing shared-info block */
2320 uint16_t buf_len = EXT_BUF_TEST_DATA_LEN +
2321 sizeof(struct rte_mbuf_ext_shared_info);
2324 m = rte_pktmbuf_alloc(pktmbuf_pool);
2326 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2327 if (rte_pktmbuf_pkt_len(m) != 0)
2328 GOTO_FAIL("%s: Bad packet length\n", __func__);
2329 rte_mbuf_sanity_check(m, 0);
2331 ext_buf_addr = rte_malloc("External buffer", buf_len,
2332 RTE_CACHE_LINE_SIZE);
2333 if (ext_buf_addr == NULL)
2334 GOTO_FAIL("%s: External buffer allocation failed\n", __func__);
/* shared info is carved from the end of the buffer; buf_len shrinks */
2336 ret_shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf_addr, &buf_len,
2337 ext_buf_free_callback_fn, ext_buf_addr);
2338 if (ret_shinfo == NULL)
2339 GOTO_FAIL("%s: Shared info initialization failed!\n", __func__);
2341 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
2342 GOTO_FAIL("%s: External refcount is not 1\n", __func__);
2344 if (rte_mbuf_refcnt_read(m) != 1)
2345 GOTO_FAIL("%s: Invalid refcnt in mbuf\n", __func__);
/* NOTE(review): buffer came from rte_malloc(), not a mempool —
 * rte_malloc_virt2iova() looks like the intended call here (upstream
 * DPDK later changed this); confirm. */
2347 buf_iova = rte_mempool_virt2iova(ext_buf_addr);
2348 rte_pktmbuf_attach_extbuf(m, ext_buf_addr, buf_iova, buf_len,
2350 if (m->ol_flags != EXT_ATTACHED_MBUF)
2351 GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
2354 /* allocate one more mbuf */
2355 clone = rte_pktmbuf_clone(m, pktmbuf_pool);
2357 GOTO_FAIL("%s: mbuf clone allocation failed!\n", __func__);
2358 if (rte_pktmbuf_pkt_len(clone) != 0)
2359 GOTO_FAIL("%s: Bad packet length\n", __func__);
2361 /* attach the same external buffer to the cloned mbuf */
2362 rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
2364 if (clone->ol_flags != EXT_ATTACHED_MBUF)
2365 GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
2368 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
2369 GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
2371 /* test to manually update ext_buf_ref_cnt from 2 to 3*/
2372 rte_mbuf_ext_refcnt_update(ret_shinfo, 1);
2373 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 3)
2374 GOTO_FAIL("%s: Update ext_buf ref_cnt failed\n", __func__);
2376 /* reset the ext_refcnt before freeing the external buffer */
2377 rte_mbuf_ext_refcnt_set(ret_shinfo, 2);
2378 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
2379 GOTO_FAIL("%s: set ext_buf ref_cnt failed\n", __func__);
2381 /* detach the external buffer from mbufs */
2382 rte_pktmbuf_detach_extbuf(m);
2383 /* check if ref cnt is decremented */
2384 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
2385 GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
/* refcount hits 0 here: the free callback releases ext_buf_addr */
2387 rte_pktmbuf_detach_extbuf(clone);
2388 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 0)
2389 GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
2391 rte_pktmbuf_free(m);
2393 rte_pktmbuf_free(clone);
/* fail path: free whichever resources are still owned */
2400 rte_pktmbuf_free(m);
2404 rte_pktmbuf_free(clone);
2407 if (ext_buf_addr != NULL) {
2408 rte_free(ext_buf_addr);
2409 ext_buf_addr = NULL;
/*
 * Test entry point (registered below as 'mbuf_autotest'): create the two
 * mbuf pools and run every mbuf sub-test in sequence.  Any failing
 * sub-test jumps (via the elided goto) to the common exit where both
 * pools are freed.
 */
2418 struct rte_mempool *pktmbuf_pool = NULL;
2419 struct rte_mempool *pktmbuf_pool2 = NULL;
/* compile-time guard: rte_mbuf must stay exactly two cache lines */
2422 RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != RTE_CACHE_LINE_MIN_SIZE * 2);
2424 /* create pktmbuf pool if it does not exist */
2425 pktmbuf_pool = rte_pktmbuf_pool_create("test_pktmbuf_pool",
2426 NB_MBUF, MEMPOOL_CACHE_SIZE, 0, MBUF_DATA_SIZE,
2429 if (pktmbuf_pool == NULL) {
2430 printf("cannot allocate mbuf pool\n");
2434 /* create a specific pktmbuf pool with a priv_size != 0 and no data
2436 pktmbuf_pool2 = rte_pktmbuf_pool_create("test_pktmbuf_pool2",
2437 NB_MBUF, MEMPOOL_CACHE_SIZE, MBUF2_PRIV_SIZE, 0,
2440 if (pktmbuf_pool2 == NULL) {
2441 printf("cannot allocate mbuf pool\n");
2445 /* test multiple mbuf alloc */
2446 if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
2447 printf("test_mbuf_pool() failed\n");
2451 /* do it another time to check that all mbufs were freed */
2452 if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
2453 printf("test_mbuf_pool() failed (2)\n");
2457 /* test bulk mbuf alloc and free */
2458 if (test_pktmbuf_pool_bulk() < 0) {
2459 printf("test_pktmbuf_pool_bulk() failed\n");
2463 /* test that the pointer to the data on a packet mbuf is set properly */
2464 if (test_pktmbuf_pool_ptr(pktmbuf_pool) < 0) {
2465 printf("test_pktmbuf_pool_ptr() failed\n");
2469 /* test data manipulation in mbuf */
2470 if (test_one_pktmbuf(pktmbuf_pool) < 0) {
2471 printf("test_one_mbuf() failed\n");
2477 * do it another time, to check that allocation reinitialize
2478 * the mbuf correctly
2480 if (test_one_pktmbuf(pktmbuf_pool) < 0) {
2481 printf("test_one_mbuf() failed (2)\n");
2485 if (test_pktmbuf_with_non_ascii_data(pktmbuf_pool) < 0) {
2486 printf("test_pktmbuf_with_non_ascii_data() failed\n");
2490 /* test free pktmbuf segment one by one */
2491 if (test_pktmbuf_free_segment(pktmbuf_pool) < 0) {
2492 printf("test_pktmbuf_free_segment() failed.\n");
2496 if (testclone_testupdate_testdetach(pktmbuf_pool) < 0) {
2497 printf("testclone_and_testupdate() failed \n");
2501 if (test_pktmbuf_copy(pktmbuf_pool) < 0) {
2502 printf("test_pktmbuf_copy() failed\n");
2506 if (test_attach_from_different_pool(pktmbuf_pool, pktmbuf_pool2) < 0) {
2507 printf("test_attach_from_different_pool() failed\n");
2511 if (test_refcnt_mbuf() < 0) {
2512 printf("test_refcnt_mbuf() failed \n");
2516 if (test_failing_mbuf_sanity_check(pktmbuf_pool) < 0) {
2517 printf("test_failing_mbuf_sanity_check() failed\n");
2521 if (test_mbuf_linearize_check(pktmbuf_pool) < 0) {
2522 printf("test_mbuf_linearize_check() failed\n");
2526 if (test_tx_offload() < 0) {
2527 printf("test_tx_offload() failed\n");
2531 if (test_get_rx_ol_flag_list() < 0) {
2532 printf("test_rte_get_rx_ol_flag_list() failed\n");
2536 if (test_get_tx_ol_flag_list() < 0) {
2537 printf("test_rte_get_tx_ol_flag_list() failed\n");
2541 if (test_get_rx_ol_flag_name() < 0) {
2542 printf("test_rte_get_rx_ol_flag_name() failed\n");
2546 if (test_get_tx_ol_flag_name() < 0) {
2547 printf("test_rte_get_tx_ol_flag_name() failed\n");
2551 if (test_mbuf_validate_tx_offload_one(pktmbuf_pool) < 0) {
2552 printf("test_mbuf_validate_tx_offload_one() failed\n");
2556 /* test for allocating a bulk of mbufs with various sizes */
2557 if (test_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
2558 printf("test_rte_pktmbuf_alloc_bulk() failed\n");
2562 /* test for allocating a bulk of mbufs with various sizes */
2563 if (test_neg_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
2564 printf("test_neg_rte_pktmbuf_alloc_bulk() failed\n");
2568 /* test to read mbuf packet */
2569 if (test_pktmbuf_read(pktmbuf_pool) < 0) {
2570 printf("test_rte_pktmbuf_read() failed\n");
2574 /* test to read mbuf packet from offset */
2575 if (test_pktmbuf_read_from_offset(pktmbuf_pool) < 0) {
2576 printf("test_rte_pktmbuf_read_from_offset() failed\n");
2580 /* test to read data from chain of mbufs with data segments */
2581 if (test_pktmbuf_read_from_chain(pktmbuf_pool) < 0) {
2582 printf("test_rte_pktmbuf_read_from_chain() failed\n");
2586 /* test to initialize shared info. at the end of external buffer */
2587 if (test_pktmbuf_ext_shinfo_init_helper(pktmbuf_pool) < 0) {
2588 printf("test_pktmbuf_ext_shinfo_init_helper() failed\n");
/* common exit: release both pools whether we succeeded or failed */
2594 rte_mempool_free(pktmbuf_pool);
2595 rte_mempool_free(pktmbuf_pool2);
2600 REGISTER_TEST_COMMAND(mbuf_autotest, test_mbuf);