1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
8 #include "rte_rawdev.h"
9 #include "rte_ioat_rawdev.h"
10 #include "ioat_private.h"
/* Highest rawdev id the test will accept; expected_ring_size[] is sized by this. */
12 #define MAX_SUPPORTED_RAWDEVS 64
/* Conventional "test skipped" exit status (matches automake's 77). */
13 #define TEST_SKIPPED 77
16 int ioat_rawdev_test(uint16_t dev_id); /* pre-define to keep compiler happy */
/* mbuf pool shared by all test cases; created inside ioat_rawdev_test(). */
18 static struct rte_mempool *pool;
/* Ring size recorded per device after configure; checked on a later re-run. */
19 static unsigned short expected_ring_size[MAX_SUPPORTED_RAWDEVS];
/* Error printer that automatically prefixes the calling function and line. */
21 #define PRINT_ERR(...) print_err(__func__, __LINE__, __VA_ARGS__)
/*
 * Backend for PRINT_ERR(): writes "In <func>:<line> - " followed by the
 * caller's printf-style message to stderr, returning the character count.
 * NOTE(review): this extract is missing lines — the return-type line, the
 * va_list declaration, va_start/va_end and the return statement are not
 * visible here; confirm against the full file before editing.
 */
24 __rte_format_printf(3, 4)
25 print_err(const char *func, int lineno, const char *format, ...)
30 ret = fprintf(stderr, "In %s:%d - ", func, lineno);
32 ret += vfprintf(stderr, format, ap);
/*
 * Enqueue a batch of 32 copies and validate the completions.
 * split_batches: submit via perform_ops() halfway through enqueuing, so the
 *   copies go to hardware as two batches.
 * split_completions: gather the completions in two half-sized calls rather
 *   than one.
 * NOTE(review): error-return statements and some closing braces are missing
 * from this extract (the embedded line numbering skips); confirm against the
 * full file.
 */
39 do_multi_copies(int dev_id, int split_batches, int split_completions)
41 struct rte_mbuf *srcs[32], *dsts[32];
/* completion arrays are double-sized so an over-return would be visible */
42 struct rte_mbuf *completed_src[64];
43 struct rte_mbuf *completed_dst[64];
46 for (i = 0; i < RTE_DIM(srcs); i++) {
/* when splitting batches, push the first half to hardware mid-loop */
49 if (split_batches && i == RTE_DIM(srcs) / 2)
50 rte_ioat_perform_ops(dev_id);
52 srcs[i] = rte_pktmbuf_alloc(pool);
53 dsts[i] = rte_pktmbuf_alloc(pool);
54 src_data = rte_pktmbuf_mtod(srcs[i], char *);
/* randomize the source so a successful copy is distinguishable from noise */
56 for (j = 0; j < COPY_LEN; j++)
57 src_data[j] = rand() & 0xFF;
/* the mbuf pointers are passed as opaque handles and checked on completion */
59 if (rte_ioat_enqueue_copy(dev_id,
60 srcs[i]->buf_iova + srcs[i]->data_off,
61 dsts[i]->buf_iova + dsts[i]->data_off,
64 (uintptr_t)dsts[i]) != 1) {
65 PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n",
/* submit all (remaining) enqueued descriptors to the device */
70 rte_ioat_perform_ops(dev_id);
73 if (split_completions) {
74 /* gather completions in two halves */
75 uint16_t half_len = RTE_DIM(srcs) / 2;
76 if (rte_ioat_completed_ops(dev_id, half_len, NULL, NULL,
77 (void *)completed_src,
78 (void *)completed_dst) != half_len) {
79 PRINT_ERR("Error with rte_ioat_completed_ops - first half request\n");
80 rte_rawdev_dump(dev_id, stdout);
83 if (rte_ioat_completed_ops(dev_id, half_len, NULL, NULL,
84 (void *)&completed_src[half_len],
85 (void *)&completed_dst[half_len]) != half_len) {
86 PRINT_ERR("Error with rte_ioat_completed_ops - second half request\n");
87 rte_rawdev_dump(dev_id, stdout);
91 /* gather all completions in one go */
92 if (rte_ioat_completed_ops(dev_id, RTE_DIM(completed_src), NULL, NULL,
93 (void *)completed_src,
94 (void *)completed_dst) != RTE_DIM(srcs)) {
95 PRINT_ERR("Error with rte_ioat_completed_ops\n");
96 rte_rawdev_dump(dev_id, stdout);
/* verify the returned handles match the enqueued mbufs, in enqueue order */
100 for (i = 0; i < RTE_DIM(srcs); i++) {
101 char *src_data, *dst_data;
103 if (completed_src[i] != srcs[i]) {
104 PRINT_ERR("Error with source pointer %u\n", i);
107 if (completed_dst[i] != dsts[i]) {
108 PRINT_ERR("Error with dest pointer %u\n", i);
/* byte-compare src and dst to confirm the DMA copy actually happened */
112 src_data = rte_pktmbuf_mtod(srcs[i], char *);
113 dst_data = rte_pktmbuf_mtod(dsts[i], char *);
114 for (j = 0; j < COPY_LEN; j++)
115 if (src_data[j] != dst_data[j]) {
116 PRINT_ERR("Error with copy of packet %u, byte %u\n",
120 rte_pktmbuf_free(srcs[i]);
121 rte_pktmbuf_free(dsts[i]);
/*
 * Copy-operation test driver: a single copy, then the same copy repeated
 * max_ops times, then the multi-copy batch variants via do_multi_copies().
 * NOTE(review): error returns, scope braces and some declarations (e.g. the
 * loop index i) are missing from this extract; confirm against the full file.
 */
127 test_enqueue_copies(int dev_id)
131 /* test doing a single copy */
133 struct rte_mbuf *src, *dst;
134 char *src_data, *dst_data;
135 struct rte_mbuf *completed[2] = {0};
137 src = rte_pktmbuf_alloc(pool);
138 dst = rte_pktmbuf_alloc(pool);
139 src_data = rte_pktmbuf_mtod(src, char *);
140 dst_data = rte_pktmbuf_mtod(dst, char *);
/* randomize source payload so the copy result can be verified */
142 for (i = 0; i < COPY_LEN; i++)
143 src_data[i] = rand() & 0xFF;
145 if (rte_ioat_enqueue_copy(dev_id,
146 src->buf_iova + src->data_off,
147 dst->buf_iova + dst->data_off,
150 (uintptr_t)dst) != 1) {
151 PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
154 rte_ioat_perform_ops(dev_id);
/* src handle lands in completed[0], dst handle in completed[1] */
157 if (rte_ioat_completed_ops(dev_id, 1, NULL, NULL, (void *)&completed[0],
158 (void *)&completed[1]) != 1) {
159 PRINT_ERR("Error with rte_ioat_completed_ops\n");
162 if (completed[0] != src || completed[1] != dst) {
163 PRINT_ERR("Error with completions: got (%p, %p), not (%p,%p)\n",
164 completed[0], completed[1], src, dst);
168 for (i = 0; i < COPY_LEN; i++)
169 if (dst_data[i] != src_data[i]) {
170 PRINT_ERR("Data mismatch at char %u [Got %02x not %02x]\n",
171 i, dst_data[i], src_data[i]);
174 rte_pktmbuf_free(src);
175 rte_pktmbuf_free(dst);
177 /* check ring is now empty */
178 if (rte_ioat_completed_ops(dev_id, 1, NULL, NULL, (void *)&completed[0],
179 (void *)&completed[1]) != 0) {
180 PRINT_ERR("Error: got unexpected returned handles from rte_ioat_completed_ops\n");
185 /* test doing a multiple single copies */
187 const uint16_t max_ops = 4;
188 struct rte_mbuf *src, *dst;
189 char *src_data, *dst_data;
/* handles array split in half: srcs in the low half, dsts in the high half */
190 struct rte_mbuf *completed[32] = {0};
191 const uint16_t max_completions = RTE_DIM(completed) / 2;
193 src = rte_pktmbuf_alloc(pool);
194 dst = rte_pktmbuf_alloc(pool);
195 src_data = rte_pktmbuf_mtod(src, char *);
196 dst_data = rte_pktmbuf_mtod(dst, char *);
198 for (i = 0; i < COPY_LEN; i++)
199 src_data[i] = rand() & 0xFF;
201 /* perform the same copy <max_ops> times */
202 for (i = 0; i < max_ops; i++) {
203 if (rte_ioat_enqueue_copy(dev_id,
204 src->buf_iova + src->data_off,
205 dst->buf_iova + dst->data_off,
208 (uintptr_t)dst) != 1) {
209 PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
212 rte_ioat_perform_ops(dev_id);
/* asking for more completions than ops: exactly max_ops must come back */
216 if (rte_ioat_completed_ops(dev_id, max_completions, NULL, NULL,
217 (void *)&completed[0],
218 (void *)&completed[max_completions]) != max_ops) {
219 PRINT_ERR("Error with rte_ioat_completed_ops\n");
220 rte_rawdev_dump(dev_id, stdout);
223 if (completed[0] != src || completed[max_completions] != dst) {
224 PRINT_ERR("Error with completions: got (%p, %p), not (%p,%p)\n",
225 completed[0], completed[max_completions], src, dst);
229 for (i = 0; i < COPY_LEN; i++)
230 if (dst_data[i] != src_data[i]) {
231 PRINT_ERR("Data mismatch at char %u\n", i);
234 rte_pktmbuf_free(src);
235 rte_pktmbuf_free(dst);
238 /* test doing multiple copies */
239 do_multi_copies(dev_id, 0, 0); /* enqueue and complete one batch at a time */
240 do_multi_copies(dev_id, 1, 0); /* enqueue 2 batches and then complete both */
241 do_multi_copies(dev_id, 0, 1); /* enqueue 1 batch, then complete in two halves */
/*
 * Fill-operation test: for a range of lengths, zero the destination, run a
 * hardware fill with a fixed 64-bit pattern, and verify every byte matches
 * the pattern repeated modulo 8.
 * NOTE(review): error returns and closing braces are missing from this
 * extract; confirm against the full file.
 */
246 test_enqueue_fill(int dev_id)
248 const unsigned int lengths[] = {8, 64, 1024, 50, 100, 89};
249 struct rte_mbuf *dst = rte_pktmbuf_alloc(pool);
250 char *dst_data = rte_pktmbuf_mtod(dst, char *);
251 struct rte_mbuf *completed[2] = {0};
252 uint64_t pattern = 0xfedcba9876543210;
255 for (i = 0; i < RTE_DIM(lengths); i++) {
/* zero the buffer first so stale data cannot mask a failed fill */
257 memset(dst_data, 0, lengths[i]);
259 /* perform the fill operation */
260 if (rte_ioat_enqueue_fill(dev_id, pattern,
261 dst->buf_iova + dst->data_off, lengths[i],
262 (uintptr_t)dst) != 1) {
263 PRINT_ERR("Error with rte_ioat_enqueue_fill\n");
267 rte_ioat_perform_ops(dev_id);
270 if (rte_ioat_completed_ops(dev_id, 1, NULL, NULL, (void *)&completed[0],
271 (void *)&completed[1]) != 1) {
272 PRINT_ERR("Error with completed ops\n");
275 /* check the result */
/* hardware repeats the 8-byte pattern, so compare byte j against j % 8 */
276 for (j = 0; j < lengths[i]; j++) {
277 char pat_byte = ((char *)&pattern)[j % 8];
278 if (dst_data[j] != pat_byte) {
279 PRINT_ERR("Error with fill operation (lengths = %u): got (%x), not (%x)\n",
280 lengths[i], dst_data[j], pat_byte);
286 rte_pktmbuf_free(dst);
291 test_burst_capacity(int dev_id)
293 #define BURST_SIZE 64
294 const unsigned int ring_space = rte_ioat_burst_capacity(dev_id);
295 struct rte_mbuf *src, *dst;
296 unsigned int length = 1024;
297 unsigned int i, j, iter;
298 unsigned int old_cap, cap;
299 uintptr_t completions[BURST_SIZE];
301 src = rte_pktmbuf_alloc(pool);
302 dst = rte_pktmbuf_alloc(pool);
304 old_cap = ring_space;
305 /* to test capacity, we enqueue elements and check capacity is reduced
306 * by one each time - rebaselining the expected value after each burst
307 * as the capacity is only for a burst. We enqueue multiple bursts to
308 * fill up half the ring, before emptying it again. We do this twice to
309 * ensure that we get to test scenarios where we get ring wrap-around
311 for (iter = 0; iter < 2; iter++) {
312 for (i = 0; i < ring_space / (2 * BURST_SIZE); i++) {
313 cap = rte_ioat_burst_capacity(dev_id);
315 PRINT_ERR("Error, avail ring capacity has gone up, not down\n");
320 for (j = 0; j < BURST_SIZE; j++) {
321 if (rte_ioat_enqueue_copy(dev_id, rte_pktmbuf_iova(src),
322 rte_pktmbuf_iova(dst), length, 0, 0) != 1) {
323 PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
326 if (cap - rte_ioat_burst_capacity(dev_id) != j + 1) {
327 PRINT_ERR("Error, ring capacity did not change as expected\n");
331 rte_ioat_perform_ops(dev_id);
334 for (i = 0; i < ring_space / (2 * BURST_SIZE); i++) {
335 if (rte_ioat_completed_ops(dev_id, BURST_SIZE,
337 completions, completions) != BURST_SIZE) {
338 PRINT_ERR("Error with completions\n");
342 if (rte_ioat_burst_capacity(dev_id) != ring_space) {
343 PRINT_ERR("Error, ring capacity has not reset to original value\n");
346 old_cap = ring_space;
349 rte_pktmbuf_free(src);
350 rte_pktmbuf_free(dst);
/*
 * Verify per-op status reporting: deliberately fail selected copies by
 * passing a NULL source IOVA (valid only in IOVA-as-VA mode on DSA/idxd
 * hardware) and check that the status array and not_ok count flag exactly
 * the expected ops. Four scenarios: one full batch, two batches gathered at
 * once, a batch gathered in halves, and a fenced batch where everything
 * after the failure is skipped.
 * NOTE(review): this extract is missing lines (error returns, braces, the
 * not_ok declaration), and "¬_ok" in the completed_ops calls below is an
 * HTML-entity mangling of "&not_ok" — the file's encoding needs repair.
 */
356 test_completion_status(int dev_id)
358 #define COMP_BURST_SZ 16
/* indices of the copies that will be given a NULL source, i.e. will fail */
359 const unsigned int fail_copy[] = {0, 7, 15};
360 struct rte_mbuf *srcs[COMP_BURST_SZ], *dsts[COMP_BURST_SZ];
361 struct rte_mbuf *completed_src[COMP_BURST_SZ * 2];
362 struct rte_mbuf *completed_dst[COMP_BURST_SZ * 2];
363 unsigned int length = 1024;
367 /* Test single full batch statuses */
368 for (i = 0; i < RTE_DIM(fail_copy); i++) {
369 uint32_t status[COMP_BURST_SZ] = {0};
372 for (j = 0; j < COMP_BURST_SZ; j++) {
373 srcs[j] = rte_pktmbuf_alloc(pool);
374 dsts[j] = rte_pktmbuf_alloc(pool);
/* NULL source address triggers a hardware error for op fail_copy[i] */
376 if (rte_ioat_enqueue_copy(dev_id,
377 (j == fail_copy[i] ? (phys_addr_t)NULL :
378 (srcs[j]->buf_iova + srcs[j]->data_off)),
379 dsts[j]->buf_iova + dsts[j]->data_off,
382 (uintptr_t)dsts[j]) != 1) {
383 PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n", j);
387 rte_ioat_perform_ops(dev_id);
/* "¬_ok" here is mis-encoded "&not_ok" (see function header note) */
390 if (rte_ioat_completed_ops(dev_id, COMP_BURST_SZ, status, ¬_ok,
391 (void *)completed_src, (void *)completed_dst) != COMP_BURST_SZ) {
392 PRINT_ERR("Error with rte_ioat_completed_ops\n");
393 rte_rawdev_dump(dev_id, stdout);
/* exactly one failure expected, at the planted index */
396 if (not_ok != 1 || status[fail_copy[i]] == RTE_IOAT_OP_SUCCESS) {
398 PRINT_ERR("Error, missing expected failed copy, %u\n", fail_copy[i]);
399 for (j = 0; j < COMP_BURST_SZ; j++)
400 printf("%u ", status[j]);
401 printf("<-- Statuses\n");
404 for (j = 0; j < COMP_BURST_SZ; j++) {
405 rte_pktmbuf_free(completed_src[j]);
406 rte_pktmbuf_free(completed_dst[j]);
410 /* Test gathering status for two batches at once */
411 for (i = 0; i < RTE_DIM(fail_copy); i++) {
412 uint32_t status[COMP_BURST_SZ] = {0};
413 unsigned int batch, j;
414 unsigned int expected_failures = 0;
416 for (batch = 0; batch < 2; batch++) {
/* each of the two batches is half a burst; the fail index may fall
 * outside the half-batch, in which case that batch has no failure */
417 for (j = 0; j < COMP_BURST_SZ/2; j++) {
418 srcs[j] = rte_pktmbuf_alloc(pool);
419 dsts[j] = rte_pktmbuf_alloc(pool);
421 if (j == fail_copy[i])
423 if (rte_ioat_enqueue_copy(dev_id,
424 (j == fail_copy[i] ? (phys_addr_t)NULL :
425 (srcs[j]->buf_iova + srcs[j]->data_off)),
426 dsts[j]->buf_iova + dsts[j]->data_off,
429 (uintptr_t)dsts[j]) != 1) {
430 PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n",
435 rte_ioat_perform_ops(dev_id);
/* "¬_ok" = mis-encoded "&not_ok" */
439 if (rte_ioat_completed_ops(dev_id, COMP_BURST_SZ, status, ¬_ok,
440 (void *)completed_src, (void *)completed_dst) != COMP_BURST_SZ) {
441 PRINT_ERR("Error with rte_ioat_completed_ops\n");
442 rte_rawdev_dump(dev_id, stdout);
445 if (not_ok != expected_failures) {
447 PRINT_ERR("Error, missing expected failed copy, got %u, not %u\n",
448 not_ok, expected_failures);
449 for (j = 0; j < COMP_BURST_SZ; j++)
450 printf("%u ", status[j]);
451 printf("<-- Statuses\n");
454 for (j = 0; j < COMP_BURST_SZ; j++) {
455 rte_pktmbuf_free(completed_src[j]);
456 rte_pktmbuf_free(completed_dst[j]);
460 /* Test gathering status for half batch at a time */
461 for (i = 0; i < RTE_DIM(fail_copy); i++) {
462 uint32_t status[COMP_BURST_SZ] = {0};
465 for (j = 0; j < COMP_BURST_SZ; j++) {
466 srcs[j] = rte_pktmbuf_alloc(pool);
467 dsts[j] = rte_pktmbuf_alloc(pool);
469 if (rte_ioat_enqueue_copy(dev_id,
470 (j == fail_copy[i] ? (phys_addr_t)NULL :
471 (srcs[j]->buf_iova + srcs[j]->data_off)),
472 dsts[j]->buf_iova + dsts[j]->data_off,
475 (uintptr_t)dsts[j]) != 1) {
476 PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n", j);
480 rte_ioat_perform_ops(dev_id);
/* first half-batch gather ("¬_ok" = mis-encoded "&not_ok") */
483 if (rte_ioat_completed_ops(dev_id, COMP_BURST_SZ / 2, status, ¬_ok,
484 (void *)completed_src,
485 (void *)completed_dst) != (COMP_BURST_SZ / 2)) {
486 PRINT_ERR("Error with rte_ioat_completed_ops\n");
487 rte_rawdev_dump(dev_id, stdout);
/* failure is only expected in this half if its index falls in range */
490 if (fail_copy[i] < COMP_BURST_SZ / 2 &&
491 (not_ok != 1 || status[fail_copy[i]] == RTE_IOAT_OP_SUCCESS)) {
492 PRINT_ERR("Missing expected failure in first half-batch\n");
493 rte_rawdev_dump(dev_id, stdout);
/* second half-batch gather */
496 if (rte_ioat_completed_ops(dev_id, COMP_BURST_SZ / 2, status, ¬_ok,
497 (void *)&completed_src[COMP_BURST_SZ / 2],
498 (void *)&completed_dst[COMP_BURST_SZ / 2]) != (COMP_BURST_SZ / 2)) {
499 PRINT_ERR("Error with rte_ioat_completed_ops\n");
500 rte_rawdev_dump(dev_id, stdout);
/* status indices in the second gather are offset by half a burst */
503 if (fail_copy[i] >= COMP_BURST_SZ / 2 && (not_ok != 1 ||
504 status[fail_copy[i] - (COMP_BURST_SZ / 2)]
505 == RTE_IOAT_OP_SUCCESS)) {
506 PRINT_ERR("Missing expected failure in second half-batch\n");
507 rte_rawdev_dump(dev_id, stdout);
511 for (j = 0; j < COMP_BURST_SZ; j++) {
512 rte_pktmbuf_free(completed_src[j]);
513 rte_pktmbuf_free(completed_dst[j]);
517 /* Test gathering statuses with fence */
/* start at i = 1: a fence straight after a failing op 0 is not useful here */
518 for (i = 1; i < RTE_DIM(fail_copy); i++) {
519 uint32_t status[COMP_BURST_SZ * 2] = {0};
523 for (j = 0; j < COMP_BURST_SZ; j++) {
524 srcs[j] = rte_pktmbuf_alloc(pool);
525 dsts[j] = rte_pktmbuf_alloc(pool);
527 /* always fail the first copy */
528 if (rte_ioat_enqueue_copy(dev_id,
529 (j == 0 ? (phys_addr_t)NULL :
530 (srcs[j]->buf_iova + srcs[j]->data_off)),
531 dsts[j]->buf_iova + dsts[j]->data_off,
534 (uintptr_t)dsts[j]) != 1) {
535 PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n", j);
538 /* put in a fence which will stop any further transactions
539 * because we had a previous failure.
541 if (j == fail_copy[i])
542 rte_ioat_fence(dev_id);
544 rte_ioat_perform_ops(dev_id);
/* "¬_ok" = mis-encoded "&not_ok" */
547 count = rte_ioat_completed_ops(dev_id, COMP_BURST_SZ * 2, status, ¬_ok,
548 (void *)completed_src, (void *)completed_dst);
549 if (count != COMP_BURST_SZ) {
550 PRINT_ERR("Error with rte_ioat_completed_ops, got %u not %u\n",
551 count, COMP_BURST_SZ);
552 for (j = 0; j < count; j++)
553 printf("%u ", status[j]);
554 printf("<-- Statuses\n");
/* op 0 failed, and everything after the fence should be skipped, so the
 * not-ok count covers op 0 plus all ops beyond fail_copy[i] */
557 if (not_ok != COMP_BURST_SZ - fail_copy[i]) {
558 PRINT_ERR("Unexpected failed copy count, got %u, expected %u\n",
559 not_ok, COMP_BURST_SZ - fail_copy[i]);
560 for (j = 0; j < COMP_BURST_SZ; j++)
561 printf("%u ", status[j]);
562 printf("<-- Statuses\n");
565 if (status[0] == RTE_IOAT_OP_SUCCESS || status[0] == RTE_IOAT_OP_SKIPPED) {
566 PRINT_ERR("Error, op 0 unexpectedly did not fail.\n");
/* ops between the failure and the fence must still have succeeded */
569 for (j = 1; j <= fail_copy[i]; j++) {
570 if (status[j] != RTE_IOAT_OP_SUCCESS) {
571 PRINT_ERR("Error, op %u unexpectedly failed\n", j);
/* ops after the fence must all be reported as skipped */
575 for (j = fail_copy[i] + 1; j < COMP_BURST_SZ; j++) {
576 if (status[j] != RTE_IOAT_OP_SKIPPED) {
577 PRINT_ERR("Error, all descriptors after fence should be invalid\n");
581 for (j = 0; j < COMP_BURST_SZ; j++) {
582 rte_pktmbuf_free(completed_src[j]);
583 rte_pktmbuf_free(completed_dst[j]);
/*
 * Test entry point for one ioat/idxd rawdev: checks/sets the ring size,
 * starts the device, creates the shared mbuf pool, then runs the copy,
 * fill, burst-capacity and (DSA + IOVA-as-VA only) completion-status test
 * cases, printing xstats as it goes. Returns 0 on success.
 * NOTE(review): error-handling returns, `goto err` paths and closing braces
 * are missing from this extract; confirm against the full file.
 */
591 ioat_rawdev_test(uint16_t dev_id)
593 #define IOAT_TEST_RINGSIZE 512
/* read the device type through the idxd private struct; presumably the
 * `type` field sits at a common offset in both the ioat and idxd private
 * structs — confirm against ioat_private.h */
594 const struct rte_idxd_rawdev *idxd =
595 (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
596 const enum rte_ioat_dev_type ioat_type = idxd->type;
597 struct rte_ioat_rawdev_config p = { .ring_size = -1 };
598 struct rte_rawdev_info info = { .dev_private = &p };
599 struct rte_rawdev_xstats_name *snames = NULL;
600 uint64_t *stats = NULL;
601 unsigned int *ids = NULL;
602 unsigned int nb_xstats;
/* expected_ring_size[] is statically sized; larger ids cannot be tracked */
605 if (dev_id >= MAX_SUPPORTED_RAWDEVS) {
606 printf("Skipping test. Cannot test rawdevs with id's greater than %d\n",
607 MAX_SUPPORTED_RAWDEVS);
/* on a repeat run the device should still report the size we configured */
611 rte_rawdev_info_get(dev_id, &info, sizeof(p));
612 if (p.ring_size != expected_ring_size[dev_id]) {
613 PRINT_ERR("Error, initial ring size is not as expected (Actual: %d, Expected: %d)\n",
614 (int)p.ring_size, expected_ring_size[dev_id]);
618 p.ring_size = IOAT_TEST_RINGSIZE;
619 if (rte_rawdev_configure(dev_id, &info, sizeof(p)) != 0) {
620 PRINT_ERR("Error with rte_rawdev_configure()\n");
/* read back and verify the configure actually took effect */
623 rte_rawdev_info_get(dev_id, &info, sizeof(p));
624 if (p.ring_size != IOAT_TEST_RINGSIZE) {
625 PRINT_ERR("Error, ring size is not %d (%d)\n",
626 IOAT_TEST_RINGSIZE, (int)p.ring_size);
629 expected_ring_size[dev_id] = p.ring_size;
631 if (rte_rawdev_start(dev_id) != 0) {
632 PRINT_ERR("Error with rte_rawdev_start()\n");
/* pool is sized at twice the ring so allocs never starve the tests */
636 pool = rte_pktmbuf_pool_create("TEST_IOAT_POOL",
637 p.ring_size * 2, /* n == num elements */
640 2048, /* data room size */
643 PRINT_ERR("Error with mempool creation\n");
647 /* allocate memory for xstats names and values */
648 nb_xstats = rte_rawdev_xstats_names_get(dev_id, NULL, 0);
650 snames = malloc(sizeof(*snames) * nb_xstats);
651 if (snames == NULL) {
652 PRINT_ERR("Error allocating xstat names memory\n");
655 rte_rawdev_xstats_names_get(dev_id, snames, nb_xstats);
657 ids = malloc(sizeof(*ids) * nb_xstats);
659 PRINT_ERR("Error allocating xstat ids memory\n");
662 for (i = 0; i < nb_xstats; i++)
665 stats = malloc(sizeof(*stats) * nb_xstats);
667 PRINT_ERR("Error allocating xstat memory\n");
671 /* run the test cases */
672 printf("Running Copy Tests\n");
673 for (i = 0; i < 100; i++) {
676 if (test_enqueue_copies(dev_id) != 0)
/* dump xstats after each iteration for progress/diagnostics */
679 rte_rawdev_xstats_get(dev_id, ids, stats, nb_xstats);
680 for (j = 0; j < nb_xstats; j++)
681 printf("%s: %"PRIu64" ", snames[j].name, stats[j]);
686 /* test enqueue fill operation */
687 printf("Running Fill Tests\n");
688 for (i = 0; i < 100; i++) {
691 if (test_enqueue_fill(dev_id) != 0)
694 rte_rawdev_xstats_get(dev_id, ids, stats, nb_xstats);
695 for (j = 0; j < nb_xstats; j++)
696 printf("%s: %"PRIu64" ", snames[j].name, stats[j]);
701 printf("Running Burst Capacity Test\n");
702 if (test_burst_capacity(dev_id) != 0)
705 /* only DSA devices report address errors, and we can only use null pointers
706 * to generate those errors when DPDK is in VA mode.
708 if (rte_eal_iova_mode() == RTE_IOVA_VA && ioat_type == RTE_IDXD_DEV) {
709 printf("Running Completions Status Test\n");
710 if (test_completion_status(dev_id) != 0)
/* success path: stop the device and clear stats for the next run */
714 rte_rawdev_stop(dev_id);
715 if (rte_rawdev_xstats_reset(dev_id, NULL, 0) != 0) {
716 PRINT_ERR("Error resetting xstat values\n");
720 rte_mempool_free(pool);
/* error path cleanup: stop, reset stats, free pool (presumably under an
 * `err:` label not visible in this extract) */
727 rte_rawdev_stop(dev_id);
728 rte_rawdev_xstats_reset(dev_id, NULL, 0);
729 rte_mempool_free(pool);