/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */

#include <inttypes.h>
#include <string.h>

#include <rte_dmadev.h>
#include <rte_mbuf.h>
#include <rte_pause.h>
#include <rte_cycles.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_dmadev_pmd.h>

#include "test.h"
#include "test_dmadev_api.h"
#define ERR_RETURN(...) do { print_err(__func__, __LINE__, __VA_ARGS__); return -1; } while (0)

#define COPY_LEN 1024

static struct rte_mempool *pool;
static uint16_t id_count;
static void
__rte_format_printf(3, 4)
print_err(const char *func, int lineno, const char *format, ...)
{
	va_list ap;

	fprintf(stderr, "In %s:%d - ", func, lineno);
	va_start(ap, format);
	vfprintf(stderr, format, ap);
	va_end(ap);
}
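/* Run the given test function for the requested number of iterations, then check the
 * device stats: all submitted ops must be reported completed, and no errors unless
 * errors are expected by the caller.
 */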
static int
runtest(const char *printable, int (*test_fn)(int16_t dev_id, uint16_t vchan), int iterations,
		int16_t dev_id, uint16_t vchan, bool check_err_stats)
{
	struct rte_dma_stats stats;
	int i;

	rte_dma_stats_reset(dev_id, vchan);
	printf("DMA Dev %d: Running %s Tests %s\n", dev_id, printable,
			check_err_stats ? " " : "(errors expected)");
	for (i = 0; i < iterations; i++) {
		if (test_fn(dev_id, vchan) < 0)
			return -1;

		rte_dma_stats_get(dev_id, 0, &stats);
		printf("Ops submitted: %"PRIu64"\t", stats.submitted);
		printf("Ops completed: %"PRIu64"\t", stats.completed);
		printf("Errors: %"PRIu64"\r", stats.errors);

		if (stats.completed != stats.submitted)
			ERR_RETURN("\nError, not all submitted jobs are reported as completed\n");
		if (check_err_stats && stats.errors != 0)
			ERR_RETURN("\nErrors reported during op processing, aborting tests\n");
	}
	printf("\n");
	return 0;
}
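/* Wait for outstanding operations to finish: poll the vchan status where the driver
 * supports it (for up to one second), otherwise just sleep briefly.
 */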
static void
await_hw(int16_t dev_id, uint16_t vchan)
{
	enum rte_dma_vchan_status st;

	if (rte_dma_vchan_status(dev_id, vchan, &st) < 0) {
		/* for drivers that don't support this op, just sleep for 1 millisecond */
		rte_delay_us_sleep(1000);
		return;
	}

	/* for those that do, *max* end time is one second from now, but all should be faster */
	const uint64_t end_cycles = rte_get_timer_cycles() + rte_get_timer_hz();
	while (st == RTE_DMA_VCHAN_ACTIVE && rte_get_timer_cycles() < end_cycles) {
		rte_pause();
		rte_dma_vchan_status(dev_id, vchan, &st);
	}
}
/* run a series of copy tests just using some different options for enqueues and completions */
static int
do_multi_copies(int16_t dev_id, uint16_t vchan,
		int split_batches,        /* submit 2 x 16 or 1 x 32 burst */
		int split_completions,    /* gather 2 x 16 or 1 x 32 completions */
		int use_completed_status) /* use completed or completed_status function */
{
	struct rte_mbuf *srcs[32], *dsts[32];
	enum rte_dma_status_code sc[32];
	unsigned int i, j;
	bool dma_err = false;
	/* Enqueue burst of copies and hit doorbell */
	for (i = 0; i < RTE_DIM(srcs); i++) {
		uint64_t *src_data;

		if (split_batches && i == RTE_DIM(srcs) / 2)
			rte_dma_submit(dev_id, vchan);

		srcs[i] = rte_pktmbuf_alloc(pool);
		dsts[i] = rte_pktmbuf_alloc(pool);
		if (srcs[i] == NULL || dsts[i] == NULL)
			ERR_RETURN("Error allocating buffers\n");

		src_data = rte_pktmbuf_mtod(srcs[i], uint64_t *);
		for (j = 0; j < COPY_LEN/sizeof(uint64_t); j++)
			src_data[j] = rte_rand();

		if (rte_dma_copy(dev_id, vchan, srcs[i]->buf_iova + srcs[i]->data_off,
				dsts[i]->buf_iova + dsts[i]->data_off, COPY_LEN, 0) != id_count++)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
	}
	rte_dma_submit(dev_id, vchan);

	await_hw(dev_id, vchan);
	if (split_completions) {
		/* gather completions in two halves */
		uint16_t half_len = RTE_DIM(srcs) / 2;
		int ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
		if (ret != half_len || dma_err)
			ERR_RETURN("Error with rte_dma_completed - first half. ret = %d, expected ret = %u, dma_err = %d\n",
					ret, half_len, dma_err);

		ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
		if (ret != half_len || dma_err)
			ERR_RETURN("Error with rte_dma_completed - second half. ret = %d, expected ret = %u, dma_err = %d\n",
					ret, half_len, dma_err);
	} else {
		/* gather all completions in one go, using either
		 * completed or completed_status fns
		 */
		if (!use_completed_status) {
			int n = rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
			if (n != RTE_DIM(srcs) || dma_err)
				ERR_RETURN("Error with rte_dma_completed, %u [expected: %zu], dma_err = %d\n",
						n, RTE_DIM(srcs), dma_err);
		} else {
			int n = rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc);
			if (n != RTE_DIM(srcs))
				ERR_RETURN("Error with rte_dma_completed_status, %u [expected: %zu]\n",
						n, RTE_DIM(srcs));

			for (j = 0; j < (uint16_t)n; j++)
				if (sc[j] != RTE_DMA_STATUS_SUCCESSFUL)
					ERR_RETURN("Error with rte_dma_completed_status, job %u reports failure [code %u]\n",
							j, sc[j]);
		}
	}
	/* check for empty */
	int ret = use_completed_status ?
			rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc) :
			rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
	if (ret != 0)
		ERR_RETURN("Error with completion check - ops unexpectedly returned\n");

	for (i = 0; i < RTE_DIM(srcs); i++) {
		char *src_data, *dst_data;

		src_data = rte_pktmbuf_mtod(srcs[i], char *);
		dst_data = rte_pktmbuf_mtod(dsts[i], char *);
		for (j = 0; j < COPY_LEN; j++)
			if (src_data[j] != dst_data[j])
				ERR_RETURN("Error with copy of packet %u, byte %u\n", i, j);

		rte_pktmbuf_free(srcs[i]);
		rte_pktmbuf_free(dsts[i]);
	}
	return 0;
}
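/* Basic copy tests: a single copy, a short run of individual copies, then the
 * multi-copy variants implemented by do_multi_copies() above.
 */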
static int
test_enqueue_copies(int16_t dev_id, uint16_t vchan)
{
	enum rte_dma_status_code status;
	unsigned int i;
	uint16_t id;

	/* test doing a single copy */
	{
		struct rte_mbuf *src, *dst;
		char *src_data, *dst_data;

		src = rte_pktmbuf_alloc(pool);
		dst = rte_pktmbuf_alloc(pool);
		src_data = rte_pktmbuf_mtod(src, char *);
		dst_data = rte_pktmbuf_mtod(dst, char *);

		for (i = 0; i < COPY_LEN; i++)
			src_data[i] = rte_rand() & 0xFF;

		id = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src), rte_pktmbuf_iova(dst),
				COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT);
		if (id != id_count)
			ERR_RETURN("Error with rte_dma_copy, got %u, expected %u\n",
					id, id_count);

		/* give time for copy to finish, then check it was done */
		await_hw(dev_id, vchan);

		for (i = 0; i < COPY_LEN; i++)
			if (dst_data[i] != src_data[i])
				ERR_RETURN("Data mismatch at char %u [Got %02x not %02x]\n", i,
						dst_data[i], src_data[i]);

		/* now check completion works */
		if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 1)
			ERR_RETURN("Error with rte_dma_completed\n");
		if (id != id_count)
			ERR_RETURN("Error:incorrect job id received, %u [expected %u]\n",
					id, id_count);

		/* check for completed and id when no job done */
		if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 0)
			ERR_RETURN("Error with rte_dma_completed when no job done\n");
		if (id != id_count)
			ERR_RETURN("Error:incorrect job id received when no job done, %u [expected %u]\n",
					id, id_count);

		/* check for completed_status and id when no job done */
		if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0)
			ERR_RETURN("Error with rte_dma_completed_status when no job done\n");
		if (id != id_count)
			ERR_RETURN("Error:incorrect job id received when no job done, %u [expected %u]\n",
					id, id_count);

		rte_pktmbuf_free(src);
		rte_pktmbuf_free(dst);

		/* now check completion returns nothing more */
		if (rte_dma_completed(dev_id, 0, 1, NULL, NULL) != 0)
			ERR_RETURN("Error with rte_dma_completed in empty check\n");

		id_count++;
	}
	/* test doing multiple single copies */
	{
		const uint16_t max_ops = 4;
		struct rte_mbuf *src, *dst;
		char *src_data, *dst_data;
		uint16_t count;

		src = rte_pktmbuf_alloc(pool);
		dst = rte_pktmbuf_alloc(pool);
		src_data = rte_pktmbuf_mtod(src, char *);
		dst_data = rte_pktmbuf_mtod(dst, char *);

		for (i = 0; i < COPY_LEN; i++)
			src_data[i] = rte_rand() & 0xFF;

		/* perform the same copy <max_ops> times */
		for (i = 0; i < max_ops; i++)
			if (rte_dma_copy(dev_id, vchan,
					rte_pktmbuf_iova(src),
					rte_pktmbuf_iova(dst),
					COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT) != id_count++)
				ERR_RETURN("Error with rte_dma_copy\n");

		await_hw(dev_id, vchan);

		count = rte_dma_completed(dev_id, vchan, max_ops * 2, &id, NULL);
		if (count != max_ops)
			ERR_RETURN("Error with rte_dma_completed, got %u not %u\n",
					count, max_ops);

		if (id != id_count - 1)
			ERR_RETURN("Error, incorrect job id returned: got %u not %u\n",
					id, id_count - 1);

		for (i = 0; i < COPY_LEN; i++)
			if (dst_data[i] != src_data[i])
				ERR_RETURN("Data mismatch at char %u\n", i);

		rte_pktmbuf_free(src);
		rte_pktmbuf_free(dst);
	}
	/* test doing multiple copies */
	return do_multi_copies(dev_id, vchan, 0, 0, 0) /* enqueue and complete 1 batch at a time */
			/* enqueue 2 batches and then complete both */
			|| do_multi_copies(dev_id, vchan, 1, 0, 0)
			/* enqueue 1 batch, then complete in two halves */
			|| do_multi_copies(dev_id, vchan, 0, 1, 0)
			/* test using completed_status in place of regular completed API */
			|| do_multi_copies(dev_id, vchan, 0, 0, 1);
}

/* Failure handling test cases - global macros and variables for those tests */
#define COMP_BURST_SZ 16
#define OPT_FENCE(idx) ((fence && idx == 8) ? RTE_DMA_OP_FLAG_FENCE : 0)
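/* Enqueue a full burst in which one op has an invalid (NULL) source address, then check
 * that completed() stops at the failed op, that completed_status() accounts for the rest
 * of the burst, and that the device stats reflect the failure.
 */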
static int
test_failure_in_full_burst(int16_t dev_id, uint16_t vchan, bool fence,
		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
	/* Test single full batch statuses with failures */
	enum rte_dma_status_code status[COMP_BURST_SZ];
	struct rte_dma_stats baseline, stats;
	uint16_t invalid_addr_id = 0;
	uint16_t idx;
	uint16_t count, status_count;
	unsigned int i;
	bool error = false;
	int err_count = 0;

	rte_dma_stats_get(dev_id, vchan, &baseline); /* get a baseline set of stats */
	for (i = 0; i < COMP_BURST_SZ; i++) {
		int id = rte_dma_copy(dev_id, vchan,
				(i == fail_idx ? 0 : (srcs[i]->buf_iova + srcs[i]->data_off)),
				dsts[i]->buf_iova + dsts[i]->data_off,
				COPY_LEN, OPT_FENCE(i));
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
		if (i == fail_idx)
			invalid_addr_id = id;
	}
	rte_dma_submit(dev_id, vchan);
	rte_dma_stats_get(dev_id, vchan, &stats);
	if (stats.submitted != baseline.submitted + COMP_BURST_SZ)
		ERR_RETURN("Submitted stats value not as expected, %"PRIu64" not %"PRIu64"\n",
				stats.submitted, baseline.submitted + COMP_BURST_SZ);

	await_hw(dev_id, vchan);
	count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
	if (count != fail_idx)
		ERR_RETURN("Error with rte_dma_completed for failure test. Got returned %u not %u.\n",
				count, fail_idx);
	if (error == false)
		ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n",
				fail_idx);
	if (idx != invalid_addr_id - 1)
		ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n",
				fail_idx, idx, invalid_addr_id - 1);

	/* all checks ok, now verify calling completed() again always returns 0 */
	for (i = 0; i < 10; i++)
		if (rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error) != 0
				|| error == false || idx != (invalid_addr_id - 1))
			ERR_RETURN("Error with follow-up completed calls for fail idx %u\n",
					fail_idx);
	status_count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ,
			&idx, status);
	/* some HW may stop on error and be restarted after getting error status for single value
	 * To handle this case, if we get just one error back, wait for more completions and get
	 * status for rest of the burst
	 */
	if (status_count == 1) {
		await_hw(dev_id, vchan);
		status_count += rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - 1,
				&idx, &status[1]);
	}

	/* check that at this point we have all status values */
	if (status_count != COMP_BURST_SZ - count)
		ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n",
				fail_idx, status_count, COMP_BURST_SZ - count);
	/* now verify just one failure followed by multiple successful or skipped entries */
	if (status[0] == RTE_DMA_STATUS_SUCCESSFUL)
		ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n",
				fail_idx);
	for (i = 1; i < status_count; i++)
		/* after a failure in a burst, depending on ordering/fencing,
		 * operations may be successful or skipped because of previous error.
		 */
		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL
				&& status[i] != RTE_DMA_STATUS_NOT_ATTEMPTED)
			ERR_RETURN("Error with status calls for fail idx %u. Status for job %u (of %u) is not successful\n",
					fail_idx, count + i, COMP_BURST_SZ);

	/* check the completed + errors stats are as expected */
	rte_dma_stats_get(dev_id, vchan, &stats);
	if (stats.completed != baseline.completed + COMP_BURST_SZ)
		ERR_RETURN("Completed stats value not as expected, %"PRIu64" not %"PRIu64"\n",
				stats.completed, baseline.completed + COMP_BURST_SZ);
	for (i = 0; i < status_count; i++)
		err_count += (status[i] != RTE_DMA_STATUS_SUCCESSFUL);
	if (stats.errors != baseline.errors + err_count)
		ERR_RETURN("'Errors' stats value not as expected, %"PRIu64" not %"PRIu64"\n",
				stats.errors, baseline.errors + err_count);

	return 0;
}
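/* Same failure scenario, but drain the burst one op at a time: completed() until the
 * error is hit, then completed_status() for the remainder of the burst.
 */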
static int
test_individual_status_query_with_failure(int16_t dev_id, uint16_t vchan, bool fence,
		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
	/* Test gathering batch statuses one at a time */
	enum rte_dma_status_code status[COMP_BURST_SZ];
	uint16_t invalid_addr_id = 0;
	uint16_t idx;
	uint16_t count = 0, status_count = 0;
	unsigned int j;
	bool error = false;

	for (j = 0; j < COMP_BURST_SZ; j++) {
		int id = rte_dma_copy(dev_id, vchan,
				(j == fail_idx ? 0 : (srcs[j]->buf_iova + srcs[j]->data_off)),
				dsts[j]->buf_iova + dsts[j]->data_off,
				COPY_LEN, OPT_FENCE(j));
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
		if (j == fail_idx)
			invalid_addr_id = id;
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);
	/* use regular "completed" until we hit error */
	while (!error) {
		uint16_t n = rte_dma_completed(dev_id, vchan, 1, &idx, &error);
		count += n;
		if (n > 1 || count >= COMP_BURST_SZ)
			ERR_RETURN("Error - too many completions got\n");
		if (n == 0 && !error)
			ERR_RETURN("Error, unexpectedly got zero completions after %u completed\n",
					count);
	}
	if (idx != invalid_addr_id - 1)
		ERR_RETURN("Error, last successful index not as expected, got %u, expected %u\n",
				idx, invalid_addr_id - 1);

	/* use completed_status until we hit end of burst */
	while (count + status_count < COMP_BURST_SZ) {
		uint16_t n = rte_dma_completed_status(dev_id, vchan, 1, &idx,
				&status[status_count]);
		await_hw(dev_id, vchan); /* allow delay to ensure jobs are completed */
		status_count += n;
		if (n != 1)
			ERR_RETURN("Error: unexpected number of completions received, %u, not 1\n",
					n);
	}

	/* check for single failure */
	if (status[0] == RTE_DMA_STATUS_SUCCESSFUL)
		ERR_RETURN("Error, unexpected successful DMA transaction\n");
	for (j = 1; j < status_count; j++)
		if (status[j] != RTE_DMA_STATUS_SUCCESSFUL
				&& status[j] != RTE_DMA_STATUS_NOT_ATTEMPTED)
			ERR_RETURN("Error, unexpected DMA error reported\n");

	return 0;
}
static int
test_single_item_status_query_with_failure(int16_t dev_id, uint16_t vchan,
		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
	/* When error occurs just collect a single error using "completed_status()"
	 * before going back to completed() calls
	 */
	enum rte_dma_status_code status;
	uint16_t invalid_addr_id = 0;
	uint16_t idx;
	uint16_t count, status_count, count2;
	unsigned int j;
	bool error = false;

	for (j = 0; j < COMP_BURST_SZ; j++) {
		int id = rte_dma_copy(dev_id, vchan,
				(j == fail_idx ? 0 : (srcs[j]->buf_iova + srcs[j]->data_off)),
				dsts[j]->buf_iova + dsts[j]->data_off,
				COPY_LEN, 0);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
		if (j == fail_idx)
			invalid_addr_id = id;
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);
	/* get up to the error point */
	count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
	if (count != fail_idx)
		ERR_RETURN("Error with rte_dma_completed for failure test. Got returned %u not %u.\n",
				count, fail_idx);
	if (error == false)
		ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n",
				fail_idx);
	if (idx != invalid_addr_id - 1)
		ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n",
				fail_idx, idx, invalid_addr_id - 1);

	/* get the error code */
	status_count = rte_dma_completed_status(dev_id, vchan, 1, &idx, &status);
	if (status_count != 1)
		ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n",
				fail_idx, status_count, COMP_BURST_SZ - count);
	if (status == RTE_DMA_STATUS_SUCCESSFUL)
		ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n",
				fail_idx);

	/* delay in case time needed after err handled to complete other jobs */
	await_hw(dev_id, vchan);

	/* get the rest of the completions without status */
	count2 = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
	if (error == true)
		ERR_RETURN("Error, got further errors post completed_status() call, for failure case %u.\n",
				fail_idx);
	if (count + status_count + count2 != COMP_BURST_SZ)
		ERR_RETURN("Error, incorrect number of completions received, got %u not %u\n",
				count + status_count + count2, COMP_BURST_SZ);

	return 0;
}
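/* Enqueue a burst containing several failing ops and verify that the number of error
 * statuses reported matches the number of bad source addresses supplied, gathering the
 * completions both in one go and one error at a time.
 */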
static int
test_multi_failure(int16_t dev_id, uint16_t vchan, struct rte_mbuf **srcs, struct rte_mbuf **dsts,
		const unsigned int *fail, size_t num_fail)
{
	/* test having multiple errors in one go */
	enum rte_dma_status_code status[COMP_BURST_SZ];
	unsigned int i, j;
	uint16_t count, err_count = 0;
	bool error = false;

	/* enqueue and gather completions in one go */
	for (j = 0; j < COMP_BURST_SZ; j++) {
		uintptr_t src = srcs[j]->buf_iova + srcs[j]->data_off;
		/* set up for failure if the current index is anywhere in the fails array */
		for (i = 0; i < num_fail; i++)
			if (j == fail[i])
				src = 0;

		int id = rte_dma_copy(dev_id, vchan,
				src, dsts[j]->buf_iova + dsts[j]->data_off,
				COPY_LEN, 0);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);

	count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ, NULL, status);
	while (count < COMP_BURST_SZ) {
		await_hw(dev_id, vchan);

		uint16_t ret = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - count,
				NULL, &status[count]);
		if (ret == 0)
			ERR_RETURN("Error getting all completions for jobs. Got %u of %u\n",
					count, COMP_BURST_SZ);
		count += ret;
	}
	for (i = 0; i < count; i++)
		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
			err_count++;

	if (err_count != num_fail)
		ERR_RETURN("Error: Invalid number of failed completions returned, %u; expected %zu\n",
				err_count, num_fail);
	/* enqueue and gather completions in bursts, but getting errors one at a time */
	for (j = 0; j < COMP_BURST_SZ; j++) {
		uintptr_t src = srcs[j]->buf_iova + srcs[j]->data_off;
		/* set up for failure if the current index is anywhere in the fails array */
		for (i = 0; i < num_fail; i++)
			if (j == fail[i])
				src = 0;

		int id = rte_dma_copy(dev_id, vchan,
				src, dsts[j]->buf_iova + dsts[j]->data_off,
				COPY_LEN, 0);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);

	count = 0;
	err_count = 0;
	while (count + err_count < COMP_BURST_SZ) {
		count += rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, NULL, &error);
		if (error) {
			uint16_t ret = rte_dma_completed_status(dev_id, vchan, 1,
					NULL, status);
			if (ret != 1)
				ERR_RETURN("Error getting error-status for completions\n");
			err_count += ret;
			await_hw(dev_id, vchan);
		}
	}
	if (err_count != num_fail)
		ERR_RETURN("Error: Incorrect number of failed completions received, got %u not %zu\n",
				err_count, num_fail);

	return 0;
}
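/* Allocate a set of source and destination buffers, then run each of the
 * failure-handling cases above for every index in the fail[] list.
 */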
static int
test_completion_status(int16_t dev_id, uint16_t vchan, bool fence)
{
	const unsigned int fail[] = {0, 7, 14, 15};
	struct rte_mbuf *srcs[COMP_BURST_SZ], *dsts[COMP_BURST_SZ];
	unsigned int i;

	for (i = 0; i < COMP_BURST_SZ; i++) {
		srcs[i] = rte_pktmbuf_alloc(pool);
		dsts[i] = rte_pktmbuf_alloc(pool);
	}

	for (i = 0; i < RTE_DIM(fail); i++) {
		if (test_failure_in_full_burst(dev_id, vchan, fence, srcs, dsts, fail[i]) < 0)
			return -1;

		if (test_individual_status_query_with_failure(dev_id, vchan, fence,
				srcs, dsts, fail[i]) < 0)
			return -1;

		/* test runs the same fenced or unfenced, but no harm in running it twice */
		if (test_single_item_status_query_with_failure(dev_id, vchan,
				srcs, dsts, fail[i]) < 0)
			return -1;
	}

	if (test_multi_failure(dev_id, vchan, srcs, dsts, fail, RTE_DIM(fail)) < 0)
		return -1;

	for (i = 0; i < COMP_BURST_SZ; i++) {
		rte_pktmbuf_free(srcs[i]);
		rte_pktmbuf_free(dsts[i]);
	}
	return 0;
}
static int
test_completion_handling(int16_t dev_id, uint16_t vchan)
{
	return test_completion_status(dev_id, vchan, false) /* without fences */
			|| test_completion_status(dev_id, vchan, true); /* with fences */
}
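/* Check rte_dma_fill() for a range of lengths: the filled area must match the pattern
 * and the bytes beyond it must be left untouched.
 */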
static int
test_enqueue_fill(int16_t dev_id, uint16_t vchan)
{
	const unsigned int lengths[] = {8, 64, 1024, 50, 100, 89};
	struct rte_mbuf *dst;
	char *dst_data;
	uint64_t pattern = 0xfedcba9876543210;
	unsigned int i, j;

	dst = rte_pktmbuf_alloc(pool);
	if (dst == NULL)
		ERR_RETURN("Failed to allocate mbuf\n");
	dst_data = rte_pktmbuf_mtod(dst, char *);

	for (i = 0; i < RTE_DIM(lengths); i++) {
		memset(dst_data, 0, rte_pktmbuf_data_len(dst));

		/* perform the fill operation */
		int id = rte_dma_fill(dev_id, vchan, pattern,
				rte_pktmbuf_iova(dst), lengths[i], RTE_DMA_OP_FLAG_SUBMIT);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_fill\n");
		await_hw(dev_id, vchan);

		if (rte_dma_completed(dev_id, vchan, 1, NULL, NULL) != 1)
			ERR_RETURN("Error: fill operation failed (length: %u)\n", lengths[i]);
		/* check the data from the fill operation is correct */
		for (j = 0; j < lengths[i]; j++) {
			char pat_byte = ((char *)&pattern)[j % 8];
			if (dst_data[j] != pat_byte)
				ERR_RETURN("Error with fill operation (lengths = %u): got (%x), not (%x)\n",
						lengths[i], dst_data[j], pat_byte);
		}
		/* check that the data after the fill operation was not written to */
		for (; j < rte_pktmbuf_data_len(dst); j++)
			if (dst_data[j] != 0)
				ERR_RETURN("Error, fill operation wrote too far (lengths = %u): got (%x), not (%x)\n",
						lengths[i], dst_data[j], 0);
	}

	rte_pktmbuf_free(dst);
	return 0;
}
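/* Check that rte_dma_burst_capacity() drops as ops are enqueued and returns to its
 * original value once everything has completed, including across ring wrap-around and
 * wrap-around of the returned ids at UINT16_MAX.
 */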
static int
test_burst_capacity(int16_t dev_id, uint16_t vchan)
{
#define CAP_TEST_BURST_SIZE	64
	const int ring_space = rte_dma_burst_capacity(dev_id, vchan);
	struct rte_mbuf *src, *dst;
	int i, j, iter;
	int cap, ret;
	bool dma_err = false;

	src = rte_pktmbuf_alloc(pool);
	dst = rte_pktmbuf_alloc(pool);

	/* to test capacity, we enqueue elements and check capacity is reduced
	 * by one each time - rebaselining the expected value after each burst
	 * as the capacity is only for a burst. We enqueue multiple bursts to
	 * fill up half the ring, before emptying it again. We do this multiple
	 * times to ensure that we get to test scenarios where we get ring
	 * wrap-around and wrap-around of the ids returned (at UINT16_MAX).
	 */
	for (iter = 0; iter < 2 * (((int)UINT16_MAX + 1) / ring_space); iter++) {
		for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
			cap = rte_dma_burst_capacity(dev_id, vchan);

			for (j = 0; j < CAP_TEST_BURST_SIZE; j++) {
				ret = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src),
						rte_pktmbuf_iova(dst), COPY_LEN, 0);
				if (ret < 0)
					ERR_RETURN("Error with rte_dmadev_copy\n");

				if (rte_dma_burst_capacity(dev_id, vchan) != cap - (j + 1))
					ERR_RETURN("Error, ring capacity did not change as expected\n");
			}
			if (rte_dma_submit(dev_id, vchan) < 0)
				ERR_RETURN("Error, failed to submit burst\n");

			if (cap < rte_dma_burst_capacity(dev_id, vchan))
				ERR_RETURN("Error, avail ring capacity has gone up, not down\n");
		}
		await_hw(dev_id, vchan);

		for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
			ret = rte_dma_completed(dev_id, vchan,
					CAP_TEST_BURST_SIZE, NULL, &dma_err);
			if (ret != CAP_TEST_BURST_SIZE || dma_err) {
				enum rte_dma_status_code status;

				rte_dma_completed_status(dev_id, vchan, 1, NULL, &status);
				ERR_RETURN("Error with rte_dmadev_completed, %u [expected: %u], dma_err = %d, i = %u, iter = %u, status = %u\n",
						ret, CAP_TEST_BURST_SIZE, dma_err, i, iter, status);
			}
		}
		cap = rte_dma_burst_capacity(dev_id, vchan);
		if (cap != ring_space)
			ERR_RETURN("Error, ring capacity has not reset to original value, got %u, expected %u\n",
					cap, ring_space);
	}

	rte_pktmbuf_free(src);
	rte_pktmbuf_free(dst);
	return 0;
}
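/* Configure and start one dmadev instance, create the test mempool, then run each
 * group of tests that the device's capabilities allow.
 */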
static int
test_dmadev_instance(int16_t dev_id)
{
#define TEST_RINGSIZE 512
#define CHECK_ERRS true
	struct rte_dma_stats stats;
	struct rte_dma_info info;
	const struct rte_dma_conf conf = { .nb_vchans = 1};
	const struct rte_dma_vchan_conf qconf = {
			.direction = RTE_DMA_DIR_MEM_TO_MEM,
			.nb_desc = TEST_RINGSIZE,
	};
	const int vchan = 0;
	int ret;

	ret = rte_dma_info_get(dev_id, &info);
	if (ret != 0)
		ERR_RETURN("Error with rte_dma_info_get()\n");

	printf("\n### Test dmadev instance %u [%s]\n",
			dev_id, info.dev_name);

	if (info.max_vchans < 1)
		ERR_RETURN("Error, no channels available on device id %u\n", dev_id);

	if (rte_dma_configure(dev_id, &conf) != 0)
		ERR_RETURN("Error with rte_dma_configure()\n");

	if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0)
		ERR_RETURN("Error with queue configuration\n");

	ret = rte_dma_info_get(dev_id, &info);
	if (ret != 0 || info.nb_vchans != 1)
		ERR_RETURN("Error, no configured queues reported on device id %u\n", dev_id);

	if (rte_dma_start(dev_id) != 0)
		ERR_RETURN("Error with rte_dma_start()\n");

	if (rte_dma_stats_get(dev_id, vchan, &stats) != 0)
		ERR_RETURN("Error with rte_dma_stats_get()\n");

	if (rte_dma_burst_capacity(dev_id, vchan) < 32)
		ERR_RETURN("Error: Device does not have sufficient burst capacity to run tests\n");

	if (stats.completed != 0 || stats.submitted != 0 || stats.errors != 0)
		ERR_RETURN("Error device stats are not all zero: completed = %"PRIu64", "
				"submitted = %"PRIu64", errors = %"PRIu64"\n",
				stats.completed, stats.submitted, stats.errors);
	id_count = 0;
	/* create a mempool for running tests */
	pool = rte_pktmbuf_pool_create("TEST_DMADEV_POOL",
			TEST_RINGSIZE * 2, /* n == num elements */
			32,   /* cache size */
			0,    /* priv size */
			2048, /* data room size */
			info.numa_node);
	if (pool == NULL)
		ERR_RETURN("Error with mempool creation\n");

	/* run the test cases, use many iterations to ensure UINT16_MAX id wraparound */
	if (runtest("copy", test_enqueue_copies, 640, dev_id, vchan, CHECK_ERRS) < 0)
		goto err;

	/* run some burst capacity tests */
	if (rte_dma_burst_capacity(dev_id, vchan) < 64)
		printf("DMA Dev %u: insufficient burst capacity (64 required), skipping tests\n",
				dev_id);
	else if (runtest("burst capacity", test_burst_capacity, 1, dev_id, vchan, CHECK_ERRS) < 0)
		goto err;

	/* to test error handling we can provide null pointers for source or dest in copies. This
	 * requires VA mode in DPDK, since NULL(0) is a valid physical address.
	 * We also need hardware that can report errors back.
	 */
	if (rte_eal_iova_mode() != RTE_IOVA_VA)
		printf("DMA Dev %u: DPDK not in VA mode, skipping error handling tests\n", dev_id);
	else if ((info.dev_capa & RTE_DMA_CAPA_HANDLES_ERRORS) == 0)
		printf("DMA Dev %u: device does not report errors, skipping error handling tests\n",
				dev_id);
	else if (runtest("error handling", test_completion_handling, 1,
			dev_id, vchan, !CHECK_ERRS) < 0)
		goto err;

	if ((info.dev_capa & RTE_DMA_CAPA_OPS_FILL) == 0)
		printf("DMA Dev %u: No device fill support, skipping fill tests\n", dev_id);
	else if (runtest("fill", test_enqueue_fill, 1, dev_id, vchan, CHECK_ERRS) < 0)
		goto err;

	rte_mempool_free(pool);
	rte_dma_stop(dev_id);
	rte_dma_stats_reset(dev_id, vchan);
	return 0;

err:
	rte_mempool_free(pool);
	rte_dma_stop(dev_id);
	return -1;
}
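/* Sanity-check the dmadev API surface using the skeleton vdev driver, if one can be
 * created or is already present.
 */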
static int
test_apis(void)
{
	const char *pmd = "dma_skeleton";
	int id;
	int ret;

	/* attempt to create skeleton instance - ignore errors due to one being already present */
	rte_vdev_init(pmd, NULL);
	id = rte_dma_get_dev_id_by_name(pmd);
	if (id < 0)
		return TEST_SKIPPED;
	printf("\n### Test dmadev infrastructure using skeleton driver\n");
	ret = test_dma_api(id);

	return ret;
}
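/* Entry point for the dmadev_autotest command: run the API checks, then the full test
 * suite on each available device.
 */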
static int
test_dma(void)
{
	int i;

	/* basic sanity on dmadev infrastructure */
	if (test_apis() < 0)
		ERR_RETURN("Error performing API tests\n");

	if (rte_dma_count_avail() == 0)
		return TEST_SKIPPED;

	RTE_DMA_FOREACH_DEV(i)
		if (test_dmadev_instance(i) < 0)
			ERR_RETURN("Error, test failure for device %d\n", i);
	return 0;
}

REGISTER_TEST_COMMAND(dmadev_autotest, test_dma);