1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2021 HiSilicon Limited
3 * Copyright(c) 2021 Intel Corporation
8 #include <rte_dmadev.h>
10 #include <rte_pause.h>
11 #include <rte_cycles.h>
12 #include <rte_random.h>
13 #include <rte_bus_vdev.h>
14 #include <rte_dmadev_pmd.h>
17 #include "test_dmadev_api.h"
/* Report an error (function name + line + printf-style message) and bail out
 * of the calling test function with -1. Multi-statement macro wrapped in
 * do { } while (0) so it behaves as a single statement.
 */
19 #define ERR_RETURN(...) do { print_err(__func__, __LINE__, __VA_ARGS__); return -1; } while (0)
/* mempool backing the src/dst mbufs allocated by the copy tests */
23 static struct rte_mempool *pool;
/* running expected job id; compared against the ids returned by rte_dma_copy() */
24 static uint16_t id_count;
/*
 * Print an error message to stderr, prefixed with the reporting function's
 * name and line number. Takes a printf-style format string plus varargs
 * (format-checked at compile time via __rte_format_printf(3, 4)).
 * Used exclusively through the ERR_RETURN() macro above.
 */
27 __rte_format_printf(3, 4)
28 print_err(const char *func, int lineno, const char *format, ...)
32 fprintf(stderr, "In %s:%d - ", func, lineno);
34 vfprintf(stderr, format, ap);
/*
 * Run a single test case repeatedly and then validate the device statistics.
 *
 * @param printable       human-readable test name for the progress banner
 * @param test_fn         test case to execute; returns <0 on failure
 * @param iterations      number of times to invoke test_fn (large counts are
 *                        used by callers to force 16-bit job-id wraparound)
 * @param dev_id, vchan   device/virtual-channel under test
 * @param check_err_stats when true, any non-zero error counter fails the run;
 *                        when false the banner notes "(errors expected)"
 * @return 0 on success, -1 (via ERR_RETURN) on any failure
 */
39 runtest(const char *printable, int (*test_fn)(int16_t dev_id, uint16_t vchan), int iterations,
40 int16_t dev_id, uint16_t vchan, bool check_err_stats)
42 struct rte_dma_stats stats;
/* start each run from clean counters so the checks below see only this run */
45 rte_dma_stats_reset(dev_id, vchan);
46 printf("DMA Dev %d: Running %s Tests %s\n", dev_id, printable,
47 check_err_stats ? " " : "(errors expected)");
48 for (i = 0; i < iterations; i++) {
49 if (test_fn(dev_id, vchan) < 0)
/* NOTE(review): stats are queried for vchan 0 here, not `vchan` — matches
 * the single-vchan setup used by test_dmadev_instance(); confirm if the
 * test is ever extended to multiple vchans.
 */
52 rte_dma_stats_get(dev_id, 0, &stats);
53 printf("Ops submitted: %"PRIu64"\t", stats.submitted);
54 printf("Ops completed: %"PRIu64"\t", stats.completed);
55 printf("Errors: %"PRIu64"\r", stats.errors);
/* every submitted job must have been reported back as completed */
57 if (stats.completed != stats.submitted)
58 ERR_RETURN("\nError, not all submitted jobs are reported as completed\n");
59 if (check_err_stats && stats.errors != 0)
60 ERR_RETURN("\nErrors reported during op processing, aborting tests\n");
/*
 * Wait for the given device/vchan to go idle before checking completions.
 * If the driver does not implement rte_dma_vchan_status() (call returns <0),
 * fall back to a fixed 1 ms sleep. Otherwise poll the status until the vchan
 * leaves RTE_DMA_VCHAN_ACTIVE, bounded by a one-second timeout so a stuck
 * device cannot hang the test.
 */
67 await_hw(int16_t dev_id, uint16_t vchan)
69 enum rte_dma_vchan_status st;
71 if (rte_dma_vchan_status(dev_id, vchan, &st) < 0) {
72 /* for drivers that don't support this op, just sleep for 1 millisecond */
73 rte_delay_us_sleep(1000);
77 /* for those that do, *max* end time is one second from now, but all should be faster */
78 const uint64_t end_cycles = rte_get_timer_cycles() + rte_get_timer_hz();
79 while (st == RTE_DMA_VCHAN_ACTIVE && rte_get_timer_cycles() < end_cycles) {
81 rte_dma_vchan_status(dev_id, vchan, &st);
/* run a series of copy tests just using some different options for enqueues and completions */
/*
 * Enqueue a 32-op burst of mbuf-to-mbuf copies and verify their completion.
 *
 * @param split_batches       non-zero: ring the doorbell mid-burst, i.e.
 *                            submit as 2 x 16 rather than 1 x 32
 * @param split_completions   non-zero: gather completions as 2 x 16 calls
 *                            rather than one call for all 32
 * @param use_completed_status non-zero: use rte_dma_completed_status() (with
 *                            a per-op status array) instead of rte_dma_completed()
 * @return 0 on success, -1 (via ERR_RETURN) on any failure
 */
87 do_multi_copies(int16_t dev_id, uint16_t vchan,
88 int split_batches, /* submit 2 x 16 or 1 x 32 burst */
89 int split_completions, /* gather 2 x 16 or 1 x 32 completions */
90 int use_completed_status) /* use completed or completed_status function */
92 struct rte_mbuf *srcs[32], *dsts[32];
93 enum rte_dma_status_code sc[32];
97 /* Enqueue burst of copies and hit doorbell */
98 for (i = 0; i < RTE_DIM(srcs); i++) {
/* when splitting the batch, submit the first half before enqueuing the rest */
101 if (split_batches && i == RTE_DIM(srcs) / 2)
102 rte_dma_submit(dev_id, vchan);
104 srcs[i] = rte_pktmbuf_alloc(pool);
105 dsts[i] = rte_pktmbuf_alloc(pool);
106 if (srcs[i] == NULL || dsts[i] == NULL)
107 ERR_RETURN("Error allocating buffers\n");
/* fill the source buffer with random 64-bit words for later verification */
109 src_data = rte_pktmbuf_mtod(srcs[i], uint64_t *);
110 for (j = 0; j < COPY_LEN/sizeof(uint64_t); j++)
111 src_data[j] = rte_rand();
/* each enqueue must return the next sequential job id (id_count) */
113 if (rte_dma_copy(dev_id, vchan, srcs[i]->buf_iova + srcs[i]->data_off,
114 dsts[i]->buf_iova + dsts[i]->data_off, COPY_LEN, 0) != id_count++)
115 ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
/* ring doorbell for the (remaining) enqueued ops, then wait for HW to drain */
117 rte_dma_submit(dev_id, vchan);
119 await_hw(dev_id, vchan);
121 if (split_completions) {
122 /* gather completions in two halves */
123 uint16_t half_len = RTE_DIM(srcs) / 2;
124 int ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
125 if (ret != half_len || dma_err)
126 ERR_RETURN("Error with rte_dma_completed - first half. ret = %d, expected ret = %u, dma_err = %d\n",
127 ret, half_len, dma_err);
129 ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
130 if (ret != half_len || dma_err)
131 ERR_RETURN("Error with rte_dma_completed - second half. ret = %d, expected ret = %u, dma_err = %d\n",
132 ret, half_len, dma_err);
134 /* gather all completions in one go, using either
135 * completed or completed_status fns
137 if (!use_completed_status) {
138 int n = rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
139 if (n != RTE_DIM(srcs) || dma_err)
140 ERR_RETURN("Error with rte_dma_completed, %u [expected: %zu], dma_err = %d\n",
141 n, RTE_DIM(srcs), dma_err);
143 int n = rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc);
144 if (n != RTE_DIM(srcs))
145 ERR_RETURN("Error with rte_dma_completed_status, %u [expected: %zu]\n",
/* with completed_status, additionally check each op's individual status code */
148 for (j = 0; j < (uint16_t)n; j++)
149 if (sc[j] != RTE_DMA_STATUS_SUCCESSFUL)
150 ERR_RETURN("Error with rte_dma_completed_status, job %u reports failure [code %u]\n",
155 /* check for empty */
/* a further completion call must now return 0 ops */
156 int ret = use_completed_status ?
157 rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc) :
158 rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
160 ERR_RETURN("Error with completion check - ops unexpectedly returned\n");
/* verify the copied data byte-by-byte, then release the buffers */
162 for (i = 0; i < RTE_DIM(srcs); i++) {
163 char *src_data, *dst_data;
165 src_data = rte_pktmbuf_mtod(srcs[i], char *);
166 dst_data = rte_pktmbuf_mtod(dsts[i], char *);
167 for (j = 0; j < COPY_LEN; j++)
168 if (src_data[j] != dst_data[j])
169 ERR_RETURN("Error with copy of packet %u, byte %u\n", i, j);
171 rte_pktmbuf_free(srcs[i]);
172 rte_pktmbuf_free(dsts[i]);
/*
 * Copy-op test case run via runtest(). Exercises, in order:
 *  1. a single copy submitted with RTE_DMA_OP_FLAG_SUBMIT, with data and
 *     job-id verification;
 *  2. <max_ops> individual copies completed in one rte_dma_completed() call;
 *  3. the four do_multi_copies() burst variants.
 * Returns 0 on success, -1 (via ERR_RETURN / do_multi_copies) on failure.
 */
178 test_enqueue_copies(int16_t dev_id, uint16_t vchan)
183 /* test doing a single copy */
185 struct rte_mbuf *src, *dst;
186 char *src_data, *dst_data;
188 src = rte_pktmbuf_alloc(pool);
189 dst = rte_pktmbuf_alloc(pool);
190 src_data = rte_pktmbuf_mtod(src, char *);
191 dst_data = rte_pktmbuf_mtod(dst, char *);
/* randomize source so a stale/zeroed destination cannot pass the check */
193 for (i = 0; i < COPY_LEN; i++)
194 src_data[i] = rte_rand() & 0xFF;
/* OP_FLAG_SUBMIT rings the doorbell immediately; no rte_dma_submit() needed */
196 id = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src), rte_pktmbuf_iova(dst),
197 COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT);
199 ERR_RETURN("Error with rte_dma_copy, got %u, expected %u\n",
202 /* give time for copy to finish, then check it was done */
203 await_hw(dev_id, vchan);
205 for (i = 0; i < COPY_LEN; i++)
206 if (dst_data[i] != src_data[i])
207 ERR_RETURN("Data mismatch at char %u [Got %02x not %02x]\n", i,
208 dst_data[i], src_data[i]);
210 /* now check completion works */
211 if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 1)
212 ERR_RETURN("Error with rte_dma_completed\n");
215 ERR_RETURN("Error:incorrect job id received, %u [expected %u]\n",
218 rte_pktmbuf_free(src);
219 rte_pktmbuf_free(dst);
221 /* now check completion returns nothing more */
/* NOTE(review): vchan is passed as literal 0 here rather than `vchan` —
 * consistent with the single-vchan test setup; confirm before reuse with
 * other vchan numbers.
 */
222 if (rte_dma_completed(dev_id, 0, 1, NULL, NULL) != 0)
223 ERR_RETURN("Error with rte_dma_completed in empty check\n")
229 /* test doing a multiple single copies */
231 const uint16_t max_ops = 4;
232 struct rte_mbuf *src, *dst;
233 char *src_data, *dst_data;
236 src = rte_pktmbuf_alloc(pool);
237 dst = rte_pktmbuf_alloc(pool);
238 src_data = rte_pktmbuf_mtod(src, char *);
239 dst_data = rte_pktmbuf_mtod(dst, char *);
241 for (i = 0; i < COPY_LEN; i++)
242 src_data[i] = rte_rand() & 0xFF;
244 /* perform the same copy <max_ops> times */
245 for (i = 0; i < max_ops; i++)
246 if (rte_dma_copy(dev_id, vchan,
247 rte_pktmbuf_iova(src),
248 rte_pktmbuf_iova(dst),
249 COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT) != id_count++)
250 ERR_RETURN("Error with rte_dma_copy\n");
252 await_hw(dev_id, vchan);
/* ask for more completions than submitted; exactly max_ops must be returned */
254 count = rte_dma_completed(dev_id, vchan, max_ops * 2, &id, NULL);
255 if (count != max_ops)
256 ERR_RETURN("Error with rte_dma_completed, got %u not %u\n",
/* last completed id must be the most recently assigned one */
259 if (id != id_count - 1)
260 ERR_RETURN("Error, incorrect job id returned: got %u not %u\n",
263 for (i = 0; i < COPY_LEN; i++)
264 if (dst_data[i] != src_data[i])
265 ERR_RETURN("Data mismatch at char %u\n", i);
267 rte_pktmbuf_free(src);
268 rte_pktmbuf_free(dst);
271 /* test doing multiple copies */
/* short-circuit ||: stops at the first variant returning non-zero (failure) */
272 return do_multi_copies(dev_id, vchan, 0, 0, 0) /* enqueue and complete 1 batch at a time */
273 /* enqueue 2 batches and then complete both */
274 || do_multi_copies(dev_id, vchan, 1, 0, 0)
275 /* enqueue 1 batch, then complete in two halves */
276 || do_multi_copies(dev_id, vchan, 0, 1, 0)
277 /* test using completed_status in place of regular completed API */
278 || do_multi_copies(dev_id, vchan, 0, 0, 1);
/*
 * Full setup/run/teardown cycle for one dmadev instance:
 * configure a single MEM_TO_MEM vchan, start the device, verify stats are
 * zeroed, create the test mempool, run the copy tests, then free the pool,
 * stop the device and reset stats. Returns 0 on success, -1 on failure.
 */
282 test_dmadev_instance(int16_t dev_id)
284 #define TEST_RINGSIZE 512
285 #define CHECK_ERRS true
286 struct rte_dma_stats stats;
287 struct rte_dma_info info;
288 const struct rte_dma_conf conf = { .nb_vchans = 1};
289 const struct rte_dma_vchan_conf qconf = {
290 .direction = RTE_DMA_DIR_MEM_TO_MEM,
291 .nb_desc = TEST_RINGSIZE,
295 printf("\n### Test dmadev instance %u [%s]\n",
296 dev_id, rte_dma_devices[dev_id].data->dev_name);
298 rte_dma_info_get(dev_id, &info);
299 if (info.max_vchans < 1)
300 ERR_RETURN("Error, no channels available on device id %u\n", dev_id);
302 if (rte_dma_configure(dev_id, &conf) != 0)
303 ERR_RETURN("Error with rte_dma_configure()\n");
305 if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0)
306 ERR_RETURN("Error with queue configuration\n");
/* re-read info: the device must now report exactly the one configured vchan */
308 rte_dma_info_get(dev_id, &info);
309 if (info.nb_vchans != 1)
310 ERR_RETURN("Error, no configured queues reported on device id %u\n", dev_id);
312 if (rte_dma_start(dev_id) != 0)
313 ERR_RETURN("Error with rte_dma_start()\n");
315 if (rte_dma_stats_get(dev_id, vchan, &stats) != 0)
316 ERR_RETURN("Error with rte_dma_stats_get()\n");
/* a freshly-started device must have all-zero counters */
318 if (stats.completed != 0 || stats.submitted != 0 || stats.errors != 0)
319 ERR_RETURN("Error device stats are not all zero: completed = %"PRIu64", "
320 "submitted = %"PRIu64", errors = %"PRIu64"\n",
321 stats.completed, stats.submitted, stats.errors);
324 /* create a mempool for running tests */
/* 2x ring size so allocations never starve while ops are in flight */
325 pool = rte_pktmbuf_pool_create("TEST_DMADEV_POOL",
326 TEST_RINGSIZE * 2, /* n == num elements */
329 2048, /* data room size */
332 ERR_RETURN("Error with mempool creation\n");
334 /* run the test cases, use many iterations to ensure UINT16_MAX id wraparound */
335 if (runtest("copy", test_enqueue_copies, 640, dev_id, vchan, CHECK_ERRS) < 0)
/* teardown on test failure */
338 rte_mempool_free(pool);
339 rte_dma_stop(dev_id);
340 rte_dma_stats_reset(dev_id, vchan);
/* normal-path teardown */
344 rte_mempool_free(pool);
345 rte_dma_stop(dev_id);
/* NOTE(review): the enclosing function definitions for the fragments below
 * are not visible in this chunk; comments describe only what the visible
 * statements do.
 */
/* instantiate the skeleton vdev driver and run the generic dmadev API tests
 * against it */
352 const char *pmd = "dma_skeleton";
356 /* attempt to create skeleton instance - ignore errors due to one being already present */
357 rte_vdev_init(pmd, NULL);
358 id = rte_dma_get_dev_id_by_name(pmd);
361 printf("\n### Test dmadev infrastructure using skeleton driver\n");
362 ret = test_dma_api(id);
372 /* basic sanity on dmadev infrastructure */
374 ERR_RETURN("Error performing API tests\n");
/* nothing further to test if no dmadev hardware/vdev instances are present */
376 if (rte_dma_count_avail() == 0)
/* run the full per-device test suite on every available dmadev */
379 RTE_DMA_FOREACH_DEV(i)
380 if (test_dmadev_instance(i) < 0)
381 ERR_RETURN("Error, test failure for device %d\n", i);
/* register this suite with the DPDK test app as "dmadev_autotest" */
386 REGISTER_TEST_COMMAND(dmadev_autotest, test_dma);