1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
8 #include "rte_rawdev.h"
9 #include "rte_ioat_rawdev.h"
10 #include "ioat_private.h"
12 #define MAX_SUPPORTED_RAWDEVS 64
13 #define TEST_SKIPPED 77
16 int ioat_rawdev_test(uint16_t dev_id); /* pre-define to keep compiler happy */
18 static struct rte_mempool *pool;
19 static unsigned short expected_ring_size[MAX_SUPPORTED_RAWDEVS];
21 #define PRINT_ERR(...) print_err(__func__, __LINE__, __VA_ARGS__)
/*
 * Backend for the PRINT_ERR() macro: writes an "In <func>:<line> - " prefix
 * to stderr, then forwards the caller's printf-style message.
 *
 * NOTE(review): this chunk appears elided -- the function's return-type line,
 * the va_list declaration, va_start()/va_end() and the final return statement
 * are not visible here; presumably it returns the total byte count from the
 * two fprintf calls. Confirm against the full file.
 */
24 __rte_format_printf(3, 4)
25 print_err(const char *func, int lineno, const char *format, ...)
/* emit location prefix first so every error line identifies its origin */
30 ret = fprintf(stderr, "In %s:%d - ", func, lineno);
/* then the caller-supplied message; 'ap' is set up in elided lines */
32 ret += vfprintf(stderr, format, ap);
/*
 * Enqueue 32 mbuf-to-mbuf copies on the given rawdev and verify the
 * completions and the copied data.
 *
 * split_batches:     when set, call rte_ioat_perform_ops() halfway through
 *                    enqueueing, so the 32 copies are submitted as two
 *                    hardware batches instead of one.
 * split_completions: when set, gather the completions in two halves of
 *                    RTE_DIM(srcs)/2 each rather than in one call.
 *
 * NOTE(review): this chunk appears elided -- error-path returns, closing
 * braces and some declarations (i, j, src_data, COPY_LEN) are not visible
 * here. Comments below describe only the visible flow.
 */
39 do_multi_copies(int dev_id, int split_batches, int split_completions)
41 struct rte_mbuf *srcs[32], *dsts[32];
/* completion arrays are 2x the enqueue count so the single-call path can
 * ask for up to 64 and still detect over-reporting */
42 struct rte_mbuf *completed_src[64];
43 struct rte_mbuf *completed_dst[64];
/* allocate and enqueue one src->dst copy per iteration */
46 for (i = 0; i < RTE_DIM(srcs); i++) {
/* optionally flush a first batch at the halfway point */
49 if (split_batches && i == RTE_DIM(srcs) / 2)
50 rte_ioat_perform_ops(dev_id);
52 srcs[i] = rte_pktmbuf_alloc(pool);
53 dsts[i] = rte_pktmbuf_alloc(pool);
54 src_data = rte_pktmbuf_mtod(srcs[i], char *);
/* fill source with random bytes so a successful copy is distinguishable */
56 for (j = 0; j < COPY_LEN; j++)
57 src_data[j] = rand() & 0xFF;
/* enqueue using physical (IOVA) addresses of the mbuf data areas; the
 * mbuf pointers are passed as user handles to be returned on completion */
59 if (rte_ioat_enqueue_copy(dev_id,
60 srcs[i]->buf_iova + srcs[i]->data_off,
61 dsts[i]->buf_iova + dsts[i]->data_off,
64 (uintptr_t)dsts[i]) != 1) {
65 PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n",
/* submit the (remaining) batch to hardware */
70 rte_ioat_perform_ops(dev_id);
73 if (split_completions) {
74 /* gather completions in two halves */
75 uint16_t half_len = RTE_DIM(srcs) / 2;
76 if (rte_ioat_completed_ops(dev_id, half_len, (void *)completed_src,
77 (void *)completed_dst) != half_len) {
78 PRINT_ERR("Error with rte_ioat_completed_ops - first half request\n");
79 rte_rawdev_dump(dev_id, stdout);
/* second half lands in the upper halves of the completion arrays */
82 if (rte_ioat_completed_ops(dev_id, half_len, (void *)&completed_src[half_len],
83 (void *)&completed_dst[half_len]) != half_len) {
84 PRINT_ERR("Error with rte_ioat_completed_ops - second half request\n");
85 rte_rawdev_dump(dev_id, stdout);
89 /* gather all completions in one go */
/* ask for 64 (array capacity) but expect exactly RTE_DIM(srcs)=32 back */
90 if (rte_ioat_completed_ops(dev_id, 64, (void *)completed_src,
91 (void *)completed_dst) != RTE_DIM(srcs)) {
92 PRINT_ERR("Error with rte_ioat_completed_ops\n");
93 rte_rawdev_dump(dev_id, stdout);
/* verify handles came back in enqueue order and data matches byte-for-byte */
97 for (i = 0; i < RTE_DIM(srcs); i++) {
98 char *src_data, *dst_data;
100 if (completed_src[i] != srcs[i]) {
101 PRINT_ERR("Error with source pointer %u\n", i);
104 if (completed_dst[i] != dsts[i]) {
105 PRINT_ERR("Error with dest pointer %u\n", i);
109 src_data = rte_pktmbuf_mtod(srcs[i], char *);
110 dst_data = rte_pktmbuf_mtod(dsts[i], char *);
111 for (j = 0; j < COPY_LEN; j++)
112 if (src_data[j] != dst_data[j]) {
113 PRINT_ERR("Error with copy of packet %u, byte %u\n",
/* release the mbufs regardless of per-buffer verification outcome */
117 rte_pktmbuf_free(srcs[i]);
118 rte_pktmbuf_free(dsts[i]);
/*
 * Copy-operation test driver. Runs three scenarios against the device:
 *   1. a single enqueued copy, completed and verified;
 *   2. the same copy enqueued max_ops (4) times back-to-back, each
 *      submitted individually, then completed with one oversized request;
 *   3. multi-copy batches via do_multi_copies() with each combination of
 *      split enqueue / split completion.
 *
 * NOTE(review): this chunk appears elided -- error returns, closing braces
 * and some declarations (i, COPY_LEN) are not visible here.
 */
124 test_enqueue_copies(int dev_id)
128 /* test doing a single copy */
130 struct rte_mbuf *src, *dst;
131 char *src_data, *dst_data;
/* completed[0] receives the src handle, completed[1] the dst handle */
132 struct rte_mbuf *completed[2] = {0};
134 src = rte_pktmbuf_alloc(pool);
135 dst = rte_pktmbuf_alloc(pool);
136 src_data = rte_pktmbuf_mtod(src, char *);
137 dst_data = rte_pktmbuf_mtod(dst, char *);
/* randomize source so the copy result is checkable */
139 for (i = 0; i < COPY_LEN; i++)
140 src_data[i] = rand() & 0xFF;
/* enqueue one copy using IOVA addresses, mbuf pointers as handles */
142 if (rte_ioat_enqueue_copy(dev_id,
143 src->buf_iova + src->data_off,
144 dst->buf_iova + dst->data_off,
147 (uintptr_t)dst) != 1) {
148 PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
151 rte_ioat_perform_ops(dev_id);
/* exactly one completion expected */
154 if (rte_ioat_completed_ops(dev_id, 1, (void *)&completed[0],
155 (void *)&completed[1]) != 1) {
156 PRINT_ERR("Error with rte_ioat_completed_ops\n");
/* returned handles must be the ones passed at enqueue time */
159 if (completed[0] != src || completed[1] != dst) {
160 PRINT_ERR("Error with completions: got (%p, %p), not (%p,%p)\n",
161 completed[0], completed[1], src, dst);
/* byte-for-byte data verification */
165 for (i = 0; i < COPY_LEN; i++)
166 if (dst_data[i] != src_data[i]) {
167 PRINT_ERR("Data mismatch at char %u [Got %02x not %02x]\n",
168 i, dst_data[i], src_data[i]);
171 rte_pktmbuf_free(src);
172 rte_pktmbuf_free(dst);
175 /* test doing a multiple single copies */
177 const uint16_t max_ops = 4;
178 struct rte_mbuf *src, *dst;
179 char *src_data, *dst_data;
180 struct rte_mbuf *completed[32] = {0};
/* src handles land in the first half of completed[], dst handles in the
 * second half starting at max_completions */
181 const uint16_t max_completions = RTE_DIM(completed) / 2;
183 src = rte_pktmbuf_alloc(pool);
184 dst = rte_pktmbuf_alloc(pool);
185 src_data = rte_pktmbuf_mtod(src, char *);
186 dst_data = rte_pktmbuf_mtod(dst, char *);
188 for (i = 0; i < COPY_LEN; i++)
189 src_data[i] = rand() & 0xFF;
191 /* perform the same copy <max_ops> times */
192 for (i = 0; i < max_ops; i++) {
193 if (rte_ioat_enqueue_copy(dev_id,
194 src->buf_iova + src->data_off,
195 dst->buf_iova + dst->data_off,
198 (uintptr_t)dst) != 1) {
199 PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
/* submit each copy individually (inside the loop, per visible flow) */
202 rte_ioat_perform_ops(dev_id);
/* request more completions than enqueued; expect exactly max_ops back */
206 if (rte_ioat_completed_ops(dev_id, max_completions, (void *)&completed[0],
207 (void *)&completed[max_completions]) != max_ops) {
208 PRINT_ERR("Error with rte_ioat_completed_ops\n");
209 rte_rawdev_dump(dev_id, stdout);
/* spot-check the first returned src/dst handle pair */
212 if (completed[0] != src || completed[max_completions] != dst) {
213 PRINT_ERR("Error with completions: got (%p, %p), not (%p,%p)\n",
214 completed[0], completed[max_completions], src, dst);
218 for (i = 0; i < COPY_LEN; i++)
219 if (dst_data[i] != src_data[i]) {
220 PRINT_ERR("Data mismatch at char %u\n", i);
223 rte_pktmbuf_free(src);
224 rte_pktmbuf_free(dst);
227 /* test doing multiple copies */
228 do_multi_copies(dev_id, 0, 0); /* enqueue and complete one batch at a time */
229 do_multi_copies(dev_id, 1, 0); /* enqueue 2 batches and then complete both */
230 do_multi_copies(dev_id, 0, 1); /* enqueue 1 batch, then complete in two halves */
/*
 * Fill-operation test: for each length in 'lengths', zero the destination
 * buffer, enqueue a hardware fill with a fixed 64-bit pattern, wait for the
 * completion and verify every byte matches the pattern repeated every 8
 * bytes.
 *
 * NOTE(review): this chunk appears elided -- error returns, closing braces
 * and the declarations of i/j are not visible here.
 */
235 test_enqueue_fill(int dev_id)
/* assorted lengths, including non-multiples of 8, to exercise pattern wrap */
237 const unsigned int lengths[] = {8, 64, 1024, 50, 100, 89};
238 struct rte_mbuf *dst = rte_pktmbuf_alloc(pool);
239 char *dst_data = rte_pktmbuf_mtod(dst, char *);
240 struct rte_mbuf *completed[2] = {0};
241 uint64_t pattern = 0xfedcba9876543210;
244 for (i = 0; i < RTE_DIM(lengths); i++) {
/* clear destination so stale data cannot masquerade as a successful fill */
246 memset(dst_data, 0, lengths[i]);
248 /* perform the fill operation */
249 if (rte_ioat_enqueue_fill(dev_id, pattern,
250 dst->buf_iova + dst->data_off, lengths[i],
251 (uintptr_t)dst) != 1) {
252 PRINT_ERR("Error with rte_ioat_enqueue_fill\n");
256 rte_ioat_perform_ops(dev_id);
/* exactly one completion expected per fill */
259 if (rte_ioat_completed_ops(dev_id, 1, (void *)&completed[0],
260 (void *)&completed[1]) != 1) {
261 PRINT_ERR("Error with completed ops\n");
264 /* check the result */
265 for (j = 0; j < lengths[i]; j++) {
/* the 8-byte pattern repeats; j % 8 selects the expected byte */
266 char pat_byte = ((char *)&pattern)[j % 8];
267 if (dst_data[j] != pat_byte) {
268 PRINT_ERR("Error with fill operation (lengths = %u): got (%x), not (%x)\n",
269 lengths[i], dst_data[j],
276 rte_pktmbuf_free(dst);
/*
 * Ring-capacity accounting test: checks that rte_ioat_burst_capacity()
 * decreases by exactly one per enqueued op, never increases while filling,
 * and returns to its original value once all ops are completed. Two outer
 * iterations ensure the ring wrap-around case is covered.
 *
 * NOTE(review): this chunk appears elided -- error returns and closing
 * braces are not visible here.
 */
281 test_burst_capacity(int dev_id)
283 #define BURST_SIZE 64
/* baseline capacity of the empty ring, captured before any enqueues */
284 const unsigned int ring_space = rte_ioat_burst_capacity(dev_id);
285 struct rte_mbuf *src, *dst;
286 unsigned int length = 1024;
287 unsigned int i, j, iter;
288 unsigned int old_cap, cap;
/* handles are ignored in this test; one array reused for src and dst */
289 uintptr_t completions[BURST_SIZE];
291 src = rte_pktmbuf_alloc(pool);
292 dst = rte_pktmbuf_alloc(pool);
294 old_cap = ring_space;
295 /* to test capacity, we enqueue elements and check capacity is reduced
296 * by one each time - rebaselining the expected value after each burst
297 * as the capacity is only for a burst. We enqueue multiple bursts to
298 * fill up half the ring, before emptying it again. We do this twice to
299 * ensure that we get to test scenarios where we get ring wrap-around
301 for (iter = 0; iter < 2; iter++) {
/* fill half the ring, BURST_SIZE ops at a time */
302 for (i = 0; i < ring_space / (2 * BURST_SIZE); i++) {
303 cap = rte_ioat_burst_capacity(dev_id);
/* capacity must be monotonically non-increasing while filling */
305 PRINT_ERR("Error, avail ring capacity has gone up, not down\n");
310 for (j = 0; j < BURST_SIZE; j++) {
/* same src/dst reused for every copy; handles set to 0 (unused) */
311 if (rte_ioat_enqueue_copy(dev_id, rte_pktmbuf_iova(src),
312 rte_pktmbuf_iova(dst), length, 0, 0) != 1) {
313 PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
/* each enqueue must shrink capacity by exactly one vs. burst baseline */
316 if (cap - rte_ioat_burst_capacity(dev_id) != j + 1) {
317 PRINT_ERR("Error, ring capacity did not change as expected\n");
321 rte_ioat_perform_ops(dev_id);
/* drain everything that was enqueued, one burst per completion call */
324 for (i = 0; i < ring_space / (2 * BURST_SIZE); i++) {
325 if (rte_ioat_completed_ops(dev_id, BURST_SIZE,
326 completions, completions) != BURST_SIZE) {
327 PRINT_ERR("Error with completions\n");
/* after draining, capacity must be back at the empty-ring baseline */
331 if (rte_ioat_burst_capacity(dev_id) != ring_space) {
332 PRINT_ERR("Error, ring capacity has not reset to original value\n");
335 old_cap = ring_space;
338 rte_pktmbuf_free(src);
339 rte_pktmbuf_free(dst);
/*
 * Entry point for the IOAT rawdev self-test (matches the prototype declared
 * earlier in this file). Configures the device with a test ring size,
 * starts it, creates an mbuf pool, then runs the copy, fill and
 * burst-capacity test cases 100x each while printing xstats, and finally
 * stops the device and resets its stats.
 *
 * Returns 0 on success; the skip path and error paths are partially elided
 * from this chunk (TEST_SKIPPED is defined above, presumably returned for
 * out-of-range dev_ids -- confirm against the full file).
 *
 * NOTE(review): this chunk appears elided -- error returns, NULL checks on
 * allocations (the 'if' lines for pool/ids/stats are missing), goto labels
 * and closing braces are not visible here.
 */
345 ioat_rawdev_test(uint16_t dev_id)
347 #define IOAT_TEST_RINGSIZE 512
/* ring_size = -1 sentinel: overwritten by the info_get call below */
348 struct rte_ioat_rawdev_config p = { .ring_size = -1 };
349 struct rte_rawdev_info info = { .dev_private = &p };
350 struct rte_rawdev_xstats_name *snames = NULL;
351 uint64_t *stats = NULL;
352 unsigned int *ids = NULL;
353 unsigned int nb_xstats;
/* expected_ring_size[] is indexed by dev_id, so larger ids cannot be tracked */
356 if (dev_id >= MAX_SUPPORTED_RAWDEVS) {
357 printf("Skipping test. Cannot test rawdevs with id's greater than %d\n",
358 MAX_SUPPORTED_RAWDEVS);
/* verify the device reports the ring size recorded from any previous run */
362 rte_rawdev_info_get(dev_id, &info, sizeof(p));
363 if (p.ring_size != expected_ring_size[dev_id]) {
364 PRINT_ERR("Error, initial ring size is not as expected (Actual: %d, Expected: %d)\n",
365 (int)p.ring_size, expected_ring_size[dev_id]);
/* reconfigure with the test ring size and confirm it took effect */
369 p.ring_size = IOAT_TEST_RINGSIZE;
370 if (rte_rawdev_configure(dev_id, &info, sizeof(p)) != 0) {
371 PRINT_ERR("Error with rte_rawdev_configure()\n");
374 rte_rawdev_info_get(dev_id, &info, sizeof(p));
375 if (p.ring_size != IOAT_TEST_RINGSIZE) {
376 PRINT_ERR("Error, ring size is not %d (%d)\n",
377 IOAT_TEST_RINGSIZE, (int)p.ring_size);
/* remember the configured size for the next invocation's check above */
380 expected_ring_size[dev_id] = p.ring_size;
382 if (rte_rawdev_start(dev_id) != 0) {
383 PRINT_ERR("Error with rte_rawdev_start()\n");
/* file-scope 'pool' feeds all the test cases; sized at 2x the ring */
387 pool = rte_pktmbuf_pool_create("TEST_IOAT_POOL",
388 p.ring_size * 2, /* n == num elements */
391 2048, /* data room size */
394 PRINT_ERR("Error with mempool creation\n");
398 /* allocate memory for xstats names and values */
399 nb_xstats = rte_rawdev_xstats_names_get(dev_id, NULL, 0);
401 snames = malloc(sizeof(*snames) * nb_xstats);
402 if (snames == NULL) {
403 PRINT_ERR("Error allocating xstat names memory\n");
406 rte_rawdev_xstats_names_get(dev_id, snames, nb_xstats);
408 ids = malloc(sizeof(*ids) * nb_xstats);
410 PRINT_ERR("Error allocating xstat ids memory\n");
/* query all stat ids 0..nb_xstats-1 (loop body elided) */
413 for (i = 0; i < nb_xstats; i++)
416 stats = malloc(sizeof(*stats) * nb_xstats);
418 PRINT_ERR("Error allocating xstat memory\n");
422 /* run the test cases */
423 printf("Running Copy Tests\n");
424 for (i = 0; i < 100; i++) {
427 if (test_enqueue_copies(dev_id) != 0)
/* dump all xstats after each iteration for progress/diagnostics */
430 rte_rawdev_xstats_get(dev_id, ids, stats, nb_xstats);
431 for (j = 0; j < nb_xstats; j++)
432 printf("%s: %"PRIu64" ", snames[j].name, stats[j]);
437 /* test enqueue fill operation */
438 printf("Running Fill Tests\n");
439 for (i = 0; i < 100; i++) {
442 if (test_enqueue_fill(dev_id) != 0)
445 rte_rawdev_xstats_get(dev_id, ids, stats, nb_xstats);
446 for (j = 0; j < nb_xstats; j++)
447 printf("%s: %"PRIu64" ", snames[j].name, stats[j]);
452 printf("Running Burst Capacity Test\n");
453 if (test_burst_capacity(dev_id) != 0)
/* success path: stop device, reset stats, free pool */
456 rte_rawdev_stop(dev_id);
457 if (rte_rawdev_xstats_reset(dev_id, NULL, 0) != 0) {
458 PRINT_ERR("Error resetting xstat values\n");
462 rte_mempool_free(pool);
/* error/cleanup path (label elided): same teardown, ignoring reset result */
469 rte_rawdev_stop(dev_id);
470 rte_rawdev_xstats_reset(dev_id, NULL, 0);
471 rte_mempool_free(pool);