raw/ioat: add API to query remaining ring space
[dpdk.git] / drivers / raw / ioat / ioat_rawdev_test.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <unistd.h>
6 #include <inttypes.h>
7 #include <rte_mbuf.h>
8 #include "rte_rawdev.h"
9 #include "rte_ioat_rawdev.h"
10 #include "ioat_private.h"
11
/* highest dev_id the test supports; sizes the expected_ring_size table */
#define MAX_SUPPORTED_RAWDEVS 64
/* exit code conventionally used by test harnesses to mark a skipped test */
#define TEST_SKIPPED 77
/* number of bytes copied/filled per operation in the tests below */
#define COPY_LEN 1024

int ioat_rawdev_test(uint16_t dev_id); /* pre-define to keep compiler happy */

/* mbuf pool shared by all test cases; created in ioat_rawdev_test() */
static struct rte_mempool *pool;
/* ring size recorded per device after configure, verified on re-entry */
static unsigned short expected_ring_size[MAX_SUPPORTED_RAWDEVS];

/* error printf automatically prefixed with the calling function and line */
#define PRINT_ERR(...) print_err(__func__, __LINE__, __VA_ARGS__)
22
23 static inline int
24 __rte_format_printf(3, 4)
25 print_err(const char *func, int lineno, const char *format, ...)
26 {
27         va_list ap;
28         int ret;
29
30         ret = fprintf(stderr, "In %s:%d - ", func, lineno);
31         va_start(ap, format);
32         ret += vfprintf(stderr, format, ap);
33         va_end(ap);
34
35         return ret;
36 }
37
38 static int
39 do_multi_copies(int dev_id, int split_batches, int split_completions)
40 {
41         struct rte_mbuf *srcs[32], *dsts[32];
42         struct rte_mbuf *completed_src[64];
43         struct rte_mbuf *completed_dst[64];
44         unsigned int i, j;
45
46         for (i = 0; i < RTE_DIM(srcs); i++) {
47                 char *src_data;
48
49                 if (split_batches && i == RTE_DIM(srcs) / 2)
50                         rte_ioat_perform_ops(dev_id);
51
52                 srcs[i] = rte_pktmbuf_alloc(pool);
53                 dsts[i] = rte_pktmbuf_alloc(pool);
54                 src_data = rte_pktmbuf_mtod(srcs[i], char *);
55
56                 for (j = 0; j < COPY_LEN; j++)
57                         src_data[j] = rand() & 0xFF;
58
59                 if (rte_ioat_enqueue_copy(dev_id,
60                                 srcs[i]->buf_iova + srcs[i]->data_off,
61                                 dsts[i]->buf_iova + dsts[i]->data_off,
62                                 COPY_LEN,
63                                 (uintptr_t)srcs[i],
64                                 (uintptr_t)dsts[i]) != 1) {
65                         PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n",
66                                         i);
67                         return -1;
68                 }
69         }
70         rte_ioat_perform_ops(dev_id);
71         usleep(100);
72
73         if (split_completions) {
74                 /* gather completions in two halves */
75                 uint16_t half_len = RTE_DIM(srcs) / 2;
76                 if (rte_ioat_completed_ops(dev_id, half_len, (void *)completed_src,
77                                 (void *)completed_dst) != half_len) {
78                         PRINT_ERR("Error with rte_ioat_completed_ops - first half request\n");
79                         rte_rawdev_dump(dev_id, stdout);
80                         return -1;
81                 }
82                 if (rte_ioat_completed_ops(dev_id, half_len, (void *)&completed_src[half_len],
83                                 (void *)&completed_dst[half_len]) != half_len) {
84                         PRINT_ERR("Error with rte_ioat_completed_ops - second half request\n");
85                         rte_rawdev_dump(dev_id, stdout);
86                         return -1;
87                 }
88         } else {
89                 /* gather all completions in one go */
90                 if (rte_ioat_completed_ops(dev_id, 64, (void *)completed_src,
91                                 (void *)completed_dst) != RTE_DIM(srcs)) {
92                         PRINT_ERR("Error with rte_ioat_completed_ops\n");
93                         rte_rawdev_dump(dev_id, stdout);
94                         return -1;
95                 }
96         }
97         for (i = 0; i < RTE_DIM(srcs); i++) {
98                 char *src_data, *dst_data;
99
100                 if (completed_src[i] != srcs[i]) {
101                         PRINT_ERR("Error with source pointer %u\n", i);
102                         return -1;
103                 }
104                 if (completed_dst[i] != dsts[i]) {
105                         PRINT_ERR("Error with dest pointer %u\n", i);
106                         return -1;
107                 }
108
109                 src_data = rte_pktmbuf_mtod(srcs[i], char *);
110                 dst_data = rte_pktmbuf_mtod(dsts[i], char *);
111                 for (j = 0; j < COPY_LEN; j++)
112                         if (src_data[j] != dst_data[j]) {
113                                 PRINT_ERR("Error with copy of packet %u, byte %u\n",
114                                                 i, j);
115                                 return -1;
116                         }
117                 rte_pktmbuf_free(srcs[i]);
118                 rte_pktmbuf_free(dsts[i]);
119         }
120         return 0;
121 }
122
123 static int
124 test_enqueue_copies(int dev_id)
125 {
126         unsigned int i;
127
128         /* test doing a single copy */
129         do {
130                 struct rte_mbuf *src, *dst;
131                 char *src_data, *dst_data;
132                 struct rte_mbuf *completed[2] = {0};
133
134                 src = rte_pktmbuf_alloc(pool);
135                 dst = rte_pktmbuf_alloc(pool);
136                 src_data = rte_pktmbuf_mtod(src, char *);
137                 dst_data = rte_pktmbuf_mtod(dst, char *);
138
139                 for (i = 0; i < COPY_LEN; i++)
140                         src_data[i] = rand() & 0xFF;
141
142                 if (rte_ioat_enqueue_copy(dev_id,
143                                 src->buf_iova + src->data_off,
144                                 dst->buf_iova + dst->data_off,
145                                 COPY_LEN,
146                                 (uintptr_t)src,
147                                 (uintptr_t)dst) != 1) {
148                         PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
149                         return -1;
150                 }
151                 rte_ioat_perform_ops(dev_id);
152                 usleep(10);
153
154                 if (rte_ioat_completed_ops(dev_id, 1, (void *)&completed[0],
155                                 (void *)&completed[1]) != 1) {
156                         PRINT_ERR("Error with rte_ioat_completed_ops\n");
157                         return -1;
158                 }
159                 if (completed[0] != src || completed[1] != dst) {
160                         PRINT_ERR("Error with completions: got (%p, %p), not (%p,%p)\n",
161                                         completed[0], completed[1], src, dst);
162                         return -1;
163                 }
164
165                 for (i = 0; i < COPY_LEN; i++)
166                         if (dst_data[i] != src_data[i]) {
167                                 PRINT_ERR("Data mismatch at char %u [Got %02x not %02x]\n",
168                                                 i, dst_data[i], src_data[i]);
169                                 return -1;
170                         }
171                 rte_pktmbuf_free(src);
172                 rte_pktmbuf_free(dst);
173         } while (0);
174
175         /* test doing a multiple single copies */
176         do {
177                 const uint16_t max_ops = 4;
178                 struct rte_mbuf *src, *dst;
179                 char *src_data, *dst_data;
180                 struct rte_mbuf *completed[32] = {0};
181                 const uint16_t max_completions = RTE_DIM(completed) / 2;
182
183                 src = rte_pktmbuf_alloc(pool);
184                 dst = rte_pktmbuf_alloc(pool);
185                 src_data = rte_pktmbuf_mtod(src, char *);
186                 dst_data = rte_pktmbuf_mtod(dst, char *);
187
188                 for (i = 0; i < COPY_LEN; i++)
189                         src_data[i] = rand() & 0xFF;
190
191                 /* perform the same copy <max_ops> times */
192                 for (i = 0; i < max_ops; i++) {
193                         if (rte_ioat_enqueue_copy(dev_id,
194                                         src->buf_iova + src->data_off,
195                                         dst->buf_iova + dst->data_off,
196                                         COPY_LEN,
197                                         (uintptr_t)src,
198                                         (uintptr_t)dst) != 1) {
199                                 PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
200                                 return -1;
201                         }
202                         rte_ioat_perform_ops(dev_id);
203                 }
204                 usleep(10);
205
206                 if (rte_ioat_completed_ops(dev_id, max_completions, (void *)&completed[0],
207                                 (void *)&completed[max_completions]) != max_ops) {
208                         PRINT_ERR("Error with rte_ioat_completed_ops\n");
209                         rte_rawdev_dump(dev_id, stdout);
210                         return -1;
211                 }
212                 if (completed[0] != src || completed[max_completions] != dst) {
213                         PRINT_ERR("Error with completions: got (%p, %p), not (%p,%p)\n",
214                                         completed[0], completed[max_completions], src, dst);
215                         return -1;
216                 }
217
218                 for (i = 0; i < COPY_LEN; i++)
219                         if (dst_data[i] != src_data[i]) {
220                                 PRINT_ERR("Data mismatch at char %u\n", i);
221                                 return -1;
222                         }
223                 rte_pktmbuf_free(src);
224                 rte_pktmbuf_free(dst);
225         } while (0);
226
227         /* test doing multiple copies */
228         do_multi_copies(dev_id, 0, 0); /* enqueue and complete one batch at a time */
229         do_multi_copies(dev_id, 1, 0); /* enqueue 2 batches and then complete both */
230         do_multi_copies(dev_id, 0, 1); /* enqueue 1 batch, then complete in two halves */
231         return 0;
232 }
233
234 static int
235 test_enqueue_fill(int dev_id)
236 {
237         const unsigned int lengths[] = {8, 64, 1024, 50, 100, 89};
238         struct rte_mbuf *dst = rte_pktmbuf_alloc(pool);
239         char *dst_data = rte_pktmbuf_mtod(dst, char *);
240         struct rte_mbuf *completed[2] = {0};
241         uint64_t pattern = 0xfedcba9876543210;
242         unsigned int i, j;
243
244         for (i = 0; i < RTE_DIM(lengths); i++) {
245                 /* reset dst_data */
246                 memset(dst_data, 0, lengths[i]);
247
248                 /* perform the fill operation */
249                 if (rte_ioat_enqueue_fill(dev_id, pattern,
250                                 dst->buf_iova + dst->data_off, lengths[i],
251                                 (uintptr_t)dst) != 1) {
252                         PRINT_ERR("Error with rte_ioat_enqueue_fill\n");
253                         return -1;
254                 }
255
256                 rte_ioat_perform_ops(dev_id);
257                 usleep(100);
258
259                 if (rte_ioat_completed_ops(dev_id, 1, (void *)&completed[0],
260                         (void *)&completed[1]) != 1) {
261                         PRINT_ERR("Error with completed ops\n");
262                         return -1;
263                 }
264                 /* check the result */
265                 for (j = 0; j < lengths[i]; j++) {
266                         char pat_byte = ((char *)&pattern)[j % 8];
267                         if (dst_data[j] != pat_byte) {
268                                 PRINT_ERR("Error with fill operation (lengths = %u): got (%x), not (%x)\n",
269                                                 lengths[i], dst_data[j],
270                                                 pat_byte);
271                                 return -1;
272                         }
273                 }
274         }
275
276         rte_pktmbuf_free(dst);
277         return 0;
278 }
279
280 static int
281 test_burst_capacity(int dev_id)
282 {
283 #define BURST_SIZE                      64
284         const unsigned int ring_space = rte_ioat_burst_capacity(dev_id);
285         struct rte_mbuf *src, *dst;
286         unsigned int length = 1024;
287         unsigned int i, j, iter;
288         unsigned int old_cap, cap;
289         uintptr_t completions[BURST_SIZE];
290
291         src = rte_pktmbuf_alloc(pool);
292         dst = rte_pktmbuf_alloc(pool);
293
294         old_cap = ring_space;
295         /* to test capacity, we enqueue elements and check capacity is reduced
296          * by one each time - rebaselining the expected value after each burst
297          * as the capacity is only for a burst. We enqueue multiple bursts to
298          * fill up half the ring, before emptying it again. We do this twice to
299          * ensure that we get to test scenarios where we get ring wrap-around
300          */
301         for (iter = 0; iter < 2; iter++) {
302                 for (i = 0; i < ring_space / (2 * BURST_SIZE); i++) {
303                         cap = rte_ioat_burst_capacity(dev_id);
304                         if (cap > old_cap) {
305                                 PRINT_ERR("Error, avail ring capacity has gone up, not down\n");
306                                 return -1;
307                         }
308                         old_cap = cap;
309
310                         for (j = 0; j < BURST_SIZE; j++) {
311                                 if (rte_ioat_enqueue_copy(dev_id, rte_pktmbuf_iova(src),
312                                                 rte_pktmbuf_iova(dst), length, 0, 0) != 1) {
313                                         PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
314                                         return -1;
315                                 }
316                                 if (cap - rte_ioat_burst_capacity(dev_id) != j + 1) {
317                                         PRINT_ERR("Error, ring capacity did not change as expected\n");
318                                         return -1;
319                                 }
320                         }
321                         rte_ioat_perform_ops(dev_id);
322                 }
323                 usleep(100);
324                 for (i = 0; i < ring_space / (2 * BURST_SIZE); i++) {
325                         if (rte_ioat_completed_ops(dev_id, BURST_SIZE,
326                                         completions, completions) != BURST_SIZE) {
327                                 PRINT_ERR("Error with completions\n");
328                                 return -1;
329                         }
330                 }
331                 if (rte_ioat_burst_capacity(dev_id) != ring_space) {
332                         PRINT_ERR("Error, ring capacity has not reset to original value\n");
333                         return -1;
334                 }
335                 old_cap = ring_space;
336         }
337
338         rte_pktmbuf_free(src);
339         rte_pktmbuf_free(dst);
340
341         return 0;
342 }
343
344 int
345 ioat_rawdev_test(uint16_t dev_id)
346 {
347 #define IOAT_TEST_RINGSIZE 512
348         struct rte_ioat_rawdev_config p = { .ring_size = -1 };
349         struct rte_rawdev_info info = { .dev_private = &p };
350         struct rte_rawdev_xstats_name *snames = NULL;
351         uint64_t *stats = NULL;
352         unsigned int *ids = NULL;
353         unsigned int nb_xstats;
354         unsigned int i;
355
356         if (dev_id >= MAX_SUPPORTED_RAWDEVS) {
357                 printf("Skipping test. Cannot test rawdevs with id's greater than %d\n",
358                                 MAX_SUPPORTED_RAWDEVS);
359                 return TEST_SKIPPED;
360         }
361
362         rte_rawdev_info_get(dev_id, &info, sizeof(p));
363         if (p.ring_size != expected_ring_size[dev_id]) {
364                 PRINT_ERR("Error, initial ring size is not as expected (Actual: %d, Expected: %d)\n",
365                                 (int)p.ring_size, expected_ring_size[dev_id]);
366                 return -1;
367         }
368
369         p.ring_size = IOAT_TEST_RINGSIZE;
370         if (rte_rawdev_configure(dev_id, &info, sizeof(p)) != 0) {
371                 PRINT_ERR("Error with rte_rawdev_configure()\n");
372                 return -1;
373         }
374         rte_rawdev_info_get(dev_id, &info, sizeof(p));
375         if (p.ring_size != IOAT_TEST_RINGSIZE) {
376                 PRINT_ERR("Error, ring size is not %d (%d)\n",
377                                 IOAT_TEST_RINGSIZE, (int)p.ring_size);
378                 return -1;
379         }
380         expected_ring_size[dev_id] = p.ring_size;
381
382         if (rte_rawdev_start(dev_id) != 0) {
383                 PRINT_ERR("Error with rte_rawdev_start()\n");
384                 return -1;
385         }
386
387         pool = rte_pktmbuf_pool_create("TEST_IOAT_POOL",
388                         p.ring_size * 2, /* n == num elements */
389                         32,  /* cache size */
390                         0,   /* priv size */
391                         2048, /* data room size */
392                         info.socket_id);
393         if (pool == NULL) {
394                 PRINT_ERR("Error with mempool creation\n");
395                 return -1;
396         }
397
398         /* allocate memory for xstats names and values */
399         nb_xstats = rte_rawdev_xstats_names_get(dev_id, NULL, 0);
400
401         snames = malloc(sizeof(*snames) * nb_xstats);
402         if (snames == NULL) {
403                 PRINT_ERR("Error allocating xstat names memory\n");
404                 goto err;
405         }
406         rte_rawdev_xstats_names_get(dev_id, snames, nb_xstats);
407
408         ids = malloc(sizeof(*ids) * nb_xstats);
409         if (ids == NULL) {
410                 PRINT_ERR("Error allocating xstat ids memory\n");
411                 goto err;
412         }
413         for (i = 0; i < nb_xstats; i++)
414                 ids[i] = i;
415
416         stats = malloc(sizeof(*stats) * nb_xstats);
417         if (stats == NULL) {
418                 PRINT_ERR("Error allocating xstat memory\n");
419                 goto err;
420         }
421
422         /* run the test cases */
423         printf("Running Copy Tests\n");
424         for (i = 0; i < 100; i++) {
425                 unsigned int j;
426
427                 if (test_enqueue_copies(dev_id) != 0)
428                         goto err;
429
430                 rte_rawdev_xstats_get(dev_id, ids, stats, nb_xstats);
431                 for (j = 0; j < nb_xstats; j++)
432                         printf("%s: %"PRIu64"   ", snames[j].name, stats[j]);
433                 printf("\r");
434         }
435         printf("\n");
436
437         /* test enqueue fill operation */
438         printf("Running Fill Tests\n");
439         for (i = 0; i < 100; i++) {
440                 unsigned int j;
441
442                 if (test_enqueue_fill(dev_id) != 0)
443                         goto err;
444
445                 rte_rawdev_xstats_get(dev_id, ids, stats, nb_xstats);
446                 for (j = 0; j < nb_xstats; j++)
447                         printf("%s: %"PRIu64"   ", snames[j].name, stats[j]);
448                 printf("\r");
449         }
450         printf("\n");
451
452         printf("Running Burst Capacity Test\n");
453         if (test_burst_capacity(dev_id) != 0)
454                 goto err;
455
456         rte_rawdev_stop(dev_id);
457         if (rte_rawdev_xstats_reset(dev_id, NULL, 0) != 0) {
458                 PRINT_ERR("Error resetting xstat values\n");
459                 goto err;
460         }
461
462         rte_mempool_free(pool);
463         free(snames);
464         free(stats);
465         free(ids);
466         return 0;
467
468 err:
469         rte_rawdev_stop(dev_id);
470         rte_rawdev_xstats_reset(dev_id, NULL, 0);
471         rte_mempool_free(pool);
472         free(snames);
473         free(stats);
474         free(ids);
475         return -1;
476 }