1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
8 #include "rte_rawdev.h"
9 #include "rte_ioat_rawdev.h"
11 #define MAX_SUPPORTED_RAWDEVS 64
12 #define TEST_SKIPPED 77
14 int ioat_rawdev_test(uint16_t dev_id); /* pre-define to keep compiler happy */
16 static struct rte_mempool *pool;
17 static unsigned short expected_ring_size[MAX_SUPPORTED_RAWDEVS];
19 #define PRINT_ERR(...) print_err(__func__, __LINE__, __VA_ARGS__)
/*
 * Backend for the PRINT_ERR() macro: prints "In <func>:<line> - " to stderr
 * followed by the caller's printf-style message, and accumulates the number
 * of characters written in 'ret'.
 * NOTE(review): the return-type line and the va_start()/va_end() pairing for
 * 'ap' are not visible in this excerpt — confirm against the full file.
 */
22 __rte_format_printf(3, 4)
23 print_err(const char *func, int lineno, const char *format, ...)
28 ret = fprintf(stderr, "In %s:%d - ", func, lineno); /* location prefix */
30 ret += vfprintf(stderr, format, ap); /* caller-supplied message */
/*
 * Exercise the copy-offload path of one IOAT rawdev in two stages:
 *   1. a single 1024-byte mbuf-to-mbuf copy, checking the completion
 *      handles and the copied payload byte-for-byte;
 *   2. a burst of 32 copies enqueued before one perform_ops() kick,
 *      checking every completion handle and every payload.
 * Buffers come from the file-scope 'pool' mempool; payloads are filled
 * with rand() data so a stale destination buffer cannot pass the compare.
 * NOTE(review): the 'static int' signature line and the error-path
 * return/cleanup lines are not visible in this excerpt.
 */
37 test_enqueue_copies(int dev_id)
39 const unsigned int length = 1024; /* payload bytes per copy */
43 struct rte_mbuf *src, *dst;
44 char *src_data, *dst_data;
45 struct rte_mbuf *completed[2] = {0}; /* [0]=src handle, [1]=dst handle */
47 /* test doing a single copy */
48 src = rte_pktmbuf_alloc(pool);
49 dst = rte_pktmbuf_alloc(pool);
50 src->data_len = src->pkt_len = length;
51 dst->data_len = dst->pkt_len = length;
52 src_data = rte_pktmbuf_mtod(src, char *);
53 dst_data = rte_pktmbuf_mtod(dst, char *);
/* randomise the source so a match proves the copy actually ran */
55 for (i = 0; i < length; i++)
56 src_data[i] = rand() & 0xFF;
/* hand the hardware physical (IOVA) addresses; the mbuf pointers ride
 * along as opaque user handles and come back via completed_ops() */
58 if (rte_ioat_enqueue_copy(dev_id,
59 src->buf_iova + src->data_off,
60 dst->buf_iova + dst->data_off,
63 (uintptr_t)dst) != 1) {
64 PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
/* ring the doorbell so the device starts processing the descriptor */
67 rte_ioat_perform_ops(dev_id);
/* expect exactly the one completion, returning our two handles */
70 if (rte_ioat_completed_ops(dev_id, 1, (void *)&completed[0],
71 (void *)&completed[1]) != 1) {
72 PRINT_ERR("Error with rte_ioat_completed_ops\n");
75 if (completed[0] != src || completed[1] != dst) {
76 PRINT_ERR("Error with completions: got (%p, %p), not (%p,%p)\n",
77 completed[0], completed[1], src, dst);
/* verify the payload arrived intact */
81 for (i = 0; i < length; i++)
82 if (dst_data[i] != src_data[i]) {
83 PRINT_ERR("Data mismatch at char %u\n", i);
86 rte_pktmbuf_free(src);
87 rte_pktmbuf_free(dst);
90 /* test doing multiple copies */
92 struct rte_mbuf *srcs[32], *dsts[32];
93 struct rte_mbuf *completed_src[64]; /* sized >= burst for slack */
94 struct rte_mbuf *completed_dst[64];
/* enqueue all 32 copies before a single doorbell kick */
97 for (i = 0; i < RTE_DIM(srcs); i++) {
100 srcs[i] = rte_pktmbuf_alloc(pool);
101 dsts[i] = rte_pktmbuf_alloc(pool);
102 srcs[i]->data_len = srcs[i]->pkt_len = length;
103 dsts[i]->data_len = dsts[i]->pkt_len = length;
104 src_data = rte_pktmbuf_mtod(srcs[i], char *);
106 for (j = 0; j < length; j++)
107 src_data[j] = rand() & 0xFF;
109 if (rte_ioat_enqueue_copy(dev_id,
110 srcs[i]->buf_iova + srcs[i]->data_off,
111 dsts[i]->buf_iova + dsts[i]->data_off,
114 (uintptr_t)dsts[i]) != 1) {
115 PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n",
120 rte_ioat_perform_ops(dev_id);
/* ask for up to 64 completions but expect exactly the 32 enqueued;
 * NOTE(review): != against RTE_DIM() mixes signedness — confirm the
 * return type of completed_ops() makes this comparison safe */
123 if (rte_ioat_completed_ops(dev_id, 64, (void *)completed_src,
124 (void *)completed_dst) != RTE_DIM(srcs)) {
125 PRINT_ERR("Error with rte_ioat_completed_ops\n");
/* completions must come back in enqueue order with matching handles */
128 for (i = 0; i < RTE_DIM(srcs); i++) {
129 char *src_data, *dst_data; /* shadow the outer pair per-packet */
131 if (completed_src[i] != srcs[i]) {
132 PRINT_ERR("Error with source pointer %u\n", i);
135 if (completed_dst[i] != dsts[i]) {
136 PRINT_ERR("Error with dest pointer %u\n", i);
140 src_data = rte_pktmbuf_mtod(srcs[i], char *);
141 dst_data = rte_pktmbuf_mtod(dsts[i], char *);
142 for (j = 0; j < length; j++)
143 if (src_data[j] != dst_data[j]) {
144 PRINT_ERR("Error with copy of packet %u, byte %u\n",
148 rte_pktmbuf_free(srcs[i]);
149 rte_pktmbuf_free(dsts[i]);
/*
 * Self-test entry point for one IOAT rawdev (declared near the top of the
 * file).  Sequence: verify the device's current ring size matches what we
 * recorded for it, reconfigure to IOAT_TEST_RINGSIZE, start the device,
 * create the shared mbuf pool, snapshot the xstats name/id tables, run
 * test_enqueue_copies() 100 times printing xstats as it goes, then reset
 * the stats and release resources.
 * NOTE(review): the 'int' return-type line, the error-handling 'goto'
 * labels and the final return are not visible in this excerpt — the two
 * stop/reset/free sequences at the bottom are the success and error
 * cleanup paths respectively.
 */
158 ioat_rawdev_test(uint16_t dev_id)
160 #define IOAT_TEST_RINGSIZE 512
/* ring_size = -1 is a sentinel: info_get() below must overwrite it */
161 struct rte_ioat_rawdev_config p = { .ring_size = -1 };
162 struct rte_rawdev_info info = { .dev_private = &p };
163 struct rte_rawdev_xstats_name *snames = NULL;
164 uint64_t *stats = NULL;
165 unsigned int *ids = NULL;
166 unsigned int nb_xstats;
/* expected_ring_size[] is only MAX_SUPPORTED_RAWDEVS wide */
169 if (dev_id >= MAX_SUPPORTED_RAWDEVS) {
170 printf("Skipping test. Cannot test rawdevs with id's greater than %d\n",
171 MAX_SUPPORTED_RAWDEVS);
/* the driver fills 'p' via info.dev_private */
175 rte_rawdev_info_get(dev_id, &info, sizeof(p));
176 if (p.ring_size != expected_ring_size[dev_id]) {
177 PRINT_ERR("Error, initial ring size is not as expected (Actual: %d, Expected: %d)\n",
178 (int)p.ring_size, expected_ring_size[dev_id]);
/* reconfigure with a known ring size, then read it back to confirm */
182 p.ring_size = IOAT_TEST_RINGSIZE;
183 if (rte_rawdev_configure(dev_id, &info, sizeof(p)) != 0) {
184 PRINT_ERR("Error with rte_rawdev_configure()\n");
187 rte_rawdev_info_get(dev_id, &info, sizeof(p));
188 if (p.ring_size != IOAT_TEST_RINGSIZE) {
189 PRINT_ERR("Error, ring size is not %d (%d)\n",
190 IOAT_TEST_RINGSIZE, (int)p.ring_size);
/* remember the new size so a re-run of this test passes the check above */
193 expected_ring_size[dev_id] = p.ring_size;
195 if (rte_rawdev_start(dev_id) != 0) {
196 PRINT_ERR("Error with rte_rawdev_start()\n");
/* file-scope pool used by test_enqueue_copies() */
200 pool = rte_pktmbuf_pool_create("TEST_IOAT_POOL",
201 256, /* n == num elements */
204 2048, /* data room size */
207 PRINT_ERR("Error with mempool creation\n");
211 /* allocate memory for xstats names and values */
/* NOTE(review): a negative return here would wrap when stored in the
 * unsigned nb_xstats and oversize the mallocs below — confirm the API
 * cannot fail for a started device */
212 nb_xstats = rte_rawdev_xstats_names_get(dev_id, NULL, 0);
214 snames = malloc(sizeof(*snames) * nb_xstats);
215 if (snames == NULL) {
216 PRINT_ERR("Error allocating xstat names memory\n");
219 rte_rawdev_xstats_names_get(dev_id, snames, nb_xstats);
221 ids = malloc(sizeof(*ids) * nb_xstats);
223 PRINT_ERR("Error allocating xstat ids memory\n");
/* query every stat id 0..nb_xstats-1 */
226 for (i = 0; i < nb_xstats; i++)
229 stats = malloc(sizeof(*stats) * nb_xstats);
231 PRINT_ERR("Error allocating xstat memory\n");
235 /* run the test cases */
236 for (i = 0; i < 100; i++) {
239 if (test_enqueue_copies(dev_id) != 0)
/* print a running xstats line after each iteration */
242 rte_rawdev_xstats_get(dev_id, ids, stats, nb_xstats);
243 for (j = 0; j < nb_xstats; j++)
244 printf("%s: %"PRIu64" ", snames[j].name, stats[j]);
/* success path: stop the device, clear stats, free the pool */
249 rte_rawdev_stop(dev_id);
250 if (rte_rawdev_xstats_reset(dev_id, NULL, 0) != 0) {
251 PRINT_ERR("Error resetting xstat values\n");
255 rte_mempool_free(pool);
/* error path: same teardown, reset result deliberately ignored */
262 rte_rawdev_stop(dev_id);
263 rte_rawdev_xstats_reset(dev_id, NULL, 0);
264 rte_mempool_free(pool);