test/eventdev: add octeontx simple enq/deq tests
[dpdk.git] / test / test / test_eventdev_octeontx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2017 Cavium networks. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *       * Redistributions of source code must retain the above copyright
11  *         notice, this list of conditions and the following disclaimer.
12  *       * Redistributions in binary form must reproduce the above copyright
13  *         notice, this list of conditions and the following disclaimer in
14  *         the documentation and/or other materials provided with the
15  *         distribution.
16  *       * Neither the name of Cavium networks nor the names of its
17  *         contributors may be used to endorse or promote products derived
18  *         from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <rte_atomic.h>
34 #include <rte_common.h>
35 #include <rte_cycles.h>
36 #include <rte_debug.h>
37 #include <rte_eal.h>
38 #include <rte_ethdev.h>
39 #include <rte_eventdev.h>
40 #include <rte_hexdump.h>
41 #include <rte_mbuf.h>
42 #include <rte_malloc.h>
43 #include <rte_memcpy.h>
44 #include <rte_launch.h>
45 #include <rte_lcore.h>
46 #include <rte_per_lcore.h>
47 #include <rte_random.h>
48
49 #include "test.h"
50
51 #define NUM_PACKETS (1 << 18)
52 #define MAX_EVENTS  (16 * 1024)
53
/* Identifier of the event device under test (set in testsuite_setup). */
static int evdev;
/* Per-test mbuf pool; created in _eventdev_setup, freed in eventdev_teardown. */
static struct rte_mempool *eventdev_test_mempool;

/*
 * Snapshot of the attributes an event was enqueued with, stored in the
 * mbuf data area so validate_event() can compare them against the values
 * seen at dequeue time.
 */
struct event_attr {
	uint32_t flow_id;        /* flow id used at enqueue */
	uint8_t event_type;      /* RTE_EVENT_TYPE_* */
	uint8_t sub_event_type;  /* application sub event type */
	uint8_t sched_type;      /* RTE_SCHED_TYPE_* */
	uint8_t queue;           /* queue the event was injected on */
	uint8_t port;            /* port the event was injected from */
};
65
66 static int
67 testsuite_setup(void)
68 {
69         const char *eventdev_name = "event_octeontx";
70
71         evdev = rte_event_dev_get_dev_id(eventdev_name);
72         if (evdev < 0) {
73                 printf("%d: Eventdev %s not found - creating.\n",
74                                 __LINE__, eventdev_name);
75                 if (rte_eal_vdev_init(eventdev_name, NULL) < 0) {
76                         printf("Error creating eventdev %s\n", eventdev_name);
77                         return TEST_FAILED;
78                 }
79                 evdev = rte_event_dev_get_dev_id(eventdev_name);
80                 if (evdev < 0) {
81                         printf("Error finding newly created eventdev\n");
82                         return TEST_FAILED;
83                 }
84         }
85
86         return TEST_SUCCESS;
87 }
88
/* Close the event device once the whole suite has finished. */
static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}
94
95 static inline void
96 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
97                         struct rte_event_dev_info *info)
98 {
99         memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
100         dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
101         dev_conf->nb_event_ports = info->max_event_ports;
102         dev_conf->nb_event_queues = info->max_event_queues;
103         dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
104         dev_conf->nb_event_port_dequeue_depth =
105                         info->max_event_port_dequeue_depth;
106         dev_conf->nb_event_port_enqueue_depth =
107                         info->max_event_port_enqueue_depth;
108         dev_conf->nb_event_port_enqueue_depth =
109                         info->max_event_port_enqueue_depth;
110         dev_conf->nb_events_limit =
111                         info->max_num_events;
112 }
113
/* Setup flavors accepted by _eventdev_setup(). */
enum {
	TEST_EVENTDEV_SETUP_DEFAULT,         /* default queue priorities */
	TEST_EVENTDEV_SETUP_PRIORITY,        /* spread HIGHEST..LOWEST over queues */
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT, /* enable per-dequeue timeout */
};
119
/*
 * Common per-test setup: create the mbuf pool, configure the event device
 * according to 'mode', set up and link all queues/ports, and start the
 * device. Returns TEST_SUCCESS or TEST_FAILED.
 */
static inline int
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_octeontx_test_pool";

	/* Create and destroy pool for each test case to make it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
					MAX_EVENTS,
					0 /*MBUF_CACHE_SIZE*/,
					0,
					512, /* Use very small mbufs */
					rte_socket_id());
	if (!eventdev_test_mempool) {
		printf("ERROR creating mempool\n");
		return TEST_FAILED;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	/* Tests inject up to MAX_EVENTS; the device must be able to hold them. */
	TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		/* Configure event queues(0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				rte_event_queue_count(evdev);
		for (i = 0; i < rte_event_queue_count(evdev); i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
						&queue_conf);
			TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d", i);
			/* Lower queue index -> numerically lower (= higher)
			 * priority.
			 */
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
		}

	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < rte_event_queue_count(evdev); i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
		}
	}
	/* Configure event ports; NULL link list links every queue to the port */
	for (i = 0; i < rte_event_port_count(evdev); i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", i);
	}

	ret = rte_event_dev_start(evdev);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return TEST_SUCCESS;
}
191
/* Per-test setup hook: configure the device with default queue priorities. */
static inline int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}
197
/* Per-test teardown hook: stop the device and release the test mbuf pool. */
static inline void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}
204
205 static inline void
206 update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
207                         uint32_t flow_id, uint8_t event_type,
208                         uint8_t sub_event_type, uint8_t sched_type,
209                         uint8_t queue, uint8_t port)
210 {
211         struct event_attr *attr;
212
213         /* Store the event attributes in mbuf for future reference */
214         attr = rte_pktmbuf_mtod(m, struct event_attr *);
215         attr->flow_id = flow_id;
216         attr->event_type = event_type;
217         attr->sub_event_type = sub_event_type;
218         attr->sched_type = sched_type;
219         attr->queue = queue;
220         attr->port = port;
221
222         ev->flow_id = flow_id;
223         ev->sub_event_type = sub_event_type;
224         ev->event_type = event_type;
225         /* Inject the new event */
226         ev->op = RTE_EVENT_OP_NEW;
227         ev->sched_type = sched_type;
228         ev->queue_id = queue;
229         ev->mbuf = m;
230 }
231
232 static inline int
233 inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
234                 uint8_t sched_type, uint8_t queue, uint8_t port,
235                 unsigned int events)
236 {
237         struct rte_mbuf *m;
238         unsigned int i;
239
240         for (i = 0; i < events; i++) {
241                 struct rte_event ev = {.event = 0, .u64 = 0};
242
243                 m = rte_pktmbuf_alloc(eventdev_test_mempool);
244                 TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
245
246                 m->seqn = i;
247                 update_event_and_validation_attr(m, &ev, flow_id, event_type,
248                         sub_event_type, sched_type, queue, port);
249                 rte_event_enqueue_burst(evdev, port, &ev, 1);
250         }
251         return 0;
252 }
253
254 static inline int
255 check_excess_events(uint8_t port)
256 {
257         int i;
258         uint16_t valid_event;
259         struct rte_event ev;
260
261         /* Check for excess events, try for a few times and exit */
262         for (i = 0; i < 32; i++) {
263                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
264
265                 TEST_ASSERT_SUCCESS(valid_event, "Unexpected valid event=%d",
266                                         ev.mbuf->seqn);
267         }
268         return 0;
269 }
270
/*
 * Compare the attributes stored in the mbuf at enqueue time against the
 * fields of the dequeued event. Returns 0 on match; the assert macros
 * return TEST_FAILED on the first mismatch.
 * NOTE(review): attr->port is recorded but not checked here - the dequeued
 * rte_event carries no port field to compare against.
 */
static inline int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq =%d",
			attr->flow_id, ev->flow_id);
	TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq =%d",
			attr->event_type, ev->event_type);
	TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq =%d",
			attr->sub_event_type, ev->sub_event_type);
	TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq =%d",
			attr->sched_type, ev->sched_type);
	TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq =%d",
			attr->queue, ev->queue_id);
	return 0;
}
294
/* Optional per-event hook run by consume_events(); 'index' is the dequeue
 * order (0-based), 'port' the dequeue port. Return 0 to continue.
 */
typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);

/*
 * Dequeue 'total_events' events from 'port', validating each against its
 * stored enqueue attributes and the optional callback 'fn'. Declares a
 * deadlock if no event arrives within UINT16_MAX consecutive empty polls,
 * then checks no excess events remain. Frees each consumed mbuf.
 */
static inline int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		/* Counter is reset on every successful dequeue, so it only
		 * trips after UINT16_MAX empty polls in a row.
		 */
		if (++forward_progress_cnt > UINT16_MAX) {
			printf("Detected deadlock\n");
			return TEST_FAILED;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return TEST_FAILED;

		if (fn != NULL) {
			ret = fn(index, port, &ev);
			TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
		}

		++index;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	/* Nothing should be left in the scheduler after total_events. */
	return check_excess_events(port);
}
336
/* Callback for consume_events(): events must come back in the exact order
 * they were injected, i.e. dequeue index == mbuf sequence number.
 */
static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	RTE_SET_USED(port);
	TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d", index,
					ev->mbuf->seqn);
	return 0;
}
345
/*
 * Single-port smoke test: inject MAX_EVENTS events with the given scheduling
 * type on queue 0 / port 0, then consume them all from the same port and
 * verify they arrive in injection order.
 */
static inline int
test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /*flow_id */,
				RTE_EVENT_TYPE_CPU /* event_type */,
				0 /* sub_event_type */,
				sched_type,
				0 /* queue */,
				0 /* port */,
				MAX_EVENTS);
	if (ret)
		return TEST_FAILED;

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}
363
/* Simple enq/deq with ORDERED scheduling. */
static int
test_simple_enqdeq_ordered(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
}
369
/* Simple enq/deq with ATOMIC scheduling. */
static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}
375
/* Simple enq/deq with PARALLEL scheduling. */
static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}
381
/* Test-suite table: each case gets a freshly configured device via the
 * eventdev_setup/eventdev_teardown per-case hooks.
 */
static struct unit_test_suite eventdev_octeontx_testsuite  = {
	.suite_name = "eventdev octeontx unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
396
/* Entry point invoked by the test framework command below. */
static int
test_eventdev_octeontx(void)
{
	return unit_test_suite_runner(&eventdev_octeontx_testsuite);
}

/* Expose the suite as 'eventdev_octeontx_autotest' in the test app shell. */
REGISTER_TEST_COMMAND(eventdev_octeontx_autotest, test_eventdev_octeontx);