be5056911a98386ac49dbb2d0c8db04c5cbb1cc8
[dpdk.git] / test / test / test_eventdev_octeontx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2017 Cavium networks. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *       * Redistributions of source code must retain the above copyright
11  *         notice, this list of conditions and the following disclaimer.
12  *       * Redistributions in binary form must reproduce the above copyright
13  *         notice, this list of conditions and the following disclaimer in
14  *         the documentation and/or other materials provided with the
15  *         distribution.
16  *       * Neither the name of Cavium networks nor the names of its
17  *         contributors may be used to endorse or promote products derived
18  *         from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <rte_atomic.h>
34 #include <rte_common.h>
35 #include <rte_cycles.h>
36 #include <rte_debug.h>
37 #include <rte_eal.h>
38 #include <rte_ethdev.h>
39 #include <rte_eventdev.h>
40 #include <rte_hexdump.h>
41 #include <rte_mbuf.h>
42 #include <rte_malloc.h>
43 #include <rte_memcpy.h>
44 #include <rte_launch.h>
45 #include <rte_lcore.h>
46 #include <rte_per_lcore.h>
47 #include <rte_random.h>
48
49 #include "test.h"
50
51 #define NUM_PACKETS (1 << 18)
52 #define MAX_EVENTS  (16 * 1024)
53
/* Device id of the octeontx eventdev under test (set in testsuite_setup()). */
static int evdev;
/* Mbuf pool created per test case by _eventdev_setup(), freed in teardown. */
static struct rte_mempool *eventdev_test_mempool;
56
/*
 * Event attributes saved into the mbuf data area at enqueue time so that
 * validate_event() can compare them against the event returned on dequeue.
 */
struct event_attr {
        uint32_t flow_id;
        uint8_t event_type;
        uint8_t sub_event_type;
        uint8_t sched_type;
        uint8_t queue;
        uint8_t port;
};
65
66 static int
67 testsuite_setup(void)
68 {
69         const char *eventdev_name = "event_octeontx";
70
71         evdev = rte_event_dev_get_dev_id(eventdev_name);
72         if (evdev < 0) {
73                 printf("%d: Eventdev %s not found - creating.\n",
74                                 __LINE__, eventdev_name);
75                 if (rte_eal_vdev_init(eventdev_name, NULL) < 0) {
76                         printf("Error creating eventdev %s\n", eventdev_name);
77                         return TEST_FAILED;
78                 }
79                 evdev = rte_event_dev_get_dev_id(eventdev_name);
80                 if (evdev < 0) {
81                         printf("Error finding newly created eventdev\n");
82                         return TEST_FAILED;
83                 }
84         }
85
86         return TEST_SUCCESS;
87 }
88
/* Close the eventdev opened/created in testsuite_setup(). */
static void
testsuite_teardown(void)
{
        rte_event_dev_close(evdev);
}
94
95 static inline void
96 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
97                         struct rte_event_dev_info *info)
98 {
99         memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
100         dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
101         dev_conf->nb_event_ports = info->max_event_ports;
102         dev_conf->nb_event_queues = info->max_event_queues;
103         dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
104         dev_conf->nb_event_port_dequeue_depth =
105                         info->max_event_port_dequeue_depth;
106         dev_conf->nb_event_port_enqueue_depth =
107                         info->max_event_port_enqueue_depth;
108         dev_conf->nb_event_port_enqueue_depth =
109                         info->max_event_port_enqueue_depth;
110         dev_conf->nb_events_limit =
111                         info->max_num_events;
112 }
113
/* Queue/device configuration modes understood by _eventdev_setup(). */
enum {
        TEST_EVENTDEV_SETUP_DEFAULT,
        TEST_EVENTDEV_SETUP_PRIORITY,
        TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};
119
/*
 * Per-test-case bring-up: create the mbuf pool, configure the eventdev with
 * the device maximums, set up all event queues (with a spread of priorities
 * or a per-dequeue timeout depending on @mode), set up and link all event
 * ports, then start the device.
 */
static inline int
_eventdev_setup(int mode)
{
        int i, ret;
        struct rte_event_dev_config dev_conf;
        struct rte_event_dev_info info;
        const char *pool_name = "evdev_octeontx_test_pool";

        /* Create and destroy pool for each test case to make it standalone */
        eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
                                        MAX_EVENTS,
                                        0 /*MBUF_CACHE_SIZE*/,
                                        0,
                                        512, /* Use very small mbufs */
                                        rte_socket_id());
        if (!eventdev_test_mempool) {
                printf("ERROR creating mempool\n");
                return TEST_FAILED;
        }

        ret = rte_event_dev_info_get(evdev, &info);
        TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
        /* The tests may inject up to MAX_EVENTS NEW events at once */
        TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
                        "max_num_events=%d < max_events=%d",
                        info.max_num_events, MAX_EVENTS);

        devconf_set_default_sane_values(&dev_conf, &info);
        if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
                dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

        ret = rte_event_dev_configure(evdev, &dev_conf);
        TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

        if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
                /* Configure event queues(0 to n) with
                 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
                 * RTE_EVENT_DEV_PRIORITY_LOWEST
                 */
                uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
                                rte_event_queue_count(evdev);
                for (i = 0; i < rte_event_queue_count(evdev); i++) {
                        struct rte_event_queue_conf queue_conf;

                        ret = rte_event_queue_default_conf_get(evdev, i,
                                                &queue_conf);
                        TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d", i);
                        queue_conf.priority = i * step;
                        ret = rte_event_queue_setup(evdev, i, &queue_conf);
                        TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
                }

        } else {
                /* Configure event queues with default priority */
                for (i = 0; i < rte_event_queue_count(evdev); i++) {
                        ret = rte_event_queue_setup(evdev, i, NULL);
                        TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
                }
        }
        /* Configure event ports; NULL link args link every queue to the port */
        for (i = 0; i < rte_event_port_count(evdev); i++) {
                ret = rte_event_port_setup(evdev, i, NULL);
                TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
                ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
                TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", i);
        }

        ret = rte_event_dev_start(evdev);
        TEST_ASSERT_SUCCESS(ret, "Failed to start device");

        return TEST_SUCCESS;
}
191
/* Default per-test-case setup: all queues at default priority. */
static inline int
eventdev_setup(void)
{
        return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}
197
/* Per-test-case teardown: stop the device and free the test mbuf pool. */
static inline void
eventdev_teardown(void)
{
        rte_event_dev_stop(evdev);
        rte_mempool_free(eventdev_test_mempool);
}
204
205 static inline void
206 update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
207                         uint32_t flow_id, uint8_t event_type,
208                         uint8_t sub_event_type, uint8_t sched_type,
209                         uint8_t queue, uint8_t port)
210 {
211         struct event_attr *attr;
212
213         /* Store the event attributes in mbuf for future reference */
214         attr = rte_pktmbuf_mtod(m, struct event_attr *);
215         attr->flow_id = flow_id;
216         attr->event_type = event_type;
217         attr->sub_event_type = sub_event_type;
218         attr->sched_type = sched_type;
219         attr->queue = queue;
220         attr->port = port;
221
222         ev->flow_id = flow_id;
223         ev->sub_event_type = sub_event_type;
224         ev->event_type = event_type;
225         /* Inject the new event */
226         ev->op = RTE_EVENT_OP_NEW;
227         ev->sched_type = sched_type;
228         ev->queue_id = queue;
229         ev->mbuf = m;
230 }
231
232 static inline int
233 inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
234                 uint8_t sched_type, uint8_t queue, uint8_t port,
235                 unsigned int events)
236 {
237         struct rte_mbuf *m;
238         unsigned int i;
239
240         for (i = 0; i < events; i++) {
241                 struct rte_event ev = {.event = 0, .u64 = 0};
242
243                 m = rte_pktmbuf_alloc(eventdev_test_mempool);
244                 TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
245
246                 m->seqn = i;
247                 update_event_and_validation_attr(m, &ev, flow_id, event_type,
248                         sub_event_type, sched_type, queue, port);
249                 rte_event_enqueue_burst(evdev, port, &ev, 1);
250         }
251         return 0;
252 }
253
254 static inline int
255 check_excess_events(uint8_t port)
256 {
257         int i;
258         uint16_t valid_event;
259         struct rte_event ev;
260
261         /* Check for excess events, try for a few times and exit */
262         for (i = 0; i < 32; i++) {
263                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
264
265                 TEST_ASSERT_SUCCESS(valid_event, "Unexpected valid event=%d",
266                                         ev.mbuf->seqn);
267         }
268         return 0;
269 }
270
/*
 * Inject @total_events events, one per call, each from port 0 with a random
 * flow id, event type, sub event type, sched type and destination queue.
 * Returns TEST_FAILED on the first injection failure, 0 otherwise.
 */
static inline int
generate_random_events(const unsigned int total_events)
{
        struct rte_event_dev_info info;
        unsigned int i;
        int ret;

        ret = rte_event_dev_info_get(evdev, &info);
        TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
        for (i = 0; i < total_events; i++) {
                ret = inject_events(
                        rte_rand() % info.max_event_queue_flows /*flow_id */,
                        rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
                        rte_rand() % 256 /* sub_event_type */,
                        rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
                        rte_rand() % rte_event_queue_count(evdev) /* queue */,
                        0 /* port */,
                        1 /* events */);
                if (ret)
                        return TEST_FAILED;
        }
        return ret;
}
294
295
/*
 * Compare a dequeued event against the attributes stored in its mbuf data
 * area at enqueue time (see update_event_and_validation_attr()).
 * Note: the 'port' attribute is deliberately not checked here; any port may
 * legally dequeue the event.
 */
static inline int
validate_event(struct rte_event *ev)
{
        struct event_attr *attr;

        attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
        TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
                        "flow_id mismatch enq=%d deq =%d",
                        attr->flow_id, ev->flow_id);
        TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
                        "event_type mismatch enq=%d deq =%d",
                        attr->event_type, ev->event_type);
        TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
                        "sub_event_type mismatch enq=%d deq =%d",
                        attr->sub_event_type, ev->sub_event_type);
        TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
                        "sched_type mismatch enq=%d deq =%d",
                        attr->sched_type, ev->sched_type);
        TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
                        "queue mismatch enq=%d deq =%d",
                        attr->queue, ev->queue_id);
        return 0;
}
319
/* Optional test-specific per-event check invoked by consume_events(). */
typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
                                 struct rte_event *ev);
322
/*
 * Dequeue @total_events events from @port, validating each against its
 * stored attributes and, when @fn is non-NULL, running the test-specific
 * callback. Declares a deadlock after UINT16_MAX consecutive empty polls.
 * Finally verifies no excess events remain on the port.
 */
static inline int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
        int ret;
        uint16_t valid_event;
        uint32_t events = 0, forward_progress_cnt = 0, index = 0;
        struct rte_event ev;

        while (1) {
                if (++forward_progress_cnt > UINT16_MAX) {
                        printf("Detected deadlock\n");
                        return TEST_FAILED;
                }

                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
                if (!valid_event)
                        continue;

                /* Reset the no-progress counter on every successful dequeue */
                forward_progress_cnt = 0;
                ret = validate_event(&ev);
                if (ret)
                        return TEST_FAILED;

                if (fn != NULL) {
                        ret = fn(index, port, &ev);
                        TEST_ASSERT_SUCCESS(ret,
                                "Failed to validate test specific event");
                }

                ++index;

                rte_pktmbuf_free(ev.mbuf);
                if (++events >= total_events)
                        break;
        }

        return check_excess_events(port);
}
361
362 static int
363 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
364 {
365         RTE_SET_USED(port);
366         TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d", index,
367                                         ev->mbuf->seqn);
368         return 0;
369 }
370
371 static inline int
372 test_simple_enqdeq(uint8_t sched_type)
373 {
374         int ret;
375
376         ret = inject_events(0 /*flow_id */,
377                                 RTE_EVENT_TYPE_CPU /* event_type */,
378                                 0 /* sub_event_type */,
379                                 sched_type,
380                                 0 /* queue */,
381                                 0 /* port */,
382                                 MAX_EVENTS);
383         if (ret)
384                 return TEST_FAILED;
385
386         return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
387 }
388
/* Simple enqueue/dequeue test with ORDERED scheduling. */
static int
test_simple_enqdeq_ordered(void)
{
        return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
}
394
/* Simple enqueue/dequeue test with ATOMIC scheduling. */
static int
test_simple_enqdeq_atomic(void)
{
        return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}
400
/* Simple enqueue/dequeue test with PARALLEL scheduling. */
static int
test_simple_enqdeq_parallel(void)
{
        return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}
406
407 /*
408  * Generate a prescribed number of events and spread them across available
409  * queues. On dequeue, using single event port(port 0) verify the enqueued
410  * event attributes
411  */
412 static int
413 test_multi_queue_enq_single_port_deq(void)
414 {
415         int ret;
416
417         ret = generate_random_events(MAX_EVENTS);
418         if (ret)
419                 return TEST_FAILED;
420
421         return consume_events(0 /* port */, MAX_EVENTS, NULL);
422 }
423
/*
 * Test suite definition: every case gets a fresh eventdev/mempool via
 * eventdev_setup()/eventdev_teardown().
 */
static struct unit_test_suite eventdev_octeontx_testsuite  = {
        .suite_name = "eventdev octeontx unit test suite",
        .setup = testsuite_setup,
        .teardown = testsuite_teardown,
        .unit_test_cases = {
                TEST_CASE_ST(eventdev_setup, eventdev_teardown,
                        test_simple_enqdeq_ordered),
                TEST_CASE_ST(eventdev_setup, eventdev_teardown,
                        test_simple_enqdeq_atomic),
                TEST_CASE_ST(eventdev_setup, eventdev_teardown,
                        test_simple_enqdeq_parallel),
                TEST_CASE_ST(eventdev_setup, eventdev_teardown,
                        test_multi_queue_enq_single_port_deq),
                TEST_CASES_END() /**< NULL terminate unit test array */
        }
};
440
/* Entry point invoked by the test framework command below. */
static int
test_eventdev_octeontx(void)
{
        return unit_test_suite_runner(&eventdev_octeontx_testsuite);
}

/* Exposes the suite as the 'eventdev_octeontx_autotest' test command. */
REGISTER_TEST_COMMAND(eventdev_octeontx_autotest, test_eventdev_octeontx);