2 * SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2010-2014 Intel Corporation
11 #include <sys/queue.h>
13 #include <rte_memory.h>
14 #include <rte_memzone.h>
15 #include <rte_launch.h>
17 #include <rte_per_lcore.h>
18 #include <rte_lcore.h>
19 #include <rte_debug.h>
20 #include <rte_ethdev.h>
21 #include <rte_cycles.h>
22 #include <rte_eventdev.h>
23 #include <rte_bus_vdev.h>
24 #include <rte_pause.h>
26 #include "opdl_evdev.h"
/* Test-harness scale limits. */
32 #define NUM_PACKETS (1<<18)
33 #define NUM_EVENTS 256
/*
 * Per-test state: the mbuf pool plus the port/queue id tables that
 * create_ports()/create_queues_type() fill in.
 * NOTE(review): lines are elided from this view -- these fields appear to
 * be members of 'struct test' (code below uses t->port[]/t->qid[]); confirm.
 */
41 struct rte_mempool *mbuf_pool;
42 uint8_t port[MAX_PORTS];
43 uint8_t qid[MAX_QIDS];
/* Shared packet-mbuf pool, created once and reused across test re-runs. */
47 static struct rte_mempool *eventdev_func_mempool;
/*
 * Allocate an mbuf from @mp and fill it with a canned ARP-request frame
 * ("who-has 10.0.0.1 tell 10.0.0.2").  @portid is unused in the visible
 * code.  NOTE(review): several lines (braces, the NULL-check after
 * rte_pktmbuf_alloc() and the final return) are elided from this view.
 */
49 static __rte_always_inline struct rte_mbuf *
50 rte_gen_arp(int portid, struct rte_mempool *mp)
54 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
/* Raw frame template: Ethernet header (broadcast dst, ethertype 0x0806)
 * followed by the ARP payload and zero padding. */
56 static const uint8_t arp_request[] = {
57 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
58 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
59 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
60 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
61 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
62 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
63 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00
/* NOTE(review): "- 1" silently drops the final pad byte of the template;
 * it also disagrees with the "length 46" comment above -- confirm intent. */
67 int pkt_len = sizeof(arp_request) - 1;
69 m = rte_pktmbuf_alloc(mp);
/* Copy the template straight into the mbuf's data area. */
73 memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
74 arp_request, pkt_len);
/* Single-segment packet: total and segment length are the same. */
75 rte_pktmbuf_pkt_len(m) = pkt_len;
76 rte_pktmbuf_data_len(m) = pkt_len;
83 /* initialization and config */
/*
 * Reset the per-test state and configure the global event device with the
 * requested queue/port counts.  The (elided) tail presumably returns the
 * rte_event_dev_configure() status -- confirm against the full file.
 */
84 static __rte_always_inline int
85 init(struct test *t, int nb_queues, int nb_ports)
87 struct rte_event_dev_config config = {
88 .nb_event_queues = nb_queues,
89 .nb_event_ports = nb_ports,
90 .nb_event_queue_flows = 1024,
91 .nb_events_limit = 4096,
92 .nb_event_port_dequeue_depth = 128,
93 .nb_event_port_enqueue_depth = 128,
/* The mbuf pool outlives individual tests: stash it across the memset. */
97 void *temp = t->mbuf_pool; /* save and restore mbuf pool */
99 memset(t, 0, sizeof(*t));
102 ret = rte_event_dev_configure(evdev, &config);
104 PMD_DRV_LOG(ERR, "%d: Error configuring device\n", __LINE__);
/*
 * Set up event ports 0..num_ports-1 on the global evdev with a common
 * port config; refuses counts that would overflow the fixed port table.
 */
108 static __rte_always_inline int
109 create_ports(struct test *t, int num_ports)
112 static const struct rte_event_port_conf conf = {
113 .new_event_threshold = 1024,
/* Guard the fixed-size t->port[] table. */
117 if (num_ports > MAX_PORTS)
120 for (i = 0; i < num_ports; i++) {
121 if (rte_event_port_setup(evdev, i, &conf) < 0) {
122 PMD_DRV_LOG(ERR, "Error setting up port %d\n", i);
/*
 * Append @num_qids queues of the given OPDL queue type, translating the
 * OPDL flag to an RTE_SCHED_TYPE_* value (switch partly elided from this
 * view).  New queue ids continue from t->nb_qids.
 */
131 static __rte_always_inline int
132 create_queues_type(struct test *t, int num_qids, enum queue_type flags)
138 case OPDL_Q_TYPE_ORDERED:
139 type = RTE_SCHED_TYPE_ORDERED;
141 case OPDL_Q_TYPE_ATOMIC:
142 type = RTE_SCHED_TYPE_ATOMIC;
149 const struct rte_event_queue_conf conf = {
/* SINGLE_LINK is expressed via event_queue_cfg, not schedule_type. */
151 (flags == OPDL_Q_TYPE_SINGLE_LINK ?
152 RTE_EVENT_QUEUE_CFG_SINGLE_LINK : 0),
153 .schedule_type = type,
154 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
155 .nb_atomic_flows = 1024,
156 .nb_atomic_order_sequences = 1024,
/* Queue ids pick up where the previous call left off. */
159 for (i = t->nb_qids ; i < t->nb_qids + num_qids; i++) {
160 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
161 PMD_DRV_LOG(ERR, "%d: error creating qid %d\n ",
168 t->nb_qids += num_qids;
/* NOTE(review): this overflow check runs only after the queues were
 * already set up -- confirm whether that ordering is intentional. */
170 if (t->nb_qids > MAX_QIDS)
/* Stop and close the event device after a test run; @t is unused. */
178 static __rte_always_inline int
179 cleanup(struct test *t __rte_unused)
181 rte_event_dev_stop(evdev);
182 rte_event_dev_close(evdev);
183 PMD_DRV_LOG(ERR, "clean up for test done\n");
/*
 * Ordered-queue smoke test: push 3 packets through one ORDERED queue via
 * three worker ports, forward them back in REVERSE order, and rely on the
 * device to restore the original order at the tx port.
 * NOTE(review): many lines (returns, closing braces, checks) are elided
 * from this view; comments describe only the visible code.
 */
188 ordered_basic(struct test *t)
190 const uint8_t rx_port = 0;
191 const uint8_t w1_port = 1;
192 const uint8_t w3_port = 3;
193 const uint8_t tx_port = 4;
197 struct rte_mbuf *mbufs[3];
199 const uint32_t MAGIC_SEQN = 1234;
201 /* Create instance with 5 ports */
202 if (init(t, 2, tx_port+1) < 0 ||
203 create_ports(t, tx_port+1) < 0 ||
204 create_queues_type(t, 2, OPDL_Q_TYPE_ORDERED)) {
205 PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
211 * We need three ports, all mapped to the same ordered qid0. Then we'll
212 * take a packet out to each port, re-enqueue in reverse order,
213 * then make sure the reordering has taken place properly when we
214 * dequeue from the tx_port.
216 * Simplified test setup diagram:
220 * qid0 - w2_port - qid1
224 /* CQ mapping to QID for LB ports (directed mapped on create) */
225 for (i = w1_port; i <= w3_port; i++) {
226 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
229 PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n",
/* The tx port drains the second (downstream) queue. */
236 err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
239 PMD_DRV_LOG(ERR, "%d: error mapping TX qid\n", __LINE__);
244 if (rte_event_dev_start(evdev) < 0) {
245 PMD_DRV_LOG(ERR, "%d: Error with start call\n", __LINE__);
248 /* Enqueue 3 packets to the rx port */
249 for (i = 0; i < 3; i++) {
251 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
253 PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
257 ev.queue_id = t->qid[0];
258 ev.op = RTE_EVENT_OP_NEW;
/* Tag each mbuf so the dequeue side can verify ordering. */
260 mbufs[i]->seqn = MAGIC_SEQN + i;
262 /* generate pkt and enqueue */
263 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
265 PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
271 /* use extra slot to make logic in loops easier */
272 struct rte_event deq_ev[w3_port + 1];
276 /* Dequeue the 3 packets, one from each worker port */
277 for (i = w1_port; i <= w3_port; i++) {
278 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
281 PMD_DRV_LOG(ERR, "%d: Failed to deq\n", __LINE__);
282 rte_event_dev_dump(evdev, stdout);
/* Recover the enqueue-order index from the seqn tag. */
285 seq = deq_ev[i].mbuf->seqn - MAGIC_SEQN;
288 PMD_DRV_LOG(ERR, " seq test failed ! eq is %d , "
289 "port number is %u\n", seq, i);
294 /* Enqueue each packet in reverse order, flushing after each one */
295 for (i = w3_port; i >= w1_port; i--) {
297 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
298 deq_ev[i].queue_id = t->qid[1];
299 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
301 PMD_DRV_LOG(ERR, "%d: Failed to enqueue\n", __LINE__);
306 /* dequeue from the tx ports, we should get 3 packets */
307 deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
310 /* Check to see if we've got all 3 packets */
312 PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d\n",
313 __LINE__, deq_pkts, tx_port);
314 rte_event_dev_dump(evdev, stdout);
318 /* Destroy the instance */
/*
 * Atomic-queue smoke test: same 3-packet topology as ordered_basic(), but
 * with an ATOMIC queue -- all packets share one flow, so exactly one
 * worker port (port 2 in the visible check) should receive all of them.
 * NOTE(review): many lines are elided from this view; comments describe
 * only the visible code.
 */
326 atomic_basic(struct test *t)
328 const uint8_t rx_port = 0;
329 const uint8_t w1_port = 1;
330 const uint8_t w3_port = 3;
331 const uint8_t tx_port = 4;
335 struct rte_mbuf *mbufs[3];
336 const uint32_t MAGIC_SEQN = 1234;
338 /* Create instance with 5 ports */
339 if (init(t, 2, tx_port+1) < 0 ||
340 create_ports(t, tx_port+1) < 0 ||
341 create_queues_type(t, 2, OPDL_Q_TYPE_ATOMIC)) {
342 PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
349 * We need three ports, all mapped to the same atomic qid0. Then we'll
350 * take the packets out (all on one port, since they share a flow),
351 * forward them on, and make sure they arrive intact when we
352 * dequeue from the tx_port.
354 * Simplified test setup diagram:
358 * qid0 - w2_port - qid1
362 /* CQ mapping to QID for Atomic ports (directed mapped on create) */
363 for (i = w1_port; i <= w3_port; i++) {
364 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
367 PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n",
374 err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
377 PMD_DRV_LOG(ERR, "%d: error mapping TX qid\n", __LINE__);
382 if (rte_event_dev_start(evdev) < 0) {
383 PMD_DRV_LOG(ERR, "%d: Error with start call\n", __LINE__);
387 /* Enqueue 3 packets to the rx port */
388 for (i = 0; i < 3; i++) {
390 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
392 PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
396 ev.queue_id = t->qid[0];
397 ev.op = RTE_EVENT_OP_NEW;
400 mbufs[i]->seqn = MAGIC_SEQN + i;
402 /* generate pkt and enqueue */
403 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
405 PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
411 /* use extra slot to make logic in loops easier */
412 struct rte_event deq_ev[w3_port + 1];
414 /* Dequeue the 3 packets, one from each worker port */
415 for (i = w1_port; i <= w3_port; i++) {
417 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
/* Atomic scheduling pins the single flow to one port: every port
 * other than port 2 must dequeue nothing. */
420 if (t->port[i] != 2) {
422 PMD_DRV_LOG(ERR, "%d: deq none zero !\n",
424 rte_event_dev_dump(evdev, stdout);
430 PMD_DRV_LOG(ERR, "%d: deq not eqal to 3 %u !\n",
432 rte_event_dev_dump(evdev, stdout);
/* Forward all 3 events from the owning port to the downstream queue. */
436 for (int j = 0; j < 3; j++) {
437 deq_ev[j].op = RTE_EVENT_OP_FORWARD;
438 deq_ev[j].queue_id = t->qid[1];
441 err = rte_event_enqueue_burst(evdev, t->port[i],
445 PMD_DRV_LOG(ERR, "port %d: Failed to enqueue pkt %u, "
456 /* dequeue from the tx ports, we should get 3 packets */
457 deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
460 /* Check to see if we've got all 3 packets */
462 PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d\n",
463 __LINE__, deq_pkts, tx_port);
464 rte_event_dev_dump(evdev, stdout);
/*
 * Compare per-port xstat values against hard-coded expectations for the
 * single_link_w_stats() traffic pattern (3 packets in, 2 forwarded out).
 * @index selects the port: 0 = rx, 1 = worker, 2 = tx.
 * NOTE(review): which stat each id[] slot holds depends on the xstats id
 * ordering returned by the opdl PMD -- confirm against the driver.
 */
472 static __rte_always_inline int
473 check_qid_stats(uint32_t id[], int index)
477 if (id[0] != 3 || id[1] != 3
480 } else if (index == 1) {
481 if (id[0] != 5 || id[1] != 5
484 } else if (index == 2) {
485 if (id[0] != 3 || id[1] != 1
/*
 * Fetch the xstats for each of the 3 ports used by single_link_w_stats()
 * and validate them via check_qid_stats().  Returns a truthiness result
 * (caller tests with !check_statistics()); tail is elided from this view.
 */
495 check_statistics(void)
497 int num_ports = 3; /* Hard-coded for this app */
499 for (int i = 0; i < num_ports; i++) {
500 int num_stats, num_stats_returned;
/* First call sizes the arrays below. */
502 num_stats = rte_event_dev_xstats_names_get(0,
503 RTE_EVENT_DEV_XSTATS_PORT,
/* VLAs sized from the device-reported stat count. */
510 uint32_t id[num_stats];
511 struct rte_event_dev_xstats_name names[num_stats];
512 uint64_t values[num_stats];
/* Second call actually fills names/ids. */
514 num_stats_returned = rte_event_dev_xstats_names_get(0,
515 RTE_EVENT_DEV_XSTATS_PORT,
/* Only proceed when the count is stable between the two calls. */
521 if (num_stats == num_stats_returned) {
522 num_stats_returned = rte_event_dev_xstats_get(0,
523 RTE_EVENT_DEV_XSTATS_PORT,
529 if (num_stats == num_stats_returned) {
532 err = check_qid_stats(id, i);
/* Packets injected at rx vs. packets forwarded on by the worker. */
550 #define OLD_NUM_PACKETS 3
551 #define NEW_NUM_PACKETS 2
/*
 * SINGLE_LINK pipeline test with stats validation: rx -> SINGLE_LINK
 * qid0 -> worker -> ORDERED qid1 -> tx.  3 packets go in, the worker
 * forwards only 2, then check_statistics() verifies the xstats counters.
 * NOTE(review): lines are elided from this view; comments describe only
 * the visible code.
 */
553 single_link_w_stats(struct test *t)
555 const uint8_t rx_port = 0;
556 const uint8_t w1_port = 1;
557 const uint8_t tx_port = 2;
561 struct rte_mbuf *mbufs[3];
565 /* Create instance with 3 ports */
566 if (init(t, 2, tx_port + 1) < 0 ||
567 create_ports(t, 3) < 0 || /* 0,1,2 */
568 create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
569 create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
570 PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
577 * Simplified test setup diagram:
581 * qid0 - w1_port(1) - qid1
/* Worker port drains the SINGLE_LINK queue... */
586 err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
589 PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]\n",
/* ...and the tx port drains the downstream ORDERED queue. */
597 err = rte_event_port_link(evdev, t->port[2], &t->qid[1], NULL,
600 PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]\n",
608 if (rte_event_dev_start(evdev) != 0) {
609 PMD_DRV_LOG(ERR, "%d: failed to start device\n", __LINE__);
615 * Enqueue 3 packets to the rx port
617 for (i = 0; i < 3; i++) {
619 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
621 PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
625 ev.queue_id = t->qid[0];
626 ev.op = RTE_EVENT_OP_NEW;
628 mbufs[i]->seqn = 1234 + i;
630 /* generate pkt and enqueue */
631 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
633 PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
641 /* Dequeue the 3 packets, from SINGLE_LINK worker port */
642 struct rte_event deq_ev[3];
644 deq_pkts = rte_event_dequeue_burst(evdev,
649 PMD_DRV_LOG(ERR, "%d: deq not 3 !\n", __LINE__);
/* Deliberately drop the 3rd packet: forward only NEW_NUM_PACKETS so the
 * xstats show 3 in / 2 out. */
654 /* Just enqueue 2 onto new ring */
655 for (i = 0; i < NEW_NUM_PACKETS; i++)
656 deq_ev[i].queue_id = t->qid[1];
658 deq_pkts = rte_event_enqueue_burst(evdev,
664 PMD_DRV_LOG(ERR, "%d: enq not 2 but %u!\n", __LINE__, deq_pkts);
669 /* dequeue from the tx ports, we should get 2 packets */
670 deq_pkts = rte_event_dequeue_burst(evdev,
676 /* Check to see if we've got all 2 packets */
678 PMD_DRV_LOG(ERR, "%d: expected 2 pkts at tx port got %d from port %d\n",
679 __LINE__, deq_pkts, tx_port);
684 if (!check_statistics()) {
685 PMD_DRV_LOG(ERR, "xstats check failed");
/*
 * SINGLE_LINK negative test: link TWO ports to the same SINGLE_LINK
 * queue (qid0) -- rte_event_dev_start() must then FAIL; succeeding is
 * the test error.  NOTE(review): lines are elided from this view.
 */
696 single_link(struct test *t)
698 /* const uint8_t rx_port = 0; */
699 /* const uint8_t w1_port = 1; */
700 /* const uint8_t w3_port = 3; */
701 const uint8_t tx_port = 2;
704 struct rte_mbuf *mbufs[3];
708 /* Create instance with 3 ports */
709 if (init(t, 2, tx_port+1) < 0 ||
710 create_ports(t, 3) < 0 || /* 0,1,2 */
711 create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
712 create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
713 PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
720 * Simplified test setup diagram:
724 * qid0 - w1_port(1) - qid1
729 err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
732 PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", __LINE__);
/* Second link to the same SINGLE_LINK queue -- the illegal config. */
737 err = rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL,
740 PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", __LINE__);
/* Inverted check: start SUCCEEDING here means the driver failed to
 * reject the double link. */
745 if (rte_event_dev_start(evdev) == 0) {
746 PMD_DRV_LOG(ERR, "%d: start DIDN'T FAIL with more than 1 "
747 "SINGLE_LINK PORT\n", __LINE__);
/*
 * Fill @ev[0..num_events) with identical NEW ordered events targeting
 * @qid.  The mbuf pointer is a poison value: qid_basic() only checks
 * event metadata and never dereferences it.
 */
758 static __rte_always_inline void
759 populate_event_burst(struct rte_event ev[],
764 for (i = 0; i < num_events; i++) {
766 ev[i].op = RTE_EVENT_OP_NEW;
767 ev[i].sched_type = RTE_SCHED_TYPE_ORDERED;
768 ev[i].queue_id = qid;
769 ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
770 ev[i].sub_event_type = 0;
771 ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
/* Deliberate non-NULL poison pointer -- must never be dereferenced. */
772 ev[i].mbuf = (struct rte_mbuf *)0xdead0000;
/* Number of events moved per enqueue/dequeue burst in qid_basic(). */
777 #define BATCH_SIZE 32
/*
 * Queue-id plumbing test: chain NUM_QUEUES ordered queues port-to-port,
 * verify link-time error cases (double link, link after start, traffic
 * on a stopped device), then pass one batch down the whole chain and
 * check queue_id at each hop.  NOTE(review): many lines are elided from
 * this view; comments describe only the visible code.
 */
780 qid_basic(struct test *t)
790 struct rte_event ev[BATCH_SIZE];
792 /* Create instance with 4 ports */
793 if (init(t, NUM_QUEUES, NUM_QUEUES+1) < 0 ||
794 create_ports(t, NUM_QUEUES+1) < 0 ||
795 create_queues_type(t, NUM_QUEUES, OPDL_Q_TYPE_ORDERED)) {
796 PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
/* Link each queue i to port i+1, forming the pipeline chain. */
800 for (i = 0; i < NUM_QUEUES; i++) {
804 nb_linked = rte_event_port_link(evdev,
805 i+1, /* port = q_id + 1*/
810 if (nb_linked != 1) {
812 PMD_DRV_LOG(ERR, "%s:%d: error mapping port:%u to queue:%u\n",
825 /* Try and link to the same port again */
828 if (rte_event_port_link(evdev,
833 PMD_DRV_LOG(ERR, "%s:%d: Second call to port link on same port DID NOT fail\n",
839 uint32_t test_num_events;
/* Traffic on a stopped device must move zero events, both directions. */
842 test_num_events = rte_event_dequeue_burst(evdev,
847 if (test_num_events != 0) {
848 PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing 0 packets from port %u on stopped device\n",
857 test_num_events = rte_event_enqueue_burst(evdev,
861 if (test_num_events != 0) {
862 PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing 0 packets to port %u on stopped device\n",
872 /* Start the device */
874 if (rte_event_dev_start(evdev) < 0) {
875 PMD_DRV_LOG(ERR, "%s:%d: Error with start call\n",
883 /* Check we can't do any more links now that device is started.*/
886 if (rte_event_port_link(evdev,
891 PMD_DRV_LOG(ERR, "%s:%d: Call to port link on started device DID NOT fail\n",
/* Inject one full batch at the head of the chain. */
902 populate_event_burst(ev,
906 num_events = rte_event_enqueue_burst(evdev,
910 if (num_events != BATCH_SIZE) {
911 PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing rx packets\n",
/* Walk the batch down every intermediate port in the chain. */
919 while (++p_id < NUM_QUEUES) {
921 num_events = rte_event_dequeue_burst(evdev,
927 if (num_events != BATCH_SIZE) {
928 PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from port %u\n",
/* Each hop must see its own queue id on the events. */
936 if (ev[0].queue_id != q_id) {
937 PMD_DRV_LOG(ERR, "%s:%d: Error event portid[%u] q_id:[%u] does not match expected:[%u]\n",
947 populate_event_burst(ev,
951 num_events = rte_event_enqueue_burst(evdev,
955 if (num_events != BATCH_SIZE) {
956 PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing packets from port:%u to queue:%u\n",
/* Finally drain the full batch at the tail port. */
968 num_events = rte_event_dequeue_burst(evdev,
973 if (num_events != BATCH_SIZE) {
974 PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from tx port %u\n",
/*
 * NOTE(review): the enclosing function's signature line is elided from
 * this view and its tail runs past it (presumably the opdl self-test
 * entry point -- confirm).  Visible flow: create the "event_opdl0" vdev
 * on demand, build the shared mbuf pool once, then run each sub-test.
 */
/* NOTE(review): malloc() result is used without a visible NULL check. */
992 struct test *t = malloc(sizeof(struct test));
995 const char *eventdev_name = "event_opdl0";
997 evdev = rte_event_dev_get_dev_id(eventdev_name);
1000 PMD_DRV_LOG(ERR, "%d: Eventdev %s not found - creating.\n",
1001 __LINE__, eventdev_name);
1002 /* turn on stats by default */
1003 if (rte_vdev_init(eventdev_name, "do_validation=1") < 0) {
1004 PMD_DRV_LOG(ERR, "Error creating eventdev\n");
1007 evdev = rte_event_dev_get_dev_id(eventdev_name);
1009 PMD_DRV_LOG(ERR, "Error finding newly created eventdev\n");
1014 /* Only create mbuf pool once, reuse for each test run */
1015 if (!eventdev_func_mempool) {
1016 eventdev_func_mempool = rte_pktmbuf_pool_create(
1017 "EVENTDEV_SW_SA_MBUF_POOL",
1018 (1<<12), /* 4k buffers */
1019 32 /*MBUF_CACHE_SIZE*/,
1021 512, /* use very small mbufs */
1023 if (!eventdev_func_mempool) {
1024 PMD_DRV_LOG(ERR, "ERROR creating mempool\n");
1028 t->mbuf_pool = eventdev_func_mempool;
1030 PMD_DRV_LOG(ERR, "*** Running Ordered Basic test...\n");
1031 ret = ordered_basic(t);
1033 PMD_DRV_LOG(ERR, "*** Running Atomic Basic test...\n");
1034 ret = atomic_basic(t);
1037 PMD_DRV_LOG(ERR, "*** Running QID Basic test...\n");
1040 PMD_DRV_LOG(ERR, "*** Running SINGLE LINK failure test...\n");
1041 ret = single_link(t);
1043 PMD_DRV_LOG(ERR, "*** Running SINGLE LINK w stats test...\n");
1044 ret = single_link_w_stats(t);
1047 * Free test instance, leaving mempool initialized, and a pointer to it
1048 * in static eventdev_func_mempool, as it is re-used on re-runs