/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_eventdev.h>
#include <rte_bus_vdev.h>
#include <rte_pause.h>

#include "opdl_evdev.h"
#define NUM_PACKETS (1<<18)
#define NUM_EVENTS 256

static int evdev;

struct test {
	struct rte_mempool *mbuf_pool;
	uint8_t port[MAX_PORTS];
	uint8_t qid[MAX_QIDS];
	int nb_qids;
};

static struct rte_mempool *eventdev_func_mempool;
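
/*
 * Build one ARP request frame ("who-has 10.0.0.1 tell 10.0.0.2") in an
 * mbuf allocated from @mp. The tests only use the result as an opaque
 * payload carried inside events; the frame contents are never parsed.
 */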
static __rte_always_inline struct rte_mbuf *
rte_gen_arp(int portid, struct rte_mempool *mp)
{
	/*
	 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
	 */
	static const uint8_t arp_request[] = {
		/*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
		0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
		/*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
		0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
		/*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
		0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		/*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00
	};
	struct rte_mbuf *m;
	int pkt_len = sizeof(arp_request) - 1;

	m = rte_pktmbuf_alloc(mp);
	if (m == NULL)
		return NULL;

	memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
		arp_request, pkt_len);
	rte_pktmbuf_pkt_len(m) = pkt_len;
	rte_pktmbuf_data_len(m) = pkt_len;

	RTE_SET_USED(portid);

	return m;
}
/* initialization and config */
static __rte_always_inline int
init(struct test *t, int nb_queues, int nb_ports)
{
	struct rte_event_dev_config config = {
		.nb_event_queues = nb_queues,
		.nb_event_ports = nb_ports,
		.nb_event_queue_flows = 1024,
		.nb_events_limit = 4096,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128,
	};
	int ret;

	void *temp = t->mbuf_pool; /* save and restore mbuf pool */

	memset(t, 0, sizeof(*t));
	t->mbuf_pool = temp; /* restore mbuf pool */

	ret = rte_event_dev_configure(evdev, &config);
	if (ret < 0)
		PMD_DRV_LOG(ERR, "%d: Error configuring device\n", __LINE__);
	return ret;
}
static __rte_always_inline int
create_ports(struct test *t, int num_ports)
{
	int i;
	static const struct rte_event_port_conf conf = {
		.new_event_threshold = 1024,
		/* depths assumed; any small burst size works for these tests */
		.dequeue_depth = 32,
		.enqueue_depth = 64,
	};
	if (num_ports > MAX_PORTS)
		return -1;

	for (i = 0; i < num_ports; i++) {
		if (rte_event_port_setup(evdev, i, &conf) < 0) {
			PMD_DRV_LOG(ERR, "Error setting up port %d\n", i);
			return -1;
		}
		t->port[i] = i;
	}

	return 0;
}
static __rte_always_inline int
create_queues_type(struct test *t, int num_qids, enum queue_type flags)
{
	int i;
	uint8_t type = 0;

	switch (flags) {
	case OPDL_Q_TYPE_ORDERED:
		type = RTE_SCHED_TYPE_ORDERED;
		break;
	case OPDL_Q_TYPE_ATOMIC:
		type = RTE_SCHED_TYPE_ATOMIC;
		break;
	default:
		break;
	}

	const struct rte_event_queue_conf conf = {
		.event_queue_cfg =
			(flags == OPDL_Q_TYPE_SINGLE_LINK ?
			 RTE_EVENT_QUEUE_CFG_SINGLE_LINK : 0),
		.schedule_type = type,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
	};

	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
			PMD_DRV_LOG(ERR, "%d: error creating qid %d\n",
					__LINE__, i);
			return -1;
		}
		t->qid[i] = i;
	}

	t->nb_qids += num_qids;

	if (t->nb_qids > MAX_QIDS)
		return -1;

	return 0;
}
static __rte_always_inline int
cleanup(struct test *t __rte_unused)
{
	rte_event_dev_stop(evdev);
	rte_event_dev_close(evdev);
	PMD_DRV_LOG(ERR, "test cleanup done\n");
	return 0;
}
static int
ordered_basic(struct test *t)
{
	const uint8_t rx_port = 0;
	const uint8_t w1_port = 1;
	const uint8_t w3_port = 3;
	const uint8_t tx_port = 4;
	int err;
	uint32_t i;
	uint32_t deq_pkts;
	struct rte_mbuf *mbufs[3];

	const uint32_t MAGIC_SEQN = 1234;

	/* Create instance with 5 ports */
	if (init(t, 2, tx_port+1) < 0 ||
			create_ports(t, tx_port+1) < 0 ||
			create_queues_type(t, 2, OPDL_Q_TYPE_ORDERED)) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/*
	 * We need three ports, all mapped to the same ordered qid0. Then we'll
	 * take a packet out to each port, re-enqueue in reverse order,
	 * then make sure the reordering has taken place properly when we
	 * dequeue from the tx_port.
	 *
	 * Simplified test setup diagram:
	 *
	 * qid0 - w2_port - qid1
	 */

	/* CQ mapping to QID for LB ports (directed mapped on create) */
	for (i = w1_port; i <= w3_port; i++) {
		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
				1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n",
					__LINE__);
			cleanup(t);
			return -1;
		}
	}

	err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error mapping TX qid\n", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		PMD_DRV_LOG(ERR, "%d: Error with start call\n", __LINE__);
		return -1;
	}

	/* Enqueue 3 packets to the rx port */
	for (i = 0; i < 3; i++) {
		struct rte_event ev;

		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
		if (!mbufs[i]) {
			PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
			return -1;
		}

		ev.queue_id = t->qid[0];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = mbufs[i];
		mbufs[i]->seqn = MAGIC_SEQN + i;

		/* enqueue the pkt */
		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
					__LINE__, i, err);
			return -1;
		}
	}

	/* use extra slot to make logic in loops easier */
	struct rte_event deq_ev[w3_port + 1];
	uint32_t seq;

	/* Dequeue the 3 packets, one from each worker port */
	for (i = w1_port; i <= w3_port; i++) {
		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
				&deq_ev[i], 1, 0);
		if (deq_pkts != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to deq\n", __LINE__);
			rte_event_dev_dump(evdev, stdout);
			return -1;
		}

		seq = deq_ev[i].mbuf->seqn - MAGIC_SEQN;
		if (seq != (i-1)) {
			PMD_DRV_LOG(ERR, "seq test failed! seq is %d, "
					"port number is %u\n", seq, i);
			return -1;
		}
	}

	/* Enqueue each packet in reverse order, flushing after each one */
	for (i = w3_port; i >= w1_port; i--) {

		deq_ev[i].op = RTE_EVENT_OP_FORWARD;
		deq_ev[i].queue_id = t->qid[1];
		err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue\n", __LINE__);
			return -1;
		}
	}

	/* dequeue from the tx ports, we should get 3 packets */
	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
			3, 0);

	/* Check to see if we've got all 3 packets */
	if (deq_pkts != 3) {
		PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d\n",
				__LINE__, deq_pkts, tx_port);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}

	/* Destroy the instance */
	cleanup(t);

	return 0;
}
static int
atomic_basic(struct test *t)
{
	const uint8_t rx_port = 0;
	const uint8_t w1_port = 1;
	const uint8_t w3_port = 3;
	const uint8_t tx_port = 4;
	int err;
	uint32_t i, j;
	uint32_t deq_pkts;
	struct rte_mbuf *mbufs[3];
	const uint32_t MAGIC_SEQN = 1234;

	/* Create instance with 5 ports */
	if (init(t, 2, tx_port+1) < 0 ||
			create_ports(t, tx_port+1) < 0 ||
			create_queues_type(t, 2, OPDL_Q_TYPE_ATOMIC)) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/*
	 * We need three ports, all mapped to the same atomic qid0. All the
	 * packets belong to one flow, so atomic scheduling should deliver
	 * all three to a single worker port (port 2 here); forward them
	 * from there and make sure they all reach the tx_port.
	 *
	 * Simplified test setup diagram:
	 *
	 * qid0 - w2_port - qid1
	 */

	/* CQ mapping to QID for Atomic ports (directed mapped on create) */
	for (i = w1_port; i <= w3_port; i++) {
		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
				1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n",
					__LINE__);
			cleanup(t);
			return -1;
		}
	}

	err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error mapping TX qid\n", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		PMD_DRV_LOG(ERR, "%d: Error with start call\n", __LINE__);
		return -1;
	}

	/* Enqueue 3 packets to the rx port */
	for (i = 0; i < 3; i++) {
		struct rte_event ev;

		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
		if (!mbufs[i]) {
			PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
			return -1;
		}

		ev.queue_id = t->qid[0];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = mbufs[i];
		mbufs[i]->seqn = MAGIC_SEQN + i;

		/* enqueue the pkt */
		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
					__LINE__, i, err);
			return -1;
		}
	}

	/* use extra slot to make logic in loops easier */
	struct rte_event deq_ev[w3_port + 1];

	/* Dequeue the 3 packets, one from each worker port */
	for (i = w1_port; i <= w3_port; i++) {
		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
				deq_ev, 3, 0);

		if (t->port[i] != 2) {
			if (deq_pkts != 0) {
				PMD_DRV_LOG(ERR, "%d: deq non-zero!\n",
						__LINE__);
				rte_event_dev_dump(evdev, stdout);
				return -1;
			}
		} else {
			if (deq_pkts != 3) {
				PMD_DRV_LOG(ERR, "%d: deq not equal to 3, got %u!\n",
						__LINE__, deq_pkts);
				rte_event_dev_dump(evdev, stdout);
				return -1;
			}

			for (j = 0; j < 3; j++) {
				deq_ev[j].op = RTE_EVENT_OP_FORWARD;
				deq_ev[j].queue_id = t->qid[1];
			}

			err = rte_event_enqueue_burst(evdev, t->port[i],
					deq_ev, 3);
			if (err != 3) {
				PMD_DRV_LOG(ERR, "port %d: Failed to enqueue pkt %u, "
						"retval = %u\n",
						t->port[i], 3, err);
				return -1;
			}
		}
	}

	/* dequeue from the tx ports, we should get 3 packets */
	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
			3, 0);

	/* Check to see if we've got all 3 packets */
	if (deq_pkts != 3) {
		PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d\n",
				__LINE__, deq_pkts, tx_port);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}

	cleanup(t);

	return 0;
}
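
/*
 * Validate one port's xstats against the counts expected once
 * single_link_w_stats() has pushed its traffic through (3 events in,
 * 2 forwarded on). The literal values below encode that flow; their
 * per-stat meaning follows the ordering the PMD reports the stats in.
 */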
static __rte_always_inline int
check_qid_stats(uint32_t id[], int index)
{
	if (index == 0) {
		if (id[0] != 3 || id[1] != 3)
			return -1;
	} else if (index == 1) {
		if (id[0] != 5 || id[1] != 5)
			return -1;
	} else if (index == 2) {
		if (id[0] != 3 || id[1] != 1)
			return -1;
	}

	return 0;
}
static __rte_always_inline int
check_statistics(void)
{
	int num_ports = 3; /* Hard-coded for this app */
	int i;

	for (i = 0; i < num_ports; i++) {
		int num_stats, num_stats_returned;

		num_stats = rte_event_dev_xstats_names_get(0,
				RTE_EVENT_DEV_XSTATS_PORT,
				i,
				NULL,
				NULL,
				0);
		if (num_stats > 0) {

			uint32_t id[num_stats];
			struct rte_event_dev_xstats_name names[num_stats];
			uint64_t values[num_stats];

			num_stats_returned = rte_event_dev_xstats_names_get(0,
					RTE_EVENT_DEV_XSTATS_PORT,
					i,
					names,
					id,
					num_stats);

			if (num_stats == num_stats_returned) {
				num_stats_returned = rte_event_dev_xstats_get(0,
						RTE_EVENT_DEV_XSTATS_PORT,
						i,
						id,
						values,
						num_stats);

				if (num_stats == num_stats_returned) {
					int err;

					err = check_qid_stats(id, i);
					if (err)
						return 0;
				}
			}
		}
	}

	return 1;
}
#define OLD_NUM_PACKETS 3
#define NEW_NUM_PACKETS 2
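
/*
 * single_link_w_stats() below dequeues OLD_NUM_PACKETS (3) events from
 * the single-link worker but forwards only NEW_NUM_PACKETS (2) of them,
 * so the tx port is expected to see exactly 2.
 */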
static int
single_link_w_stats(struct test *t)
{
	const uint8_t rx_port = 0;
	const uint8_t w1_port = 1;
	const uint8_t tx_port = 2;
	int err;
	uint32_t i;
	uint32_t deq_pkts;
	struct rte_mbuf *mbufs[3];

	/* Create instance with 3 ports */
	if (init(t, 2, tx_port + 1) < 0 ||
			create_ports(t, 3) < 0 || /* 0,1,2 */
			create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
			create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/*
	 * Simplified test setup diagram:
	 *
	 * qid0 - w1_port(1) - qid1
	 */

	err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]\n",
				__LINE__, t->port[1], t->qid[0]);
		cleanup(t);
		return -1;
	}

	err = rte_event_port_link(evdev, t->port[2], &t->qid[1], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]\n",
				__LINE__, t->port[2], t->qid[1]);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) != 0) {
		PMD_DRV_LOG(ERR, "%d: failed to start device\n", __LINE__);
		cleanup(t);
		return -1;
	}

	/*
	 * Enqueue 3 packets to the rx port
	 */
	for (i = 0; i < 3; i++) {
		struct rte_event ev;

		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
		if (!mbufs[i]) {
			PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
			return -1;
		}

		ev.queue_id = t->qid[0];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = mbufs[i];
		mbufs[i]->seqn = 1234 + i;

		/* enqueue the pkt */
		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
					__LINE__, i, err);
			return -1;
		}
	}

	/* Dequeue the 3 packets, from SINGLE_LINK worker port */
	struct rte_event deq_ev[3];

	deq_pkts = rte_event_dequeue_burst(evdev,
			t->port[w1_port],
			deq_ev, 3, 0);
	if (deq_pkts != 3) {
		PMD_DRV_LOG(ERR, "%d: deq not 3 !\n", __LINE__);
		cleanup(t);
		return -1;
	}

	/* Just enqueue 2 onto new ring */
	for (i = 0; i < NEW_NUM_PACKETS; i++)
		deq_ev[i].queue_id = t->qid[1];

	deq_pkts = rte_event_enqueue_burst(evdev,
			t->port[w1_port],
			deq_ev,
			NEW_NUM_PACKETS);
	if (deq_pkts != 2) {
		PMD_DRV_LOG(ERR, "%d: enq not 2 but %u!\n", __LINE__, deq_pkts);
		cleanup(t);
		return -1;
	}

	/* dequeue from the tx ports, we should get 2 packets */
	deq_pkts = rte_event_dequeue_burst(evdev,
			t->port[tx_port],
			deq_ev, 3, 0);

	/* Check to see if we've got both packets */
	if (deq_pkts != 2) {
		PMD_DRV_LOG(ERR, "%d: expected 2 pkts at tx port got %d from port %d\n",
				__LINE__, deq_pkts, tx_port);
		cleanup(t);
		return -1;
	}

	if (!check_statistics()) {
		PMD_DRV_LOG(ERR, "xstats check failed");
		cleanup(t);
		return -1;
	}

	cleanup(t);

	return 0;
}
static int
single_link(struct test *t)
{
	/* const uint8_t rx_port = 0; */
	/* const uint8_t w1_port = 1; */
	/* const uint8_t w3_port = 3; */
	const uint8_t tx_port = 2;
	int err;
	struct rte_mbuf *mbufs[3];

	RTE_SET_USED(mbufs);

	/* Create instance with 3 ports */
	if (init(t, 2, tx_port+1) < 0 ||
			create_ports(t, 3) < 0 || /* 0,1,2 */
			create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
			create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/*
	 * Simplified test setup diagram:
	 *
	 * qid0 - w1_port(1) - qid1
	 */

	err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", __LINE__);
		cleanup(t);
		return -1;
	}

	/* Link a second consumer port to the SINGLE_LINK queue */
	err = rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) == 0) {
		PMD_DRV_LOG(ERR, "%d: start DIDN'T FAIL with more than 1 "
				"SINGLE_LINK PORT\n", __LINE__);
		cleanup(t);
		return -1;
	}

	cleanup(t);

	return 0;
}
static __rte_always_inline void
populate_event_burst(struct rte_event ev[],
		uint8_t qid,
		uint16_t num_events)
{
	uint16_t i;

	for (i = 0; i < num_events; i++) {
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].sched_type = RTE_SCHED_TYPE_ORDERED;
		ev[i].queue_id = qid;
		ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
		ev[i].sub_event_type = 0;
		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
		/* dummy pointer: the qid tests never touch the payload */
		ev[i].mbuf = (struct rte_mbuf *)0xdead0000;
	}
}

#define BATCH_SIZE 32
#define NUM_QUEUES 3 /* matches the "4 ports" sizing in qid_basic() */
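
/*
 * Minimal usage sketch (mirroring qid_basic() below), illustrative only:
 *
 *	struct rte_event ev[BATCH_SIZE];
 *
 *	populate_event_burst(ev, 0, BATCH_SIZE);            enqueue to qid 0
 *	rte_event_enqueue_burst(evdev, 0, ev, BATCH_SIZE);  via rx port 0
 */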
static int
qid_basic(struct test *t)
{
	uint8_t q_id = 0;
	uint8_t p_id = 0;
	uint32_t num_events;
	uint32_t i;

	struct rte_event ev[BATCH_SIZE];

	/* Create instance with 4 ports */
	if (init(t, NUM_QUEUES, NUM_QUEUES+1) < 0 ||
			create_ports(t, NUM_QUEUES+1) < 0 ||
			create_queues_type(t, NUM_QUEUES, OPDL_Q_TYPE_ORDERED)) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* CQ mapping to QID: port i+1 consumes queue i */
	for (i = 0; i < NUM_QUEUES; i++) {
		int nb_linked;

		q_id = i;
		nb_linked = rte_event_port_link(evdev,
				i+1, /* port = q_id + 1*/
				&q_id,
				NULL,
				1);

		if (nb_linked != 1) {
			PMD_DRV_LOG(ERR, "%s:%d: error mapping port:%u to queue:%u\n",
					__FILE__, __LINE__, i+1, q_id);
			return -1;
		}
	}

	/* Try to link to the same port again */
	q_id = 0;
	if (rte_event_port_link(evdev,
			1,
			&q_id,
			NULL,
			1) > 0) {
		PMD_DRV_LOG(ERR, "%s:%d: Second call to port link on same port DID NOT fail\n",
				__FILE__, __LINE__);
		return -1;
	}

	uint32_t test_num_events;

	/* Dequeue on a stopped device must return no events */
	test_num_events = rte_event_dequeue_burst(evdev,
			p_id,
			ev,
			BATCH_SIZE,
			0);
	if (test_num_events != 0) {
		PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing 0 packets from port %u on stopped device\n",
				__FILE__, __LINE__, p_id);
		return -1;
	}

	/* Enqueue on a stopped device must also take no events */
	populate_event_burst(ev, 0, BATCH_SIZE);
	test_num_events = rte_event_enqueue_burst(evdev,
			p_id,
			ev,
			BATCH_SIZE);
	if (test_num_events != 0) {
		PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing 0 packets to port %u on stopped device\n",
				__FILE__, __LINE__, p_id);
		return -1;
	}

	/* Start the device */
	if (rte_event_dev_start(evdev) < 0) {
		PMD_DRV_LOG(ERR, "%s:%d: Error with start call\n",
				__FILE__, __LINE__);
		return -1;
	}

	/* Check we can't do any more links now that device is started.*/
	q_id = 0;
	if (rte_event_port_link(evdev,
			1,
			&q_id,
			NULL,
			1) > 0) {
		PMD_DRV_LOG(ERR, "%s:%d: Call to port link on started device DID NOT fail\n",
				__FILE__, __LINE__);
		return -1;
	}

	/* Enqueue one batch to qid0 via the rx port (port 0) */
	populate_event_burst(ev,
			0, /* qid */
			BATCH_SIZE);

	num_events = rte_event_enqueue_burst(evdev,
			p_id,
			ev,
			BATCH_SIZE);
	if (num_events != BATCH_SIZE) {
		PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing rx packets\n",
				__FILE__, __LINE__);
		return -1;
	}

	/* Walk the batch through each worker stage in turn */
	while (++p_id < NUM_QUEUES) {
		q_id = p_id - 1; /* port p consumes queue p-1 */

		num_events = rte_event_dequeue_burst(evdev,
				p_id,
				ev,
				BATCH_SIZE,
				0);

		if (num_events != BATCH_SIZE) {
			PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from port %u\n",
					__FILE__, __LINE__, p_id);
			return -1;
		}

		if (ev[0].queue_id != q_id) {
			PMD_DRV_LOG(ERR, "%s:%d: Error event portid[%u] q_id:[%u] does not match expected:[%u]\n",
					__FILE__, __LINE__, p_id,
					ev[0].queue_id, q_id);
			return -1;
		}

		/* Forward the batch to the next queue */
		populate_event_burst(ev,
				q_id + 1,
				BATCH_SIZE);

		num_events = rte_event_enqueue_burst(evdev,
				p_id,
				ev,
				BATCH_SIZE);
		if (num_events != BATCH_SIZE) {
			PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing packets from port:%u to queue:%u\n",
					__FILE__, __LINE__, p_id, q_id + 1);
			return -1;
		}
	}

	/* Dequeue the final batch at the tx port */
	num_events = rte_event_dequeue_burst(evdev,
			p_id,
			ev,
			BATCH_SIZE,
			0);
	if (num_events != BATCH_SIZE) {
		PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from tx port %u\n",
				__FILE__, __LINE__, p_id);
		return -1;
	}

	cleanup(t);

	return 0;
}
int
opdl_selftest(void)
{
	struct test *t = malloc(sizeof(struct test));
	int ret;

	const char *eventdev_name = "event_opdl0";

	evdev = rte_event_dev_get_dev_id(eventdev_name);

	if (evdev < 0) {
		PMD_DRV_LOG(ERR, "%d: Eventdev %s not found - creating.\n",
				__LINE__, eventdev_name);
		/* turn on stats by default */
		if (rte_vdev_init(eventdev_name, "do_validation=1") < 0) {
			PMD_DRV_LOG(ERR, "Error creating eventdev\n");
			free(t);
			return -1;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			PMD_DRV_LOG(ERR, "Error finding newly created eventdev\n");
			free(t);
			return -1;
		}
	}

	/* Only create mbuf pool once, reuse for each test run */
	if (!eventdev_func_mempool) {
		eventdev_func_mempool = rte_pktmbuf_pool_create(
				"EVENTDEV_SW_SA_MBUF_POOL",
				(1<<12), /* 4k buffers */
				32 /*MBUF_CACHE_SIZE*/,
				0,
				512, /* use very small mbufs */
				rte_socket_id());
		if (!eventdev_func_mempool) {
			PMD_DRV_LOG(ERR, "ERROR creating mempool\n");
			free(t);
			return -1;
		}
	}

	t->mbuf_pool = eventdev_func_mempool;

	PMD_DRV_LOG(ERR, "*** Running Ordered Basic test...\n");
	ret = ordered_basic(t);

	PMD_DRV_LOG(ERR, "*** Running Atomic Basic test...\n");
	ret = atomic_basic(t);

	PMD_DRV_LOG(ERR, "*** Running QID Basic test...\n");
	ret = qid_basic(t);

	PMD_DRV_LOG(ERR, "*** Running SINGLE LINK failure test...\n");
	ret = single_link(t);

	PMD_DRV_LOG(ERR, "*** Running SINGLE LINK w stats test...\n");
	ret = single_link_w_stats(t);

	/*
	 * Free test instance, free mempool
	 */
	rte_mempool_free(t->mbuf_pool);
	eventdev_func_mempool = NULL; /* allow re-creation on a later run */
	free(t);

	return ret;
}