/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _TEST_ORDER_COMMON_
#define _TEST_ORDER_COMMON_

#include <stdio.h>
#include <stdbool.h>

#include <rte_cycles.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

#include "evt_common.h"
#include "evt_options.h"
#include "evt_test.h"

typedef uint32_t flow_id_t;

struct test_order;

struct worker_data {
	uint8_t port_id;
	uint8_t dev_id;
	struct test_order *t;
};

struct prod_data {
	uint8_t dev_id;
	uint8_t port_id;
	uint8_t queue_id;
	struct test_order *t;
};

struct test_order {
	/* Don't change the offset of "err". The signal handler uses this
	 * memory to terminate all lcores' work.
	 */
	int err;
	/*
	 * The atomic_* operations are expensive, but since this is a
	 * functional test, we use them to reduce code complexity.
	 */
	rte_atomic64_t outstand_pkts;
	enum evt_test_result result;
	uint32_t nb_flows;
	uint64_t nb_pkts;
	struct rte_mempool *pool;
	int flow_id_dynfield_offset;
	struct prod_data prod;
	struct worker_data worker[EVT_MAX_PORTS];
	uint32_t *producer_flow_seq;
	uint32_t *expected_flow_seq;
	struct evt_options *opt;
} __rte_cache_aligned;

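/*
 * Why "err" must stay at offset zero: the harness's SIGINT handler is
 * assumed to stop all workers by writing through the opaque test private
 * data, without knowing the struct test_order layout. A minimal sketch,
 * assuming a file-scope "test" handle (names are illustrative):
 *
 *	static void
 *	order_sigint_handler(int signum)
 *	{
 *		RTE_SET_USED(signum);
 *		*(int *)test->test_priv = true;
 *		rte_smp_wmb();
 *	}
 */
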
static inline void
order_flow_id_copy_from_mbuf(struct test_order *t, struct rte_event *event)
{
	event->flow_id = *RTE_MBUF_DYNFIELD(event->mbuf,
			t->flow_id_dynfield_offset, flow_id_t *);
}

static inline void
order_flow_id_save(struct test_order *t, flow_id_t flow_id,
		struct rte_mbuf *mbuf, struct rte_event *event)
{
	*RTE_MBUF_DYNFIELD(mbuf,
			t->flow_id_dynfield_offset, flow_id_t *) = flow_id;
	event->flow_id = flow_id;
	event->mbuf = mbuf;
}

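/*
 * A minimal sketch of how flow_id_dynfield_offset is expected to be filled
 * in at setup time via the rte_mbuf_dyn API (the field name and the panic
 * on failure are illustrative):
 *
 *	static const struct rte_mbuf_dynfield flow_id_dynfield_desc = {
 *		.name = "test_event_dynfield_flow_id",
 *		.size = sizeof(flow_id_t),
 *		.align = __alignof__(flow_id_t),
 *	};
 *
 *	t->flow_id_dynfield_offset =
 *		rte_mbuf_dynfield_register(&flow_id_dynfield_desc);
 *	if (t->flow_id_dynfield_offset < 0)
 *		rte_panic("failed to register mbuf flow_id field\n");
 */
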
static inline int
order_nb_event_ports(struct evt_options *opt)
{
	return evt_nr_active_lcores(opt->wlcores) + 1 /* producer */;
}

static __rte_always_inline void
order_process_stage_1(struct test_order *const t,
		struct rte_event *const ev, const uint32_t nb_flows,
		uint32_t *const expected_flow_seq,
		rte_atomic64_t *const outstand_pkts)
{
	const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
	/* compare the seqn against the expected value */
	if (ev->mbuf->seqn != expected_flow_seq[flow]) {
		evt_err("flow=%x seqn mismatch got=%x expected=%x",
			flow, ev->mbuf->seqn, expected_flow_seq[flow]);
		t->err = true;
		rte_smp_wmb();
	}
	/*
	 * Events from an atomic flow of an event queue can be scheduled only
	 * to a single port at a time. The port is guaranteed to have
	 * exclusive (atomic) access to the given atomic flow, so we don't
	 * need to update expected_flow_seq in a critical section.
	 */
	expected_flow_seq[flow]++;
	rte_pktmbuf_free(ev->mbuf);
	rte_atomic64_sub(outstand_pkts, 1);
}

static __rte_always_inline void
order_process_stage_invalid(struct test_order *const t,
			struct rte_event *const ev)
{
	evt_err("invalid queue %d", ev->queue_id);
	t->err = true;
	rte_smp_wmb();
}

#define ORDER_WORKER_INIT\
	struct worker_data *w = arg;\
	struct test_order *t = w->t;\
	struct evt_options *opt = t->opt;\
	const uint8_t dev_id = w->dev_id;\
	const uint8_t port = w->port_id;\
	const uint32_t nb_flows = t->nb_flows;\
	uint32_t *expected_flow_seq = t->expected_flow_seq;\
	rte_atomic64_t *outstand_pkts = &t->outstand_pkts;\
	if (opt->verbose_level > 1)\
		printf("%s(): lcore %d dev_id %d port=%d\n",\
			__func__, rte_lcore_id(), dev_id, port)

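/*
 * Usage sketch for ORDER_WORKER_INIT (illustrative only; the real workers
 * live in the test-specific .c files). A single-stage worker built on the
 * helpers above could look like:
 *
 *	static int
 *	order_worker(void *arg)
 *	{
 *		ORDER_WORKER_INIT;
 *		struct rte_event ev;
 *
 *		while (t->err == false &&
 *		       rte_atomic64_read(outstand_pkts) > 0) {
 *			uint16_t n = rte_event_dequeue_burst(dev_id, port,
 *					&ev, 1, 0);
 *			if (!n) {
 *				rte_pause();
 *				continue;
 *			}
 *			if (ev.queue_id == 0)
 *				order_process_stage_1(t, &ev, nb_flows,
 *						expected_flow_seq,
 *						outstand_pkts);
 *			else
 *				order_process_stage_invalid(t, &ev);
 *		}
 *		return 0;
 *	}
 */
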
int order_test_result(struct evt_test *test, struct evt_options *opt);
int order_opt_check(struct evt_options *opt);
int order_test_setup(struct evt_test *test, struct evt_options *opt);
int order_mempool_setup(struct evt_test *test, struct evt_options *opt);
int order_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *));
int order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t nb_workers, uint8_t nb_queues);
void order_test_destroy(struct evt_test *test, struct evt_options *opt);
void order_opt_dump(struct evt_options *opt);
void order_mempool_destroy(struct evt_test *test, struct evt_options *opt);
void order_eventdev_destroy(struct evt_test *test, struct evt_options *opt);

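/*
 * These helpers are stitched into a concrete test through struct
 * evt_test_ops. A sketch, assuming the ordered-queue variant supplies its
 * own eventdev setup and worker launch routines (names illustrative):
 *
 *	static const struct evt_test_ops order_queue = {
 *		.opt_check = order_opt_check,
 *		.opt_dump = order_opt_dump,
 *		.test_setup = order_test_setup,
 *		.mempool_setup = order_mempool_setup,
 *		.eventdev_setup = order_queue_eventdev_setup,
 *		.launch_lcores = order_queue_launch_lcores,
 *		.eventdev_destroy = order_eventdev_destroy,
 *		.mempool_destroy = order_mempool_destroy,
 *		.test_result = order_test_result,
 *		.test_destroy = order_test_destroy,
 *	};
 *
 *	EVT_TEST_REGISTER(order_queue);
 */
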
#endif /* _TEST_ORDER_COMMON_ */