1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
5 #include "test_order_common.h"
/* Report the outcome of the ordered test: reads back the result stored in
 * the per-test private data (t->result, set by the launch/poll loop).
 * NOTE(review): body elided in this view -- presumably returns t->result.
 */
8 order_test_result(struct evt_test *test, struct evt_options *opt)
11 struct test_order *t = evt_test_priv(test);
/*
 * Producer lcore entry point: allocates mbufs from the test mempool and
 * injects each one as a NEW, ORDERED event on stage 0 until nb_pkts have
 * been produced or the test flags an error (t->err).
 */
17 order_producer(void *arg)
19 struct prod_data *p = arg;
20 struct test_order *t = p->t;
21 struct evt_options *opt = t->opt;
22 const uint8_t dev_id = p->dev_id;
23 const uint8_t port = p->port_id;
24 struct rte_mempool *pool = t->pool;
25 const uint64_t nb_pkts = t->nb_pkts;
26 uint32_t *producer_flow_seq = t->producer_flow_seq;
27 const uint32_t nb_flows = t->nb_flows;
/* Trace which lcore/device/port/queue this producer is bound to. */
32 if (opt->verbose_level > 1)
33 printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
34 __func__, rte_lcore_id(), dev_id, port, p->queue_id);
/* Fixed event template: every packet enters the pipeline as a NEW event,
 * ORDERED scheduling, normal priority, CPU-generated, at stage 0. */
37 ev.op = RTE_EVENT_OP_NEW;
38 ev.queue_id = p->queue_id;
39 ev.sched_type = RTE_SCHED_TYPE_ORDERED;
40 ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
41 ev.event_type = RTE_EVENT_TYPE_CPU;
42 ev.sub_event_type = 0; /* stage 0 */
/* Stop early if any worker reported an error via t->err. */
44 while (count < nb_pkts && t->err == false) {
45 m = rte_pktmbuf_alloc(pool);
/* Flow id is derived from the mbuf address modulo nb_flows --
 * NOTE(review): assumes mbuf addresses spread across flows; confirm. */
49 const uint32_t flow = (uintptr_t)m % nb_flows;
50 /* Maintain seq number per flow */
51 m->seqn = producer_flow_seq[flow]++;
/* Busy-retry the single-event enqueue until the port accepts it. */
56 while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
/*
 * Validate the command-line options for the ordered test.
 * Requirements enforced below: synthetic producer only, at least three
 * lcores (1 producer + N workers + 1 master), exactly one enabled producer
 * lcore, enabled worker lcores, and no overlap between worker, producer
 * and master lcores. A zero packet count is treated as "run unbounded".
 */
68 order_opt_check(struct evt_options *opt)
/* Only the synthetic (SW-generated) producer is supported by this test. */
70 if (opt->prod_type != EVT_PROD_TYPE_SYNT) {
71 evt_err("Invalid producer type");
75 /* 1 producer + N workers + 1 master */
76 if (rte_lcore_count() < 3) {
77 evt_err("test need minimum 3 lcores");
81 /* Validate worker lcores */
82 if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
83 evt_err("worker lcores overlaps with master lcore");
/* Exactly one producer lcore must be supplied. */
87 if (evt_nr_active_lcores(opt->plcores) == 0) {
88 evt_err("missing the producer lcore");
92 if (evt_nr_active_lcores(opt->plcores) != 1) {
93 evt_err("only one producer lcore must be selected");
97 int plcore = evt_get_first_active_lcore(opt->plcores);
100 evt_err("failed to find active producer");
/* Workers must not share a core with the producer, and all of them
 * must be enabled with at least one present. */
104 if (evt_lcores_has_overlap(opt->wlcores, plcore)) {
105 evt_err("worker lcores overlaps producer lcore");
108 if (evt_has_disabled_lcore(opt->wlcores)) {
109 evt_err("one or more workers lcores are not enabled");
112 if (!evt_has_active_lcore(opt->wlcores)) {
113 evt_err("minimum one worker is required");
117 /* Validate producer lcore */
118 if (plcore == (int)rte_get_master_lcore()) {
119 evt_err("producer lcore and master lcore should be different");
122 if (!rte_lcore_is_enabled(plcore)) {
123 evt_err("producer lcore is not enabled");
/* nb_pkts == 0 means "no limit": run until stopped or an error occurs. */
128 if (opt->nb_pkts == 0)
129 opt->nb_pkts = INT64_MAX;
/*
 * Allocate the per-test state on the requested NUMA socket: the
 * test_order struct itself plus two per-flow sequence-number arrays
 * (producer side and expected/verification side). Initializes the
 * outstanding-packet counter and copies the run parameters from opt.
 * The trailing rte_free() calls are the (elided) error-unwind path.
 */
135 order_test_setup(struct evt_test *test, struct evt_options *opt)
139 test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
140 RTE_CACHE_LINE_SIZE, opt->socket_id);
141 if (test_order == NULL) {
142 evt_err("failed to allocate test_order memory");
145 test->test_priv = test_order;
147 struct test_order *t = evt_test_priv(test);
/* One sequence counter per flow, bumped by the producer. */
149 t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
150 sizeof(*t->producer_flow_seq) * opt->nb_flows,
151 RTE_CACHE_LINE_SIZE, opt->socket_id);
153 if (t->producer_flow_seq == NULL) {
154 evt_err("failed to allocate t->producer_flow_seq memory");
/* One expected-sequence counter per flow, checked by the workers. */
158 t->expected_flow_seq = rte_zmalloc_socket("test_expected_flow_seq",
159 sizeof(*t->expected_flow_seq) * opt->nb_flows,
160 RTE_CACHE_LINE_SIZE, opt->socket_id);
162 if (t->expected_flow_seq == NULL) {
163 evt_err("failed to allocate t->expected_flow_seq memory");
/* Count of packets still in flight; decremented as packets complete. */
166 rte_atomic64_set(&t->outstand_pkts, opt->nb_pkts);
168 t->nb_pkts = opt->nb_pkts;
169 t->nb_flows = opt->nb_flows;
170 t->result = EVT_TEST_FAILED;
/* Error-unwind: free in reverse order of allocation. */
175 rte_free(t->producer_flow_seq);
177 rte_free(test->test_priv);
/* Release everything order_test_setup() allocated: both per-flow
 * sequence arrays and the test_order private struct itself. */
183 order_test_destroy(struct evt_test *test, struct evt_options *opt)
186 struct test_order *t = evt_test_priv(test);
188 rte_free(t->expected_flow_seq);
189 rte_free(t->producer_flow_seq);
190 rte_free(test->test_priv);
/* Create the pktmbuf pool the producer allocates from. Mbuf data size is
 * deliberately small (512) since the payload is never touched -- only the
 * mbuf's per-flow sequence number matters to this test. */
194 order_mempool_setup(struct evt_test *test, struct evt_options *opt)
196 struct test_order *t = evt_test_priv(test);
198 t->pool = rte_pktmbuf_pool_create(test->name, opt->pool_sz,
200 512, /* Use very small mbufs */
202 if (t->pool == NULL) {
203 evt_err("failed to create mempool");
/* Free the pktmbuf pool created by order_mempool_setup(). */
211 order_mempool_destroy(struct evt_test *test, struct evt_options *opt)
214 struct test_order *t = evt_test_priv(test);
216 rte_mempool_free(t->pool);
/* Tear down the event device: stop it, then close it. */
220 order_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
224 rte_event_dev_stop(opt->dev_id);
225 rte_event_dev_close(opt->dev_id);
/* Dump the test configuration: producer/worker lcore sets, worker count
 * and number of eventdev ports required. */
229 order_opt_dump(struct evt_options *opt)
231 evt_dump_producer_lcores(opt);
/* NOTE(review): "nb_wrker_lcores" looks like a typo for
 * "nb_worker_lcores" -- it is user-visible output, confirm upstream
 * before changing. */
232 evt_dump("nb_wrker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
233 evt_dump_worker_lcores(opt);
234 evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
/*
 * Launch the test: start one worker per enabled worker lcore, start the
 * producer on its lcore, then have the master lcore poll the
 * outstanding-packet counter. Succeeds when all packets have drained;
 * declares a deadlock if the counter makes no progress for about a
 * second (checked once per timer-hz interval).
 */
238 order_launch_lcores(struct evt_test *test, struct evt_options *opt,
239 int (*worker)(void *))
242 struct test_order *t = evt_test_priv(test);
/* Start a worker on every slave lcore selected in opt->wlcores. */
246 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
247 if (!(opt->wlcores[lcore_id]))
250 ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
253 evt_err("failed to launch worker %d", lcore_id);
259 /* launch producer */
260 int plcore = evt_get_first_active_lcore(opt->plcores);
262 ret = rte_eal_remote_launch(order_producer, &t->prod, plcore);
264 evt_err("failed to launch order_producer %d", plcore);
/* Master lcore: poll until all packets drain or an error is flagged. */
268 uint64_t cycles = rte_get_timer_cycles();
269 int64_t old_remaining = -1;
271 while (t->err == false) {
272 uint64_t new_cycles = rte_get_timer_cycles();
273 int64_t remaining = rte_atomic64_read(&t->outstand_pkts);
275 if (remaining <= 0) {
276 t->result = EVT_TEST_SUCCESS;
/* Once per second: print progress; if the count has not moved since
 * the last check, dump device state and report a deadlock. */
280 if (new_cycles - cycles > rte_get_timer_hz() * 1) {
281 printf(CLGRN"\r%"PRId64""CLNRM, remaining);
283 if (old_remaining == remaining) {
284 rte_event_dev_dump(opt->dev_id, stdout);
285 evt_err("No schedules for seconds, deadlock");
290 old_remaining = remaining;
/*
 * Configure the event device ports: one port per worker (each linked to
 * all nb_queues queues) and one extra port, the last one, for the
 * producer. Worker dequeue depth is clamped to the device capability
 * reported by rte_event_dev_info_get().
 */
300 order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
301 uint8_t nb_workers, uint8_t nb_queues)
305 struct test_order *t = evt_test_priv(test);
306 struct rte_event_dev_info dev_info;
308 memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
309 ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
311 evt_err("failed to get eventdev info %d", opt->dev_id);
/* Clamp the requested worker dequeue depth to the device limit. */
315 if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
316 opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
318 /* port configuration */
319 const struct rte_event_port_conf p_conf = {
320 .dequeue_depth = opt->wkr_deq_dep,
/* NOTE(review): enqueue_depth is set from the *dequeue* depth limit --
 * confirm this is intentional vs. max_event_port_enqueue_depth. */
321 .enqueue_depth = dev_info.max_event_port_dequeue_depth,
322 .new_event_threshold = dev_info.max_num_events,
325 /* setup one port per worker, linking to all queues */
326 for (port = 0; port < nb_workers; port++) {
327 struct worker_data *w = &t->worker[port];
329 w->dev_id = opt->dev_id;
333 ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
335 evt_err("failed to setup port %d", port);
/* NULL/NULL/0 links the port to all queues; must succeed for all. */
339 ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
340 if (ret != nb_queues) {
341 evt_err("failed to link all queues to port %d", port);
/* The producer uses the port after the workers' ports. */
345 struct prod_data *p = &t->prod;
347 p->dev_id = opt->dev_id;
348 p->port_id = port; /* last port */
352 ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
354 evt_err("failed to setup producer port %d", port);