/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_order_common.h"
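
/*
 * Common helpers for the eventdev ordering tests: a single producer lcore
 * injects mbufs as NEW events carrying a per-flow sequence number, worker
 * lcores verify that events of the same flow complete in the produced
 * order, and the master lcore tracks completion via outstand_pkts.
 */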
int
order_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	return t->result;
}
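
/*
 * Producer loop, run on the single producer lcore: allocate one mbuf per
 * event, stamp a per-flow sequence number into it and enqueue it to stage 0
 * as an ORDERED event. The mbuf address modulo nb_flows selects the flow.
 */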
static inline int
order_producer(void *arg)
{
	struct prod_data *p = arg;
	struct test_order *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	uint32_t *producer_flow_seq = t->producer_flow_seq;
	const uint32_t nb_flows = t->nb_flows;
	uint64_t count = 0;
	struct rte_mbuf *m;
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
			__func__, rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ORDERED;
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */
	while (count < nb_pkts && t->err == false) {
		m = rte_pktmbuf_alloc(pool);
		if (m == NULL)
			continue;

		const uint32_t flow = (uintptr_t)m % nb_flows;
		/* Maintain seq number per flow */
		m->seqn = producer_flow_seq[flow]++;
		ev.flow_id = flow;
		ev.mbuf = m;

		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
			if (t->err)
				break;
			rte_pause();
		}
		count++;
	}
	return 0;
}
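
/*
 * Illustrative sketch, not part of the original file: the matching check on
 * the worker side (the real helper lives in test_order_common.h and the
 * per-test worker loops). Events of one flow must arrive at the final stage
 * in the exact sequence the producer stamped into m->seqn; the function
 * name here is hypothetical.
 */
static __rte_unused void
order_flow_seq_check_sketch(struct test_order *const t, struct rte_mbuf *m)
{
	const uint32_t flow = (uintptr_t)m % t->nb_flows;

	/* Compare against the per-flow sequence the test expects next */
	if (m->seqn != t->expected_flow_seq[flow]) {
		evt_err("flow=%x seqn mismatch got=%x expected=%x",
			flow, m->seqn, t->expected_flow_seq[flow]);
		t->err = true;
	}
	t->expected_flow_seq[flow]++;
	rte_pktmbuf_free(m);
	rte_atomic64_sub(&t->outstand_pkts, 1);
}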
int
order_opt_check(struct evt_options *opt)
{
	/* 1 producer + N workers + 1 master */
	if (rte_lcore_count() < 3) {
		evt_err("test needs a minimum of 3 lcores");
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
		evt_err("worker lcores overlap with master lcore");
		return -1;
	}
	if (evt_nr_active_lcores(opt->plcores) == 0) {
		evt_err("missing the producer lcore");
		return -1;
	}
	if (evt_nr_active_lcores(opt->plcores) != 1) {
		evt_err("only one producer lcore must be selected");
		return -1;
	}

	int plcore = evt_get_first_active_lcore(opt->plcores);
	if (plcore == -1) {
		evt_err("failed to find active producer");
		return plcore;
	}

	if (evt_lcores_has_overlap(opt->wlcores, plcore)) {
		evt_err("worker lcores overlap with producer lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	/* Validate producer lcore */
	if (plcore == (int)rte_get_master_lcore()) {
		evt_err("producer lcore and master lcore should be different");
		return -1;
	}
	if (!rte_lcore_is_enabled(plcore)) {
		evt_err("producer lcore is not enabled");
		return -1;
	}

	/* Fixups */
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX;

	return 0;
}
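
/*
 * A minimal invocation satisfying the checks above (lcore numbers and the
 * vdev are illustrative): master on lcore 0, producer on lcore 1, workers
 * on lcores 2-3:
 *   dpdk-test-eventdev -l 0-3 --vdev=event_sw0 -- \
 *           --test=order_queue --plcores=1 --wlcores=2,3
 */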
int
order_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_order;

	test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_order == NULL) {
		evt_err("failed to allocate test_order memory");
		goto nomem;
	}
	test->test_priv = test_order;

	struct test_order *t = evt_test_priv(test);

	t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
				sizeof(*t->producer_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (t->producer_flow_seq == NULL) {
		evt_err("failed to allocate t->producer_flow_seq memory");
		goto prod_nomem;
	}

	t->expected_flow_seq = rte_zmalloc_socket("test_expected_flow_seq",
				sizeof(*t->expected_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (t->expected_flow_seq == NULL) {
		evt_err("failed to allocate t->expected_flow_seq memory");
		goto exp_nomem;
	}

	rte_atomic64_set(&t->outstand_pkts, opt->nb_pkts);
	t->err = false;
	t->nb_pkts = opt->nb_pkts;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	return 0;

exp_nomem:
	rte_free(t->producer_flow_seq);
prod_nomem:
	rte_free(test->test_priv);
nomem:
	return -ENOMEM;
}
void
order_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_free(t->expected_flow_seq);
	rte_free(t->producer_flow_seq);
	rte_free(test->test_priv);
}
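
/*
 * The test only reads the mbuf pointer and seqn, never the payload, so the
 * pool below is created with a very small data room.
 */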
int
order_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_order *t = evt_test_priv(test);

	t->pool = rte_pktmbuf_pool_create(test->name, opt->pool_sz,
					256 /* cache size */, 0 /* priv size */,
					512, /* Use very small mbufs */
					opt->socket_id);
	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}
void
order_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}
void
order_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}
void
order_opt_dump(struct evt_options *opt)
{
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
}
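
/*
 * Launch one worker per active wlcore and the producer on its own lcore,
 * then poll outstand_pkts from the master lcore. If the count stops moving
 * for a second, dump the eventdev state and flag a deadlock.
 */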
int
order_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_order *t = evt_test_priv(test);

	/* launch workers */
	int wkr_idx = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
					lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		wkr_idx++;
	}

	/* launch producer */
	int plcore = evt_get_first_active_lcore(opt->plcores);

	ret = rte_eal_remote_launch(order_producer, &t->prod, plcore);
	if (ret) {
		evt_err("failed to launch order_producer %d", plcore);
		return ret;
	}

	uint64_t cycles = rte_get_timer_cycles();
	int64_t old_remaining = -1;

	while (t->err == false) {
		uint64_t new_cycles = rte_get_timer_cycles();
		int64_t remaining = rte_atomic64_read(&t->outstand_pkts);

		if (remaining <= 0) {
			t->result = EVT_TEST_SUCCESS;
			break;
		}

		if (new_cycles - cycles > rte_get_timer_hz() * 1) {
			printf(CLGRN"\r%"PRId64""CLNRM, remaining);
			fflush(stdout);
			if (old_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("no schedules for a second, deadlock suspected");
				t->err = true;
				rte_smp_wmb();
				break;
			}
			old_remaining = remaining;
			cycles = new_cycles;
		}
	}
	printf("\r");

	return 0;
}
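
/*
 * Port topology: ports [0..nb_workers-1] belong to the workers and are
 * linked to all nb_queues queues; the last port belongs to the producer
 * and stays unlinked, since the producer only enqueues NEW events.
 */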
int
order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t nb_workers, uint8_t nb_queues)
{
	int ret;
	uint8_t port;
	struct test_order *t = evt_test_priv(test);

	/* port configuration */
	const struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
	};

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < nb_workers; port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;

		ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}
	/* port for producer, no links */
	const struct rte_event_port_conf prod_conf = {
			.dequeue_depth = 8,
			.enqueue_depth = 32,
			.new_event_threshold = 1200,
	};
	struct prod_data *p = &t->prod;

	p->dev_id = opt->dev_id;
	p->port_id = port; /* last port */
	p->queue_id = 0;
	p->t = t;

	ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
	if (ret) {
		evt_err("failed to setup producer port %d", port);
		return ret;
	}

	return ret;
}