+
+int
+order_mempool_setup(struct evt_test *test, struct evt_options *opt)
+{
+ struct test_order *t = evt_test_priv(test);
+
+ t->pool = rte_pktmbuf_pool_create(test->name, opt->pool_sz,
+ 256 /* Cache */, 0,
+ 512, /* Use very small mbufs */
+ opt->socket_id);
+ if (t->pool == NULL) {
+ evt_err("failed to create mempool");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+order_mempool_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_order *t = evt_test_priv(test);
+
+ rte_mempool_free(t->pool);
+}
+
+void
+order_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(test);
+
+ rte_event_dev_stop(opt->dev_id);
+ rte_event_dev_close(opt->dev_id);
+}
+
+void
+order_opt_dump(struct evt_options *opt)
+{
+ evt_dump_producer_lcores(opt);
+ evt_dump("nb_wrker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
+ evt_dump_worker_lcores(opt);
+ evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
+}
+
+int
+order_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *))
+{
+ int ret, lcore_id;
+ struct test_order *t = evt_test_priv(test);
+
+ int wkr_idx = 0;
+ /* launch workers */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (!(opt->wlcores[lcore_id]))
+ continue;
+
+ ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
+ lcore_id);
+ if (ret) {
+ evt_err("failed to launch worker %d", lcore_id);
+ return ret;
+ }
+ wkr_idx++;
+ }
+
+ /* launch producer */
+ int plcore = evt_get_first_active_lcore(opt->plcores);
+
+ ret = rte_eal_remote_launch(order_producer, &t->prod, plcore);
+ if (ret) {
+ evt_err("failed to launch order_producer %d", plcore);
+ return ret;
+ }
+
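+ /* Wait for completion: poll the outstanding packet count from the
+  * master lcore; if it stops decreasing for about a second, dump the
+  * eventdev state and flag a deadlock.
+  */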
+ uint64_t cycles = rte_get_timer_cycles();
+ int64_t old_remaining = -1;
+
+ while (t->err == false) {
+ uint64_t new_cycles = rte_get_timer_cycles();
+ int64_t remaining = rte_atomic64_read(&t->outstand_pkts);
+
+ if (remaining <= 0) {
+ t->result = EVT_TEST_SUCCESS;
+ break;
+ }
+
+ if (new_cycles - cycles > rte_get_timer_hz() * 1) {
+ printf(CLGRN"\r%"PRId64""CLNRM, remaining);
+ fflush(stdout);
+ if (old_remaining == remaining) {
+ rte_event_dev_dump(opt->dev_id, stdout);
+ evt_err("No schedules for seconds, deadlock");
+ t->err = true;
+ rte_smp_wmb();
+ break;
+ }
+ old_remaining = remaining;
+ cycles = new_cycles;
+ }
+ }
+ printf("\r");
+
+ return 0;
+}
+
+int
+order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t nb_workers, uint8_t nb_queues)
+{
+ int ret;
+ uint8_t port;
+ struct test_order *t = evt_test_priv(test);
+ struct rte_event_dev_info dev_info;
+
+ memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
+ ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (ret) {
+ evt_err("failed to get eventdev info %d", opt->dev_id);
+ return ret;
+ }
+
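+ /* cap the worker dequeue depth at the device's maximum */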
+ if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
+ opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
+
+ /* port configuration */
+ const struct rte_event_port_conf p_conf = {
+ .dequeue_depth = opt->wkr_deq_dep,
+ .enqueue_depth = dev_info.max_event_port_enqueue_depth,
+ .new_event_threshold = dev_info.max_num_events,
+ };
+
+ /* setup one port per worker, linking to all queues */
+ for (port = 0; port < nb_workers; port++) {
+ struct worker_data *w = &t->worker[port];
+
+ w->dev_id = opt->dev_id;
+ w->port_id = port;
+ w->t = t;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+
+ ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
+ if (ret != nb_queues) {
+ evt_err("failed to link all queues to port %d", port);
+ return -EINVAL;
+ }
+ }
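+
+ /* setup the producer port, using the port after the last worker port */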
+ struct prod_data *p = &t->prod;
+
+ p->dev_id = opt->dev_id;
+ p->port_id = port; /* last port */
+ p->queue_id = 0;
+ p->t = t;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
+ if (ret) {
+ evt_err("failed to setup producer port %d", port);
+ return ret;
+ }
+
+ return ret;
+}