/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */
5 #ifndef _TEST_ORDER_COMMON_
6 #define _TEST_ORDER_COMMON_
11 #include <rte_cycles.h>
12 #include <rte_eventdev.h>
13 #include <rte_lcore.h>
14 #include <rte_malloc.h>
17 #include "evt_common.h"
18 #include "evt_options.h"
39 /* Don't change the offset of "err". Signal handler use this memory
40 * to terminate all lcores work.
44 * The atomic_* is an expensive operation,Since it is a functional test,
45 * We are using the atomic_ operation to reduce the code complexity.
47 rte_atomic64_t outstand_pkts;
48 enum evt_test_result result;
51 struct rte_mempool *pool;
52 struct prod_data prod;
53 struct worker_data worker[EVT_MAX_PORTS];
54 uint32_t *producer_flow_seq;
55 uint32_t *expected_flow_seq;
56 struct evt_options *opt;
57 } __rte_cache_aligned;
60 order_nb_event_ports(struct evt_options *opt)
62 return evt_nr_active_lcores(opt->wlcores) + 1 /* producer */;
65 static inline __attribute__((always_inline)) void
66 order_process_stage_1(struct test_order *const t,
67 struct rte_event *const ev, const uint32_t nb_flows,
68 uint32_t *const expected_flow_seq,
69 rte_atomic64_t *const outstand_pkts)
71 const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
72 /* compare the seqn against expected value */
73 if (ev->mbuf->seqn != expected_flow_seq[flow]) {
74 evt_err("flow=%x seqn mismatch got=%x expected=%x",
75 flow, ev->mbuf->seqn, expected_flow_seq[flow]);
80 * Events from an atomic flow of an event queue can be scheduled only to
81 * a single port at a time. The port is guaranteed to have exclusive
82 * (atomic) access for given atomic flow.So we don't need to update
83 * expected_flow_seq in critical section.
85 expected_flow_seq[flow]++;
86 rte_pktmbuf_free(ev->mbuf);
87 rte_atomic64_sub(outstand_pkts, 1);
90 static inline __attribute__((always_inline)) void
91 order_process_stage_invalid(struct test_order *const t,
92 struct rte_event *const ev)
94 evt_err("invalid queue %d", ev->queue_id);
/*
 * Common prologue for order-test worker loop functions. The enclosing
 * function must take a "void *arg" parameter pointing to its
 * struct worker_data. The macro deliberately leaks its locals (w, t, opt,
 * dev_id, port, nb_flows, expected_flow_seq, outstand_pkts) into the
 * enclosing scope — worker bodies rely on these names, so do not rename
 * them.
 */
#define ORDER_WORKER_INIT\
	struct worker_data *w = arg;\
	struct test_order *t = w->t;\
	struct evt_options *opt = t->opt;\
	const uint8_t dev_id = w->dev_id;\
	const uint8_t port = w->port_id;\
	const uint32_t nb_flows = t->nb_flows;\
	uint32_t *expected_flow_seq = t->expected_flow_seq;\
	rte_atomic64_t *outstand_pkts = &t->outstand_pkts;\
	if (opt->verbose_level > 1)\
		printf("%s(): lcore %d dev_id %d port=%d\n",\
			__func__, rte_lcore_id(), dev_id, port)
/*
 * Order test plumbing shared by the ordered/atomic test variants
 * (implementations live in the corresponding .c file, not visible here).
 * All entry points receive the evt_test instance and/or the parsed
 * evt_options.
 */
/* Report the recorded test verdict. */
int order_test_result(struct evt_test *test, struct evt_options *opt);
/* Validate command-line options for the order test. */
int order_opt_check(struct evt_options *opt);
int order_test_setup(struct evt_test *test, struct evt_options *opt);
int order_mempool_setup(struct evt_test *test, struct evt_options *opt);
/* Launch the given worker function on the configured worker lcores. */
int order_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *));
int order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t nb_workers, uint8_t nb_queues);
void order_test_destroy(struct evt_test *test, struct evt_options *opt);
void order_opt_dump(struct evt_options *opt);
void order_mempool_destroy(struct evt_test *test, struct evt_options *opt);
void order_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
125 #endif /* _TEST_ORDER_COMMON_ */