/*
 *   Copyright (C) Cavium, Inc 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <unistd.h>

#include "test_order_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
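
/*
 * Test topology: a single "all types" queue (ATQ). The producer injects
 * stage 0 events (sub_event_type == 0); workers forward them on the same
 * queue as atomic stage 1 events (sub_event_type == 1), and stage 1
 * validates the per-flow sequence numbers.
 */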

static inline __attribute__((always_inline)) void
order_atq_process_stage_0(struct rte_event *const ev)
{
	ev->sub_event_type = 1; /* move to stage 1 (atomic) on the same queue */
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}
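
/* Worker for non-burst capable devices: handle one event per dequeue. */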
static int
order_atq_worker(void *arg)
{
	ORDER_WORKER_INIT;
	struct rte_event ev;

	while (t->err == false) {
		uint16_t event = rte_event_dequeue_burst(dev_id, port,
					&ev, 1, 0);
		if (!event) {
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		if (ev.sub_event_type == 0) { /* stage 0 from producer */
			order_atq_process_stage_0(&ev);
			while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
					!= 1)
				rte_pause();
		} else if (ev.sub_event_type == 1) { /* stage 1 */
			order_process_stage_1(t, &ev, nb_flows,
					expected_flow_seq, outstand_pkts);
		} else {
			order_process_stage_invalid(t, &ev);
		}
	}
	return 0;
}
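
/*
 * Worker for burst capable devices: dequeue up to BURST_SIZE events,
 * process each stage in place, then enqueue the batch back, retrying
 * until every event is accepted.
 */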
static int
order_atq_worker_burst(void *arg)
{
	ORDER_WORKER_INIT;
	struct rte_event ev[BURST_SIZE];
	uint16_t i;

	while (t->err == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port,
					ev, BURST_SIZE, 0);
		if (nb_rx == 0) {
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (ev[i].sub_event_type == 0) { /* stage 0 */
				order_atq_process_stage_0(&ev[i]);
			} else if (ev[i].sub_event_type == 1) { /* stage 1 */
				order_process_stage_1(t, &ev[i], nb_flows,
					expected_flow_seq, outstand_pkts);
				/* stage 1 consumed the event: release it */
				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				order_process_stage_invalid(t, &ev[i]);
			}
		}

		uint16_t enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
		while (enq < nb_rx)
			enq += rte_event_enqueue_burst(dev_id, port,
					ev + enq, nb_rx - enq);
	}
	return 0;
}
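
/* Select the burst or single-event worker based on device capability. */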
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	const bool burst = evt_has_burst_mode(w->dev_id);

	if (burst)
		return order_atq_worker_burst(arg);
	return order_atq_worker(arg);
}

static int
order_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return order_launch_lcores(test, opt, worker_wrapper);
}
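
/*
 * Configure the event device with one all types queue and one port per
 * worker plus one for the producer, then start it.
 */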
#define NB_QUEUES 1
static int
order_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;

	const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
	/* number of active worker cores + 1 producer */
	const uint8_t nb_ports = nb_workers + 1;

	const struct rte_event_dev_config config = {
			.nb_event_queues = NB_QUEUES, /* one all types queue */
			.nb_event_ports = nb_ports,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	/* q0 all types queue configuration */
	struct rte_event_queue_conf q0_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	ret = rte_event_queue_setup(opt->dev_id, 0, &q0_conf);
	if (ret) {
		evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
		return ret;
	}

	/* setup one port per worker, linking to all queues */
	ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
	if (ret)
		return ret;

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

static void
order_atq_opt_dump(struct evt_options *opt)
{
	order_opt_dump(opt);
	evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
}
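
/*
 * Check that the device exposes enough queues and ports for this test
 * and that it supports all types queues.
 */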
static bool
order_atq_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
			order_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			NB_QUEUES, dev_info.max_event_queues,
			order_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	if (!evt_has_all_types_queue(opt->dev_id)) {
		evt_err("device does not support all types queues");
		return false;
	}

	return true;
}
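
/* Callback table wiring this test into the test-eventdev framework. */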
static const struct evt_test_ops order_atq = {
	.cap_check = order_atq_capability_check,
	.opt_check = order_opt_check,
	.opt_dump = order_atq_opt_dump,
	.test_setup = order_test_setup,
	.mempool_setup = order_mempool_setup,
	.eventdev_setup = order_atq_eventdev_setup,
	.launch_lcores = order_atq_launch_lcores,
	.eventdev_destroy = order_eventdev_destroy,
	.mempool_destroy = order_mempool_destroy,
	.test_result = order_test_result,
	.test_destroy = order_test_destroy,
};

EVT_TEST_REGISTER(order_atq);
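
/*
 * Illustrative invocation (the vdev and lcore choices below are only an
 * example), following the testeventdev guide linked above:
 *
 *   sudo build/app/dpdk-test-eventdev --vdev=event_sw0 -- \
 *        --test=order_atq --plcores 1 --wlcores 2,3
 */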