/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdio.h>
#include <unistd.h>

#include "test_order_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

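/*
 * Example invocation (a sketch only; assumes the software eventdev PMD and an
 * arbitrary core layout, so adjust -l, --vdev, --plcores and --wlcores for the
 * target platform):
 *
 *   dpdk-test-eventdev -l 0-3 --vdev=event_sw0 -- --test=order_atq \
 *       --plcores=1 --wlcores=2,3 --nb_flows=64 --nb_pkts=0
 */

/* Move a stage 0 event to stage 1 on the same all types queue, forwarding it
 * with atomic scheduling so per-flow ordering can be verified in stage 1.
 */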
static __rte_always_inline void
order_atq_process_stage_0(struct rte_event *const ev)
{
	ev->sub_event_type = 1; /* move to stage 1 (atomic) on the same queue */
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

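/* Single-event worker: dequeue one event at a time, forward stage 0 events to
 * stage 1 and validate stage 1 events against the expected per-flow sequence.
 * The loop exits once nothing is dequeued and no packets remain outstanding.
 */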
static int
order_atq_worker(void *arg, const bool flow_id_cap)
{
	ORDER_WORKER_INIT;
	struct rte_event ev;

	while (t->err == false) {
		uint16_t event = rte_event_dequeue_burst(dev_id, port,
					&ev, 1, 0);
		if (!event) {
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		if (!flow_id_cap)
			order_flow_id_copy_from_mbuf(t, &ev);

		if (ev.sub_event_type == 0) { /* stage 0 from producer */
			order_atq_process_stage_0(&ev);
			while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
					!= 1)
				rte_pause();
		} else if (ev.sub_event_type == 1) { /* stage 1 */
			order_process_stage_1(t, &ev, nb_flows,
					expected_flow_seq, outstand_pkts);
		} else {
			order_process_stage_invalid(t, &ev);
		}
	}
	return 0;
}

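/* Burst worker: same stage handling as above, but events are dequeued and
 * re-enqueued up to BURST_SIZE at a time. Stage 1 events are marked
 * RTE_EVENT_OP_RELEASE so the enqueue releases their scheduling context
 * instead of forwarding them.
 */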
static int
order_atq_worker_burst(void *arg, const bool flow_id_cap)
{
	ORDER_WORKER_INIT;
	struct rte_event ev[BURST_SIZE];
	uint16_t i;

	while (t->err == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
				BURST_SIZE, 0);

		if (nb_rx == 0) {
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (!flow_id_cap)
				order_flow_id_copy_from_mbuf(t, &ev[i]);

			if (ev[i].sub_event_type == 0) { /* stage 0 */
				order_atq_process_stage_0(&ev[i]);
			} else if (ev[i].sub_event_type == 1) { /* stage 1 */
				order_process_stage_1(t, &ev[i], nb_flows,
					expected_flow_seq, outstand_pkts);
				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				order_process_stage_invalid(t, &ev[i]);
			}
		}

		uint16_t enq;

		enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev_id, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

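/* Pick the worker variant once at launch time based on the device's burst and
 * flow id capabilities, keeping those checks out of the per-event loop.
 */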
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	const bool burst = evt_has_burst_mode(w->dev_id);
	const bool flow_id_cap = evt_has_flow_id(w->dev_id);

	if (burst) {
		if (flow_id_cap)
			return order_atq_worker_burst(arg, true);
		else
			return order_atq_worker_burst(arg, false);
	} else {
		if (flow_id_cap)
			return order_atq_worker(arg, true);
		else
			return order_atq_worker(arg, false);
	}
}

static int
order_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return order_launch_lcores(test, opt, worker_wrapper);
}

#define NB_QUEUES 1
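/* Configure a single all types queue shared by both stages, one port per
 * worker plus one for the producer, a service core when the PMD has no
 * distributed scheduling, and finally start the device.
 */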
static int
order_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;

	const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
	/* number of active worker cores + 1 producer */
	const uint8_t nb_ports = nb_workers + 1;

	ret = evt_configure_eventdev(opt, NB_QUEUES, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	/* q0 all types queue configuration */
	struct rte_event_queue_conf q0_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	ret = rte_event_queue_setup(opt->dev_id, 0, &q0_conf);
	if (ret) {
		evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
		return ret;
	}

	/* setup one port per worker, linking to all queues */
	ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
	if (ret)
		return ret;

	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

static void
order_atq_opt_dump(struct evt_options *opt)
{
	order_opt_dump(opt);
	evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
}

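/* The test needs one all types capable queue and enough ports for every
 * worker plus the producer.
 */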
static bool
order_atq_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < NB_QUEUES ||
			dev_info.max_event_ports < order_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			NB_QUEUES, dev_info.max_event_queues,
			order_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	if (!evt_has_all_types_queue(opt->dev_id))
		return false;

	return true;
}

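/* Callbacks registered with the test framework for the "order_atq" test. */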
static const struct evt_test_ops order_atq = {
	.cap_check          = order_atq_capability_check,
	.opt_check          = order_opt_check,
	.opt_dump           = order_atq_opt_dump,
	.test_setup         = order_test_setup,
	.mempool_setup      = order_mempool_setup,
	.eventdev_setup     = order_atq_eventdev_setup,
	.launch_lcores      = order_atq_launch_lcores,
	.eventdev_destroy   = order_eventdev_destroy,
	.mempool_destroy    = order_mempool_destroy,
	.test_result        = order_test_result,
	.test_destroy       = order_test_destroy,
};

EVT_TEST_REGISTER(order_atq);