/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */

#include "pipeline_common.h"
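
/*
 * Worker loop: dequeue a burst of events, classify packets in the first
 * stage (RSS hash -> flow id), move each event to the next stage queue and
 * forward the burst back into the eventdev.
 */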
static int
worker_generic_burst(void *arg)
{
	struct rte_event events[BATCH_SIZE];

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();
	while (!fdata->done) {
		uint16_t i;

		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				events, RTE_DIM(events), 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received += nb_rx;
		for (i = 0; i < nb_rx; i++) {

			/* The first worker stage does classification */
			if (events[i].queue_id == cdata.qid[0])
				events[i].flow_id = events[i].mbuf->hash.rss
							% cdata.num_fids;

			events[i].queue_id = cdata.next_qid[events[i].queue_id];
			events[i].op = RTE_EVENT_OP_FORWARD;
			events[i].sched_type = cdata.queue_type;
		}
		uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
				events, nb_rx);
		while (nb_tx < nb_rx && !fdata->done)
			nb_tx += rte_event_enqueue_burst(dev_id, port_id,
					events + nb_tx, nb_rx - nb_tx);
		sent += nb_tx;
	}

	if (!cdata.quiet)
		printf(" worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}
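
/*
 * Consumer/TX loop: dequeue events that have reached the single-link TX
 * queue, buffer each mbuf on its output ethdev port, hand the events back
 * with RTE_EVENT_OP_RELEASE when the device needs an explicit release, and
 * periodically report throughput.
 */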
static __rte_always_inline int
consumer_burst(void)
{
	const uint64_t freq_khz = rte_get_timer_hz() / 1000;
	struct rte_event packets[BATCH_SIZE];

	static uint64_t received;
	static uint64_t last_pkts;
	static uint64_t last_time;
	static uint64_t start_time;
	unsigned int i, j;
	uint8_t dev_id = cons_data.dev_id;
	uint8_t port_id = cons_data.port_id;
	uint16_t nb_ports = rte_eth_dev_count();

	do {
		uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
				packets, RTE_DIM(packets), 0);

		if (n == 0) {
			for (j = 0; j < nb_ports; j++)
				rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
			return 0;
		}
		if (start_time == 0)
			last_time = start_time = rte_get_timer_cycles();

		received += n;
		for (i = 0; i < n; i++) {
			uint8_t outport = packets[i].mbuf->port;
			rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
					packets[i].mbuf);

			packets[i].op = RTE_EVENT_OP_RELEASE;
		}
		if (cons_data.release) {
			uint16_t nb_tx;

			nb_tx = rte_event_enqueue_burst(dev_id, port_id,
					packets, n);
			while (nb_tx < n)
				nb_tx += rte_event_enqueue_burst(dev_id,
						port_id, packets + nb_tx,
						n - nb_tx);
		}
		/* Print out mpps every 1<<22 packets */
		if (!cdata.quiet && received >= last_pkts + (1<<22)) {
			const uint64_t now = rte_get_timer_cycles();
			const uint64_t total_ms = (now - start_time) / freq_khz;
			const uint64_t delta_ms = (now - last_time) / freq_khz;
			uint64_t delta_pkts = received - last_pkts;

			printf("# consumer RX=%"PRIu64", time %"PRIu64 "ms, "
					"avg %.3f mpps [current %.3f mpps]\n",
					received,
					total_ms,
					received / (total_ms * 1000.0),
					delta_pkts / (delta_ms * 1000.0));
			last_pkts = received;
			last_time = now;
		}
		cdata.num_packets -= n;
		if (cdata.num_packets <= 0)
			fdata->done = 1;
	/* Stay in this loop when a single dedicated TX core is used. */
	} while (!fdata->done && fdata->tx_single);

	return 0;
}
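
/*
 * Configure the event device for the generic pipeline: one load-balanced
 * queue per worker stage plus a single-link TX queue, one port per worker
 * plus one port for the consumer, with queue/port depths clamped to the
 * device capabilities.
 */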
static int
setup_eventdev_generic(struct cons_data *cons_data,
		struct worker_data *worker_data)
{
	const uint8_t dev_id = 0;
	/* +1 queue is for the SINGLE_LINK TX stage */
	const uint8_t nb_queues = cdata.num_stages + 1;
	/* +1 port is for the consumer */
	const uint8_t nb_ports = cdata.num_workers + 1;
	struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = 1024,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};
	struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = cdata.worker_cq_depth,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
	};
	struct rte_event_queue_conf wkr_q_conf = {
			.schedule_type = cdata.queue_type,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};
	struct rte_event_port_conf tx_p_conf = {
			.dequeue_depth = 128,
			.enqueue_depth = 128,
			.new_event_threshold = 4096,
	};
	struct rte_event_queue_conf tx_q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};
	struct port_link worker_queues[MAX_NUM_STAGES];
	uint8_t disable_implicit_release;
	struct port_link tx_queue;
	unsigned int i;

	int ret, ndev = rte_event_dev_count();
	if (ndev < 1) {
		printf("%d: No Eventdev Devices Found\n", __LINE__);
		return -1;
	}
	struct rte_event_dev_info dev_info;
	ret = rte_event_dev_info_get(dev_id, &dev_info);
	printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);

	disable_implicit_release = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);

	wkr_p_conf.disable_implicit_release = disable_implicit_release;
	tx_p_conf.disable_implicit_release = disable_implicit_release;
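
	/* Clamp the configured port depths to the device's advertised limits. */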
	if (dev_info.max_event_port_dequeue_depth <
			config.nb_event_port_dequeue_depth)
		config.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;
	if (dev_info.max_event_port_enqueue_depth <
			config.nb_event_port_enqueue_depth)
		config.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;
	ret = rte_event_dev_configure(dev_id, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}
	/* Queue creation - one load-balanced queue per pipeline stage */
	printf(" Stages:\n");
	for (i = 0; i < cdata.num_stages; i++) {
		if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		cdata.qid[i] = i;
		cdata.next_qid[i] = i + 1;
		worker_queues[i].queue_id = i;
		if (cdata.enable_queue_priorities) {
			/* calculate priority stepping for each stage, leaving
			 * headroom of 1 for the SINGLE_LINK TX below
			 */
			const uint32_t prio_delta =
				(RTE_EVENT_DEV_PRIORITY_LOWEST - 1) / nb_queues;

			/* higher priority for queues closer to tx */
			wkr_q_conf.priority =
				RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
		}
		const char *type_str = "Atomic";
		switch (wkr_q_conf.schedule_type) {
		case RTE_SCHED_TYPE_ORDERED:
			type_str = "Ordered";
			break;
		case RTE_SCHED_TYPE_PARALLEL:
			type_str = "Parallel";
			break;
		}
		printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
				wkr_q_conf.priority);
	}
	printf("\n");
	/* final queue for sending to TX core */
	if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
		printf("%d: error creating qid %d\n", __LINE__, i);
		return -1;
	}
	tx_queue.queue_id = i;
	tx_queue.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
	/* set up one port per worker, linking to all stage queues */
	for (i = 0; i < cdata.num_workers; i++) {
		struct worker_data *w = &worker_data[i];
		w->dev_id = dev_id;
		if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
			printf("Error setting up port %d\n", i);
			return -1;
		}

		uint32_t s;
		for (s = 0; s < cdata.num_stages; s++) {
			if (rte_event_port_link(dev_id, i,
					&worker_queues[s].queue_id,
					&worker_queues[s].priority,
					1) != 1) {
				printf("%d: error creating link for port %d\n",
						__LINE__, i);
				return -1;
			}
		}
		w->port_id = i;
	}
	if (tx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		tx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (tx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		tx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
	/* port for consumer, linked to TX queue */
	if (rte_event_port_setup(dev_id, i, &tx_p_conf) < 0) {
		printf("Error setting up port %d\n", i);
		return -1;
	}
	if (rte_event_port_link(dev_id, i, &tx_queue.queue_id,
				&tx_queue.priority, 1) != 1) {
		printf("%d: error creating link for port %d\n",
				__LINE__, i);
		return -1;
	}
	*cons_data = (struct cons_data){.dev_id = dev_id,
			.port_id = i,
			.release = disable_implicit_release };
	ret = rte_event_dev_service_id_get(dev_id,
			&fdata->evdev_service_id);
	if (ret != -ESRCH && ret != 0) {
		printf("Error getting the service ID for sw eventdev\n");
		return -1;
	}
	rte_service_runstate_set(fdata->evdev_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
	if (rte_event_dev_start(dev_id) < 0) {
		printf("Error starting eventdev\n");
		return -1;
	}

	return dev_id;
}
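
/*
 * Create a single Rx adapter feeding every ethdev port into the first stage
 * queue. Its service is run by the application itself (see schedule_devices),
 * so the lcore-mapping check is disabled.
 */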
static void
init_rx_adapter(uint16_t nb_ports)
{
	int i;
	int ret;
	uint8_t evdev_id = 0;
	struct rte_event_dev_info dev_info;

	ret = rte_event_dev_info_get(evdev_id, &dev_info);
	struct rte_event_port_conf rx_p_conf = {
		.dequeue_depth = 8,
		.enqueue_depth = 8,
		.new_event_threshold = 1200,
	};
	if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
		rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
	if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
		rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
	/* Create one adapter for all the ethernet ports. */
	ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
			&rx_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
				cdata.rx_adapter_id);
	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
		.ev.sched_type = cdata.queue_type,
		.ev.queue_id = cdata.qid[0],
	};
	for (i = 0; i < nb_ports; i++) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"failed to get event rx adapter "
					"capabilities");

		ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
				-1, &queue_conf);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Rx adapter");
	}
	ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
			&fdata->rxadptr_service_id);
	if (ret != -ESRCH && ret != 0) {
		rte_exit(EXIT_FAILURE,
			"Error getting the service ID for the Rx adapter\n");
	}
	rte_service_runstate_set(fdata->rxadptr_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);
	ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
				cdata.rx_adapter_id);
}
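
/*
 * Register the callbacks used by the rest of the application for the
 * generic pipeline: burst worker/consumer loops, the eventdev and Rx
 * adapter setup routines and the software scheduler hook.
 */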
void
set_worker_generic_setup_data(struct setup_data *caps, bool burst)
{
	/* only the burst variants are provided here */
	RTE_SET_USED(burst);

	caps->consumer = consumer_burst;
	caps->worker = worker_generic_burst;

	caps->adptr_setup = init_rx_adapter;
	caps->scheduler = schedule_devices;
	caps->evdev_setup = setup_eventdev_generic;
}