/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */

#include "pipeline_common.h"
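
/*
 * Generic single-event worker: dequeue one event at a time, classify
 * packets arriving on the first stage queue, then forward the event to
 * the next stage queue until it reaches the single-link TX queue.
 */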
static __rte_always_inline int
worker_generic(void *arg)
{
	struct rte_event ev;
	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();

	while (!fdata->done) {
		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				&ev, 1, 0);
		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received++;

		/* The first worker stage does classification */
		if (ev.queue_id == cdata.qid[0])
			ev.flow_id = ev.mbuf->hash.rss % cdata.num_fids;

		ev.queue_id = cdata.next_qid[ev.queue_id];
		ev.op = RTE_EVENT_OP_FORWARD;
		ev.sched_type = cdata.queue_type;

		while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
			rte_pause();
		sent++;
	}

	if (!cdata.quiet)
		printf(" worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}
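
/*
 * Burst variant of the generic worker: identical logic, but events are
 * dequeued and re-enqueued in batches of up to BATCH_SIZE to reduce
 * per-event overhead.
 */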
static __rte_always_inline int
worker_generic_burst(void *arg)
{
	struct rte_event events[BATCH_SIZE];
	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();

	while (!fdata->done) {
		uint16_t i;

		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				events, RTE_DIM(events), 0);
		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received += nb_rx;

		for (i = 0; i < nb_rx; i++) {
			/* The first worker stage does classification */
			if (events[i].queue_id == cdata.qid[0])
				events[i].flow_id = events[i].mbuf->hash.rss
						% cdata.num_fids;

			events[i].queue_id = cdata.next_qid[events[i].queue_id];
			events[i].op = RTE_EVENT_OP_FORWARD;
			events[i].sched_type = cdata.queue_type;
		}

		uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
				events, nb_rx);
		while (nb_tx < nb_rx && !fdata->done)
			nb_tx += rte_event_enqueue_burst(dev_id, port_id,
					events + nb_tx, nb_rx - nb_tx);
		sent += nb_tx;
	}

	if (!cdata.quiet)
		printf(" worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}
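
/*
 * Single-event TX/consumer loop: dequeue one event, pass the mbuf through
 * exchange_mac(), buffer it for transmit on its output port, and
 * periodically print throughput statistics.
 */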
static __rte_always_inline int
consumer(void)
{
	const uint64_t freq_khz = rte_get_timer_hz() / 1000;
	struct rte_event packet;

	static uint64_t received;
	static uint64_t last_pkts;
	static uint64_t last_time;
	static uint64_t start_time;

	uint16_t i;
	uint8_t dev_id = cons_data.dev_id;
	uint8_t port_id = cons_data.port_id;

	do {
		uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
				&packet, 1, 0);

		if (n == 0) {
			RTE_ETH_FOREACH_DEV(i)
				rte_eth_tx_buffer_flush(i, 0, fdata->tx_buf[i]);
			return 0;
		}
		if (start_time == 0)
			last_time = start_time = rte_get_timer_cycles();

		received++;
		uint8_t outport = packet.mbuf->port;

		exchange_mac(packet.mbuf);
		rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
				packet.mbuf);

		if (cons_data.release)
			rte_event_enqueue_burst(dev_id, port_id,
					&packet, n);

		/* Print out mpps every 1<<22 packets */
		if (!cdata.quiet && received >= last_pkts + (1<<22)) {
			const uint64_t now = rte_get_timer_cycles();
			const uint64_t total_ms = (now - start_time) / freq_khz;
			const uint64_t delta_ms = (now - last_time) / freq_khz;
			uint64_t delta_pkts = received - last_pkts;

			printf("# %s RX=%"PRIu64", time %"PRIu64 "ms, "
					"avg %.3f mpps [current %.3f mpps]\n",
					__func__, received, total_ms,
					received / (total_ms * 1000.0),
					delta_pkts / (delta_ms * 1000.0));
			last_pkts = received;
			last_time = now;
		}

		cdata.num_packets--;
		if (cdata.num_packets <= 0)
			fdata->done = 1;
	/* Stay in this loop if this is the single TX core. */
	} while (!fdata->done && fdata->tx_single);

	return 0;
}
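
/*
 * Burst variant of the consumer: handles up to BATCH_SIZE events per
 * dequeue and, when implicit release is disabled, enqueues explicit
 * RTE_EVENT_OP_RELEASE operations for the processed events.
 */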
static __rte_always_inline int
consumer_burst(void)
{
	const uint64_t freq_khz = rte_get_timer_hz() / 1000;
	struct rte_event packets[BATCH_SIZE];

	static uint64_t received;
	static uint64_t last_pkts;
	static uint64_t last_time;
	static uint64_t start_time;

	uint16_t i, j;
	uint8_t dev_id = cons_data.dev_id;
	uint8_t port_id = cons_data.port_id;

	do {
		uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
				packets, RTE_DIM(packets), 0);

		if (n == 0) {
			RTE_ETH_FOREACH_DEV(j)
				rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
			return 0;
		}
		if (start_time == 0)
			last_time = start_time = rte_get_timer_cycles();

		received += n;
		for (i = 0; i < n; i++) {
			uint8_t outport = packets[i].mbuf->port;

			exchange_mac(packets[i].mbuf);
			rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
					packets[i].mbuf);

			packets[i].op = RTE_EVENT_OP_RELEASE;
		}

		if (cons_data.release) {
			uint16_t nb_tx;

			nb_tx = rte_event_enqueue_burst(dev_id, port_id,
					packets, n);
			while (nb_tx < n)
				nb_tx += rte_event_enqueue_burst(dev_id,
						port_id, packets + nb_tx,
						n - nb_tx);
		}

		/* Print out mpps every 1<<22 packets */
		if (!cdata.quiet && received >= last_pkts + (1<<22)) {
			const uint64_t now = rte_get_timer_cycles();
			const uint64_t total_ms = (now - start_time) / freq_khz;
			const uint64_t delta_ms = (now - last_time) / freq_khz;
			uint64_t delta_pkts = received - last_pkts;

			printf("# consumer RX=%"PRIu64", time %"PRIu64 "ms, "
					"avg %.3f mpps [current %.3f mpps]\n",
					received, total_ms,
					received / (total_ms * 1000.0),
					delta_pkts / (delta_ms * 1000.0));
			last_pkts = received;
			last_time = now;
		}

		cdata.num_packets -= n;
		if (cdata.num_packets <= 0)
			fdata->done = 1;
	/* Stay in this loop if this is the single TX core. */
	} while (!fdata->done && fdata->tx_single);

	return 0;
}
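
/*
 * Configure the event device for the generic pipeline: one load-balanced
 * queue per worker stage plus a SINGLE_LINK queue feeding the TX/consumer
 * port, one port per worker linked to every stage queue, and one port for
 * the consumer linked to the TX queue.
 */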
static int
setup_eventdev_generic(struct cons_data *cons_data,
		struct worker_data *worker_data)
{
	const uint8_t dev_id = 0;
	/* +1 queue is for the SINGLE_LINK TX stage */
	const uint8_t nb_queues = cdata.num_stages + 1;
	/* +1 port is for the consumer */
	const uint8_t nb_ports = cdata.num_workers + 1;
	struct rte_event_dev_config config = {
		.nb_event_queues = nb_queues,
		.nb_event_ports = nb_ports,
		.nb_events_limit = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128,
	};
	struct rte_event_port_conf wkr_p_conf = {
		.dequeue_depth = cdata.worker_cq_depth,
		.new_event_threshold = 4096,
	};
	struct rte_event_queue_conf wkr_q_conf = {
		.schedule_type = cdata.queue_type,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
	};
	struct rte_event_port_conf tx_p_conf = {
		.dequeue_depth = 128,
		.enqueue_depth = 128,
		.new_event_threshold = 4096,
	};
	struct rte_event_queue_conf tx_q_conf = {
		.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};

	struct port_link worker_queues[MAX_NUM_STAGES];
	uint8_t disable_implicit_release;
	struct port_link tx_queue;
	unsigned int i;

	int ret, ndev = rte_event_dev_count();
	if (ndev < 1) {
		printf("%d: No Eventdev Devices Found\n", __LINE__);
		return -1;
	}

	struct rte_event_dev_info dev_info;
	ret = rte_event_dev_info_get(dev_id, &dev_info);
	printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);

	disable_implicit_release = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);

	wkr_p_conf.disable_implicit_release = disable_implicit_release;
	tx_p_conf.disable_implicit_release = disable_implicit_release;

	if (dev_info.max_event_port_dequeue_depth <
			config.nb_event_port_dequeue_depth)
		config.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;
	if (dev_info.max_event_port_enqueue_depth <
			config.nb_event_port_enqueue_depth)
		config.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	ret = rte_event_dev_configure(dev_id, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}

	/* Queue creation: one load-balanced queue per pipeline stage */
	printf(" Stages:\n");
	for (i = 0; i < cdata.num_stages; i++) {
		if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		cdata.next_qid[i] = i + 1;
		worker_queues[i].queue_id = i;
		if (cdata.enable_queue_priorities) {
			/* calculate priority stepping for each stage, leaving
			 * headroom of 1 for the SINGLE_LINK TX below
			 */
			const uint32_t prio_delta =
				(RTE_EVENT_DEV_PRIORITY_LOWEST - 1) / nb_queues;

			/* higher priority for queues closer to tx */
			wkr_q_conf.priority =
				RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
		}

		const char *type_str = "Atomic";
		switch (wkr_q_conf.schedule_type) {
		case RTE_SCHED_TYPE_ORDERED:
			type_str = "Ordered";
			break;
		case RTE_SCHED_TYPE_PARALLEL:
			type_str = "Parallel";
			break;
		}
		printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
				wkr_q_conf.priority);
	}

	/* final queue for sending to TX core */
	if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
		printf("%d: error creating qid %d\n", __LINE__, i);
		return -1;
	}
	tx_queue.queue_id = i;
	tx_queue.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;

	if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;

	/* set up one port per worker, linking to all stage queues */
	for (i = 0; i < cdata.num_workers; i++) {
		struct worker_data *w = &worker_data[i];

		if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
			printf("Error setting up port %d\n", i);
			return -1;
		}

		uint32_t s;
		for (s = 0; s < cdata.num_stages; s++) {
			if (rte_event_port_link(dev_id, i,
					&worker_queues[s].queue_id,
					&worker_queues[s].priority,
					1) != 1) {
				printf("%d: error creating link for port %d\n",
						__LINE__, i);
				return -1;
			}
		}
		w->port_id = i;
	}

	if (tx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		tx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (tx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		tx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;

	/* port for consumer, linked to TX queue */
	if (rte_event_port_setup(dev_id, i, &tx_p_conf) < 0) {
		printf("Error setting up port %d\n", i);
		return -1;
	}
	if (rte_event_port_link(dev_id, i, &tx_queue.queue_id,
			&tx_queue.priority, 1) != 1) {
		printf("%d: error creating link for port %d\n",
				__LINE__, i);
		return -1;
	}
	*cons_data = (struct cons_data){.dev_id = dev_id,
			.port_id = i,
			.release = disable_implicit_release };

	ret = rte_event_dev_service_id_get(dev_id,
			&fdata->evdev_service_id);
	if (ret != -ESRCH && ret != 0) {
		printf("Error getting the service ID for sw eventdev\n");
		return -1;
	}
	rte_service_runstate_set(fdata->evdev_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
	if (rte_event_dev_start(dev_id) < 0) {
		printf("Error starting eventdev\n");
		return -1;
	}

	return dev_id;
}
static void
init_rx_adapter(uint16_t nb_ports)
{
	int i;
	int ret;
	uint8_t evdev_id = 0;
	struct rte_event_dev_info dev_info;

	ret = rte_event_dev_info_get(evdev_id, &dev_info);

	struct rte_event_port_conf rx_p_conf = {
		.new_event_threshold = 1200,
	};

	if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
		rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
	if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
		rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;

	/* Create one adapter for all the ethernet ports. */
	ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
			&rx_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
				cdata.rx_adapter_id);

	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
	memset(&queue_conf, 0, sizeof(queue_conf));
	queue_conf.ev.sched_type = cdata.queue_type;
	queue_conf.ev.queue_id = cdata.qid[0];

	for (i = 0; i < nb_ports; i++) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
		if (ret)
			rte_exit(EXIT_FAILURE,
				"failed to get event rx adapter "
				"capabilities");

		ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
				-1, &queue_conf);
		if (ret)
			rte_exit(EXIT_FAILURE,
				"Failed to add queues to Rx adapter");
	}

	ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
			&fdata->rxadptr_service_id);
	if (ret != -ESRCH && ret != 0) {
		rte_exit(EXIT_FAILURE,
			"Error getting the service ID for the Rx adapter\n");
	}
	rte_service_runstate_set(fdata->rxadptr_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);

	ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
				cdata.rx_adapter_id);
}
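
/*
 * Sanity-check the configured core masks against the capabilities of the
 * event device and the Rx adapter, and drop the scheduler core requirement
 * when the device provides distributed scheduling.
 */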
static void
generic_opt_check(void)
{
	int i;
	int ret;
	uint32_t cap = 0;
	uint8_t rx_needed = 0;
	struct rte_event_dev_info eventdev_info;

	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
	rte_event_dev_info_get(0, &eventdev_info);

	if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
				RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
		rte_exit(EXIT_FAILURE,
				"Event dev doesn't support all type queues\n");

	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
		if (ret)
			rte_exit(EXIT_FAILURE,
				"failed to get event rx adapter capabilities");
		rx_needed |=
			!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
	}

	if (cdata.worker_lcore_mask == 0 ||
			(rx_needed && cdata.rx_lcore_mask == 0) ||
			cdata.tx_lcore_mask == 0 || (cdata.sched_lcore_mask == 0
				&& !(eventdev_info.event_dev_cap &
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))) {
		printf("Part of the pipeline was not assigned any cores. "
			"This will stall the pipeline, please check core masks "
			"(use -h for details on setting core masks):\n"
			"\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
			"\n\tworkers: %"PRIu64"\n",
			cdata.rx_lcore_mask, cdata.tx_lcore_mask,
			cdata.sched_lcore_mask,
			cdata.worker_lcore_mask);
		rte_exit(-1, "Fix core masks\n");
	}

	if (eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)
		memset(fdata->sched_core, 0,
				sizeof(unsigned int) * MAX_NUM_CORE);
}
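
/*
 * Fill in the setup_data callbacks for the generic pipeline, selecting
 * the single-event or burst worker and consumer variants.
 */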
void
set_worker_generic_setup_data(struct setup_data *caps, bool burst)
{
	if (burst) {
		caps->consumer = consumer_burst;
		caps->worker = worker_generic_burst;
	} else {
		caps->consumer = consumer;
		caps->worker = worker_generic;
	}

	caps->adptr_setup = init_rx_adapter;
	caps->scheduler = schedule_devices;
	caps->evdev_setup = setup_eventdev_generic;
	caps->check_opt = generic_opt_check;
}