/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 * Copyright 2017 Cavium, Inc.
 */

#include "pipeline_common.h"
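
/*
 * Small always-inline helpers shared by the worker loops below: mark an event
 * for forwarding to the next stage, enqueue events (retrying until the
 * eventdev accepts them) and transmit a finished packet on the mbuf's port.
 */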
static __rte_always_inline void
worker_fwd_event(struct rte_event *ev, uint8_t sched)
{
	ev->event_type = RTE_EVENT_TYPE_CPU;
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = sched;
}

static __rte_always_inline void
worker_event_enqueue(const uint8_t dev, const uint8_t port,
		struct rte_event *ev)
{
	while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
		rte_pause();
}

static __rte_always_inline void
worker_event_enqueue_burst(const uint8_t dev, const uint8_t port,
		struct rte_event *ev, const uint16_t nb_rx)
{
	uint16_t enq;

	enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
	while (enq < nb_rx)
		enq += rte_event_enqueue_burst(dev, port,
				ev + enq, nb_rx - enq);
}

static __rte_always_inline void
worker_tx_pkt(struct rte_mbuf *mbuf)
{
	while (rte_eth_tx_burst(mbuf->port, 0, &mbuf, 1) != 1)
		rte_pause();
}

/* Multi stage Pipeline Workers */
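
/*
 * Four variants of the worker loop follow: single-event and burst dequeue,
 * each in a normal multi-queue flavour and an all-type-queue (atq) flavour
 * that tracks the pipeline stage in ev.sub_event_type instead of ev.queue_id.
 */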
static int
worker_do_tx(void *arg)
{
	struct rte_event ev;
	struct worker_data *data = (struct worker_data *)arg;
	const uint8_t dev = data->dev_id;
	const uint8_t port = data->port_id;
	const uint8_t lst_qid = cdata.num_stages - 1;
	size_t fwd = 0, received = 0, tx = 0;

	while (!fdata->done) {
		if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
			rte_pause();
			continue;
		}
		received++;

		const uint8_t cq_id = ev.queue_id % cdata.num_stages;

		if (cq_id >= lst_qid) {
			if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
				worker_tx_pkt(ev.mbuf);
				tx++;
				continue;
			}
			worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
			ev.queue_id = (cq_id == lst_qid) ?
				cdata.next_qid[ev.queue_id] : ev.queue_id;
		} else {
			ev.queue_id = cdata.next_qid[ev.queue_id];
			worker_fwd_event(&ev, cdata.queue_type);
		}

		worker_event_enqueue(dev, port, &ev);
		fwd++;
	}

	printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
			rte_lcore_id(), received, fwd, tx);

	return 0;
}
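
/*
 * Same single-event loop for all-type queues: every stage shares one event
 * queue, so the current stage is carried in ev.sub_event_type rather than in
 * the queue id.
 */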
static int
worker_do_tx_atq(void *arg)
{
	struct rte_event ev;
	struct worker_data *data = (struct worker_data *)arg;
	const uint8_t dev = data->dev_id;
	const uint8_t port = data->port_id;
	const uint8_t lst_qid = cdata.num_stages - 1;
	size_t fwd = 0, received = 0, tx = 0;

	while (!fdata->done) {
		if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
			rte_pause();
			continue;
		}
		received++;

		const uint8_t cq_id = ev.sub_event_type % cdata.num_stages;

		if (cq_id == lst_qid) {
			if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
				worker_tx_pkt(ev.mbuf);
				tx++;
				continue;
			}
			worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
		} else {
			ev.sub_event_type++;
			worker_fwd_event(&ev, cdata.queue_type);
		}

		worker_event_enqueue(dev, port, &ev);
		fwd++;
	}

	printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
			rte_lcore_id(), received, fwd, tx);

	return 0;
}
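
/*
 * Burst variant: dequeue up to BATCH_SIZE events at a time, process each one
 * as above, mark transmitted packets RTE_EVENT_OP_RELEASE and enqueue the
 * whole batch back in one call.
 */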
static int
worker_do_tx_burst(void *arg)
{
	struct rte_event ev[BATCH_SIZE];
	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev = data->dev_id;
	uint8_t port = data->port_id;
	uint8_t lst_qid = cdata.num_stages - 1;
	size_t fwd = 0, received = 0, tx = 0;

	while (!fdata->done) {
		uint16_t i;
		const uint16_t nb_rx = rte_event_dequeue_burst(dev, port,
				ev, BATCH_SIZE, 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received += nb_rx;

		for (i = 0; i < nb_rx; i++) {
			const uint8_t cq_id = ev[i].queue_id % cdata.num_stages;

			if (cq_id >= lst_qid) {
				if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
					worker_tx_pkt(ev[i].mbuf);
					tx++;
					ev[i].op = RTE_EVENT_OP_RELEASE;
					continue;
				}
				ev[i].queue_id = (cq_id == lst_qid) ?
					cdata.next_qid[ev[i].queue_id] :
					ev[i].queue_id;
				worker_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
			} else {
				ev[i].queue_id = cdata.next_qid[ev[i].queue_id];
				worker_fwd_event(&ev[i], cdata.queue_type);
			}
		}

		worker_event_enqueue_burst(dev, port, ev, nb_rx);
		fwd += nb_rx;
	}

	printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
			rte_lcore_id(), received, fwd, tx);

	return 0;
}
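
/* Burst variant for all-type queues, staging on ev.sub_event_type. */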
static int
worker_do_tx_burst_atq(void *arg)
{
	struct rte_event ev[BATCH_SIZE];
	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev = data->dev_id;
	uint8_t port = data->port_id;
	uint8_t lst_qid = cdata.num_stages - 1;
	size_t fwd = 0, received = 0, tx = 0;

	while (!fdata->done) {
		uint16_t i;
		const uint16_t nb_rx = rte_event_dequeue_burst(dev, port,
				ev, BATCH_SIZE, 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received += nb_rx;

		for (i = 0; i < nb_rx; i++) {
			const uint8_t cq_id = ev[i].sub_event_type %
				cdata.num_stages;

			if (cq_id == lst_qid) {
				if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
					worker_tx_pkt(ev[i].mbuf);
					tx++;
					ev[i].op = RTE_EVENT_OP_RELEASE;
					continue;
				}
				worker_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
			} else {
				ev[i].sub_event_type++;
				worker_fwd_event(&ev[i], cdata.queue_type);
			}
		}

		worker_event_enqueue_burst(dev, port, ev, nb_rx);
		fwd += nb_rx;
	}

	printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
			rte_lcore_id(), received, fwd, tx);

	return 0;
}
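
/*
 * Configure the event device for worker-tx mode: size the queue and port
 * counts from the number of stages and ethdev ports, create the stage queues,
 * set up one port per worker linked to every queue, and start the device.
 */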
static int
setup_eventdev_worker_tx(struct cons_data *cons_data,
		struct worker_data *worker_data)
{
	RTE_SET_USED(cons_data);
	uint8_t i;
	const uint8_t atq = cdata.all_type_queues ? 1 : 0;
	const uint8_t dev_id = 0;
	const uint8_t nb_ports = cdata.num_workers;
	uint8_t nb_slots = 0;
	uint8_t nb_queues = rte_eth_dev_count();

	/*
	 * In case where all type queues are not enabled, use queues equal to
	 * number of stages * eth_dev_count and one extra queue per pipeline
	 * for Tx.
	 */
	if (!atq) {
		nb_queues *= cdata.num_stages;
		nb_queues += rte_eth_dev_count();
	}

	struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = 1024,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};
	struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = cdata.worker_cq_depth,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
	};
	struct rte_event_queue_conf wkr_q_conf = {
			.schedule_type = cdata.queue_type,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};

	int ret, ndev = rte_event_dev_count();

	if (ndev < 1) {
		printf("%d: No Eventdev Devices Found\n", __LINE__);
		return -1;
	}

	struct rte_event_dev_info dev_info;
	ret = rte_event_dev_info_get(dev_id, &dev_info);
	printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);

	if (dev_info.max_event_port_dequeue_depth <
			config.nb_event_port_dequeue_depth)
		config.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;
	if (dev_info.max_event_port_enqueue_depth <
			config.nb_event_port_enqueue_depth)
		config.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	ret = rte_event_dev_configure(dev_id, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}
321 printf(" Stages:\n");
322 for (i = 0; i < nb_queues; i++) {
326 nb_slots = cdata.num_stages;
327 wkr_q_conf.event_queue_cfg =
328 RTE_EVENT_QUEUE_CFG_ALL_TYPES;
332 nb_slots = cdata.num_stages + 1;
334 wkr_q_conf.schedule_type = slot == cdata.num_stages ?
335 RTE_SCHED_TYPE_ATOMIC : cdata.queue_type;
338 if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
339 printf("%d: error creating qid %d\n", __LINE__, i);
343 cdata.next_qid[i] = i+1;
344 if (cdata.enable_queue_priorities) {
345 const uint32_t prio_delta =
346 (RTE_EVENT_DEV_PRIORITY_LOWEST) /
349 /* higher priority for queues closer to tx */
350 wkr_q_conf.priority =
351 RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta *
355 const char *type_str = "Atomic";
356 switch (wkr_q_conf.schedule_type) {
357 case RTE_SCHED_TYPE_ORDERED:
358 type_str = "Ordered";
360 case RTE_SCHED_TYPE_PARALLEL:
361 type_str = "Parallel";
364 printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
365 wkr_q_conf.priority);

	if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;

	/* set up one port per worker, linking to all stage queues */
	for (i = 0; i < cdata.num_workers; i++) {
		struct worker_data *w = &worker_data[i];

		w->dev_id = dev_id;
		if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
			printf("Error setting up port %d\n", i);
			return -1;
		}

		if (rte_event_port_link(dev_id, i, NULL, NULL, 0)
				!= nb_queues) {
			printf("%d: error creating link for port %d\n",
					__LINE__, i);
			return -1;
		}
		w->port_id = i;
	}

	/*
	 * Reduce the load on the ingress event queue by splitting the traffic
	 * across multiple event queues.
	 * For example, with nb_stages = 2 and nb_ethdev = 2:
	 *
	 *	nb_queues = (2 * 2) + 2 = 6 (non atq)
	 *	rx_stride = 3
	 *
	 * Traffic is therefore split across queue 0 and queue 3, since the
	 * queue id used by the rx adapter is <ethport_id> * <rx_stride>; in
	 * the above case eth ports 0 and 1 inject packets into event queues
	 * 0 and 3 respectively.
	 *
	 * This forms two sets of queue pipelines 0->1->2->tx and 3->4->5->tx.
	 */
	cdata.rx_stride = atq ? 1 : nb_slots;
	ret = rte_event_dev_service_id_get(dev_id,
			&fdata->evdev_service_id);
	if (ret != -ESRCH && ret != 0) {
		printf("Error getting the service ID\n");
		return -1;
	}
	rte_service_runstate_set(fdata->evdev_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
	if (rte_event_dev_start(dev_id) < 0) {
		printf("Error starting eventdev\n");
		return -1;
	}

	return dev_id;
}
struct rx_adptr_services {
	uint16_t nb_rx_adptrs;
	uint32_t *rx_adpt_arr;
};

static int32_t
service_rx_adapter(void *arg)
{
	int i;
	struct rx_adptr_services *adptr_services = arg;

	for (i = 0; i < adptr_services->nb_rx_adptrs; i++)
		rte_service_run_iter_on_app_lcore(
				adptr_services->rx_adpt_arr[i], 1);
	return 0;
}
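
/*
 * Create one ethernet Rx adapter per ethdev port, map each port to its
 * ingress event queue (using cdata.rx_stride), and register a service to run
 * any adapters that are not backed by an internal eventdev port.
 */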
static void
init_rx_adapter(uint16_t nb_ports)
{
	int i;
	int ret;
	uint8_t evdev_id = 0;
	struct rx_adptr_services *adptr_services = NULL;
	struct rte_event_dev_info dev_info;

	ret = rte_event_dev_info_get(evdev_id, &dev_info);
	adptr_services = rte_zmalloc(NULL, sizeof(struct rx_adptr_services), 0);

	/* small default depths; clamped to the device limits below */
	struct rte_event_port_conf rx_p_conf = {
		.dequeue_depth = 8,
		.enqueue_depth = 8,
		.new_event_threshold = 1200,
	};

	if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
		rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
	if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
		rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;

	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
		.ev.sched_type = cdata.queue_type,
	};

	for (i = 0; i < nb_ports; i++) {
		uint32_t cap;
		uint32_t service_id;

		ret = rte_event_eth_rx_adapter_create(i, evdev_id, &rx_p_conf);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"failed to create rx adapter[%d]",
					cdata.rx_adapter_id);

		ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"failed to get event rx adapter "
					"capabilities");

		queue_conf.ev.queue_id = cdata.rx_stride ?
			(i * cdata.rx_stride)
			: (uint8_t)cdata.qid[0];

		ret = rte_event_eth_rx_adapter_queue_add(i, i, -1, &queue_conf);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Rx adapter");

		/* Producer needs to be scheduled. */
		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			ret = rte_event_eth_rx_adapter_service_id_get(i,
					&service_id);
			if (ret != -ESRCH && ret != 0) {
				rte_exit(EXIT_FAILURE,
				"Error getting the service ID for rx adptr\n");
			}

			rte_service_runstate_set(service_id, 1);
			rte_service_set_runstate_mapped_check(service_id, 0);

			adptr_services->nb_rx_adptrs++;
			adptr_services->rx_adpt_arr = rte_realloc(
					adptr_services->rx_adpt_arr,
					adptr_services->nb_rx_adptrs *
					sizeof(uint32_t), 0);
			adptr_services->rx_adpt_arr[
				adptr_services->nb_rx_adptrs - 1] =
					service_id;
		}

		ret = rte_event_eth_rx_adapter_start(i);
		if (ret)
			rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
					cdata.rx_adapter_id);
	}

	if (adptr_services->nb_rx_adptrs) {
		struct rte_service_spec service;

		memset(&service, 0, sizeof(struct rte_service_spec));
		snprintf(service.name, sizeof(service.name), "rx_service");
		service.callback = service_rx_adapter;
		service.callback_userdata = (void *)adptr_services;

		int32_t ret = rte_service_component_register(&service,
				&fdata->rxadptr_service_id);
		if (ret)
			rte_exit(EXIT_FAILURE,
				"Rx adapter[%d] service register failed",
				cdata.rx_adapter_id);

		rte_service_runstate_set(fdata->rxadptr_service_id, 1);
		rte_service_component_runstate_set(fdata->rxadptr_service_id,
				1);
		rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id,
				0);
	} else {
		memset(fdata->rx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);
	}

	if (!adptr_services->nb_rx_adptrs && fdata->cap.consumer == NULL &&
			(dev_info.event_dev_cap &
			 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))
		fdata->cap.scheduler = NULL;

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)
		memset(fdata->sched_core, 0,
				sizeof(unsigned int) * MAX_NUM_CORE);

	/* Free the list only when it was not handed to the rx service above. */
	if (!adptr_services->nb_rx_adptrs)
		rte_free(adptr_services);
}
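
/*
 * Sanity-check the command-line core masks against the event device
 * capabilities before launching: workers are always required, an rx core is
 * required only when some Rx adapter needs a software service, and a sched
 * core is required only when the eventdev has no distributed scheduling.
 */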
static void
worker_tx_opt_check(void)
{
	int i;
	int ret;
	uint32_t cap = 0;
	uint8_t rx_needed = 0;
	struct rte_event_dev_info eventdev_info;

	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
	rte_event_dev_info_get(0, &eventdev_info);

	if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
				RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
		rte_exit(EXIT_FAILURE,
				"Event dev doesn't support all type queues\n");

	for (i = 0; i < rte_eth_dev_count(); i++) {
		ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"failed to get event rx adapter "
					"capabilities");
		rx_needed |=
			!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
	}

	if (cdata.worker_lcore_mask == 0 ||
			(rx_needed && cdata.rx_lcore_mask == 0) ||
			(cdata.sched_lcore_mask == 0 &&
			 !(eventdev_info.event_dev_cap &
				 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))) {
		printf("Core part of pipeline was not assigned any cores. "
			"This will stall the pipeline, please check core masks "
			"(use -h for details on setting core masks):\n"
			"\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
			"\n\tworkers: %"PRIu64"\n",
			cdata.rx_lcore_mask, cdata.tx_lcore_mask,
			cdata.sched_lcore_mask,
			cdata.worker_lcore_mask);
		rte_exit(-1, "Fix core masks\n");
	}
}
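
/* Pick the worker loop variant matching the burst/all-type-queue settings. */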
static worker_loop
get_worker_loop_burst(uint8_t atq)
{
	if (atq)
		return worker_do_tx_burst_atq;
	return worker_do_tx_burst;
}

static worker_loop
get_worker_loop_non_burst(uint8_t atq)
{
	if (atq)
		return worker_do_tx_atq;
	return worker_do_tx;
}
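
/*
 * Populate the setup_data callbacks for worker-tx mode: pick the worker loop,
 * clear the tx core mask (workers transmit directly) and hook up the option
 * check, eventdev setup and Rx adapter setup routines defined above.
 */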
void
set_worker_tx_setup_data(struct setup_data *caps, bool burst)
{
	uint8_t atq = cdata.all_type_queues ? 1 : 0;

	if (burst)
		caps->worker = get_worker_loop_burst(atq);
	else
		caps->worker = get_worker_loop_non_burst(atq);

	memset(fdata->tx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);

	caps->check_opt = worker_tx_opt_check;
	caps->consumer = NULL;
	caps->scheduler = schedule_devices;
	caps->evdev_setup = setup_eventdev_worker_tx;
	caps->adptr_setup = init_rx_adapter;
}