/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 * Copyright 2017 Cavium, Inc.
 */

#include "pipeline_common.h"
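
/* Mark an event as CPU-generated, set it to be forwarded and pick its next
 * scheduling type.
 */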
static __rte_always_inline void
worker_fwd_event(struct rte_event *ev, uint8_t sched)
{
        ev->event_type = RTE_EVENT_TYPE_CPU;
        ev->op = RTE_EVENT_OP_FORWARD;
        ev->sched_type = sched;
}
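
/* Enqueue a single event, retrying until the port accepts it. */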
static __rte_always_inline void
worker_event_enqueue(const uint8_t dev, const uint8_t port,
                struct rte_event *ev)
{
        while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
                rte_pause();
}
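
/* Enqueue a burst of events, retrying until all nb_rx events are accepted. */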
static __rte_always_inline void
worker_event_enqueue_burst(const uint8_t dev, const uint8_t port,
                struct rte_event *ev, const uint16_t nb_rx)
{
        uint16_t enq;

        enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
        while (enq < nb_rx)
                enq += rte_event_enqueue_burst(dev, port,
                                ev + enq, nb_rx - enq);
}
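
/* Transmit the packet on TX queue 0 of its mbuf port, retrying until sent. */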
static __rte_always_inline void
worker_tx_pkt(struct rte_mbuf *mbuf)
{
        while (rte_eth_tx_burst(mbuf->port, 0, &mbuf, 1) != 1)
                rte_pause();
}

/* Multi stage Pipeline Workers */
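
/*
 * Single-event worker: dequeue one event at a time, transmit the packet when
 * it reaches the final atomic stage, otherwise forward it to the next stage
 * queue.
 */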
static int
worker_do_tx(void *arg)
{
        struct rte_event ev;
        struct worker_data *data = (struct worker_data *)arg;
        const uint8_t dev = data->dev_id;
        const uint8_t port = data->port_id;
        const uint8_t lst_qid = cdata.num_stages - 1;
        size_t fwd = 0, received = 0, tx = 0;

        while (!fdata->done) {

                if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
                        rte_pause();
                        continue;
                }
                received++;

                const uint8_t cq_id = ev.queue_id % cdata.num_stages;

                if (cq_id >= lst_qid) {
                        if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
                                worker_tx_pkt(ev.mbuf);
                                tx++;
                                continue;
                        }

                        worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
                        ev.queue_id = (cq_id == lst_qid) ?
                                cdata.next_qid[ev.queue_id] : ev.queue_id;
                } else {
                        ev.queue_id = cdata.next_qid[ev.queue_id];
                        worker_fwd_event(&ev, cdata.queue_type);
                }

                worker_event_enqueue(dev, port, &ev);
                fwd++;
        }

        printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
                        rte_lcore_id(), received, fwd, tx);

        return 0;
}
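
/*
 * Burst worker: same per-stage logic as worker_do_tx, but events are
 * dequeued and re-enqueued in bursts of up to BATCH_SIZE.
 */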
static int
worker_do_tx_burst(void *arg)
{
        struct rte_event ev[BATCH_SIZE];
        struct worker_data *data = (struct worker_data *)arg;
        uint8_t dev = data->dev_id;
        uint8_t port = data->port_id;
        uint8_t lst_qid = cdata.num_stages - 1;
        size_t fwd = 0, received = 0, tx = 0;

        while (!fdata->done) {
                uint16_t i;
                const uint16_t nb_rx = rte_event_dequeue_burst(dev, port,
                                ev, BATCH_SIZE, 0);

                if (nb_rx == 0) {
                        rte_pause();
                        continue;
                }
                received += nb_rx;

                for (i = 0; i < nb_rx; i++) {
                        const uint8_t cq_id = ev[i].queue_id % cdata.num_stages;

                        if (cq_id >= lst_qid) {
                                if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
                                        worker_tx_pkt(ev[i].mbuf);
                                        tx++;
                                        ev[i].op = RTE_EVENT_OP_RELEASE;
                                        continue;
                                }
                                ev[i].queue_id = (cq_id == lst_qid) ?
                                        cdata.next_qid[ev[i].queue_id] :
                                        ev[i].queue_id;

                                worker_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
                        } else {
                                ev[i].queue_id = cdata.next_qid[ev[i].queue_id];
                                worker_fwd_event(&ev[i], cdata.queue_type);
                        }
                }
                worker_event_enqueue_burst(dev, port, ev, nb_rx);
                fwd += nb_rx;
        }

        printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
                        rte_lcore_id(), received, fwd, tx);

        return 0;
}
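
/*
 * Configure the event device: per ethdev, one event queue per stage plus one
 * atomic TX-stage queue, and one event port per worker linked to all queues.
 */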
static int
setup_eventdev_worker_tx(struct cons_data *cons_data,
                struct worker_data *worker_data)
{
        RTE_SET_USED(cons_data);
        uint8_t i;
        const uint8_t dev_id = 0;
        const uint8_t nb_ports = cdata.num_workers;
        uint8_t nb_slots = 0;
        uint8_t nb_queues = rte_eth_dev_count() * cdata.num_stages;
        nb_queues += rte_eth_dev_count();

        struct rte_event_dev_config config = {
                        .nb_event_queues = nb_queues,
                        .nb_event_ports = nb_ports,
                        .nb_events_limit = 4096,
                        .nb_event_queue_flows = 1024,
                        .nb_event_port_dequeue_depth = 128,
                        .nb_event_port_enqueue_depth = 128,
        };
        struct rte_event_port_conf wkr_p_conf = {
                        .dequeue_depth = cdata.worker_cq_depth,
                        .enqueue_depth = 64,
                        .new_event_threshold = 4096,
        };
        struct rte_event_queue_conf wkr_q_conf = {
                        .schedule_type = cdata.queue_type,
                        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
                        .nb_atomic_flows = 1024,
                        .nb_atomic_order_sequences = 1024,
        };

        int ret, ndev = rte_event_dev_count();

        if (ndev < 1) {
                printf("%d: No Eventdev Devices Found\n", __LINE__);
                return -1;
        }

        struct rte_event_dev_info dev_info;
        ret = rte_event_dev_info_get(dev_id, &dev_info);
        printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);

        if (dev_info.max_event_port_dequeue_depth <
                        config.nb_event_port_dequeue_depth)
                config.nb_event_port_dequeue_depth =
                                dev_info.max_event_port_dequeue_depth;
        if (dev_info.max_event_port_enqueue_depth <
                        config.nb_event_port_enqueue_depth)
                config.nb_event_port_enqueue_depth =
                                dev_info.max_event_port_enqueue_depth;

        ret = rte_event_dev_configure(dev_id, &config);
        if (ret < 0) {
                printf("%d: Error configuring device\n", __LINE__);
                return -1;
        }

        printf(" Stages:\n");
        for (i = 0; i < nb_queues; i++) {
                uint8_t slot;

                nb_slots = cdata.num_stages + 1;
                slot = i % nb_slots;
                wkr_q_conf.schedule_type = slot == cdata.num_stages ?
                        RTE_SCHED_TYPE_ATOMIC : cdata.queue_type;

                if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
                        printf("%d: error creating qid %d\n", __LINE__, i);
                        return -1;
                }
                cdata.qid[i] = i;
                cdata.next_qid[i] = i+1;
                if (cdata.enable_queue_priorities) {
                        const uint32_t prio_delta =
                                (RTE_EVENT_DEV_PRIORITY_LOWEST) /
                                cdata.num_stages;

                        /* higher priority for queues closer to tx */
                        wkr_q_conf.priority =
                                RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta *
                                (i % cdata.num_stages);
                }

                const char *type_str = "Atomic";
                switch (wkr_q_conf.schedule_type) {
                case RTE_SCHED_TYPE_ORDERED:
                        type_str = "Ordered";
                        break;
                case RTE_SCHED_TYPE_PARALLEL:
                        type_str = "Parallel";
                        break;
                }
                printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
                                wkr_q_conf.priority);
        }

        if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
                wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
        if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
                wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;

        /* set up one port per worker, linking to all stage queues */
        for (i = 0; i < cdata.num_workers; i++) {
                struct worker_data *w = &worker_data[i];

                w->dev_id = dev_id;
                if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
                        printf("Error setting up port %d\n", i);
                        return -1;
                }

                if (rte_event_port_link(dev_id, i, NULL, NULL, 0)
                                != nb_queues) {
                        printf("%d: error creating link for port %d\n",
                                        __LINE__, i);
                        return -1;
                }
                w->port_id = i;
        }
        /*
         * Reduce the load on ingress event queue by splitting the traffic
         * across multiple event queues.
         * for example, nb_stages = 2 and nb_ethdev = 2 then
         *
         *      nb_queues = (2 * 2) + 2 = 6 (non atq)
         *      rx_stride = 3
         *
         * So, traffic is split across queue 0 and queue 3 since queue id for
         * rx adapter is chosen <ethport_id> * <rx_stride> i.e in the above
         * case eth port 0, 1 will inject packets into event queue 0, 3
         * respectively.
         *
         * This forms two set of queue pipelines 0->1->2->tx and 3->4->5->tx.
         */
        cdata.rx_stride = nb_slots;
        ret = rte_event_dev_service_id_get(dev_id,
                        &fdata->evdev_service_id);
        if (ret != -ESRCH && ret != 0) {
                printf("Error getting the service ID\n");
                return -1;
        }
        rte_service_runstate_set(fdata->evdev_service_id, 1);
        rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
        if (rte_event_dev_start(dev_id) < 0) {
                printf("Error starting eventdev\n");
                return -1;
        }

        return dev_id;
}
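
/*
 * Bookkeeping for Rx adapters that need a software service; the service IDs
 * collected here are driven by service_rx_adapter() below.
 */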
struct rx_adptr_services {
        uint16_t nb_rx_adptrs;
        uint32_t *rx_adpt_arr;
};
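
/* Service callback: run one iteration of every registered Rx adapter. */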
static int32_t
service_rx_adapter(void *arg)
{
        int i;
        struct rx_adptr_services *adptr_services = arg;

        for (i = 0; i < adptr_services->nb_rx_adptrs; i++)
                rte_service_run_iter_on_app_lcore(
                                adptr_services->rx_adpt_arr[i], 1);
        return 0;
}
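
/*
 * Create one Rx adapter per ethernet port, map each port to its ingress
 * event queue (using cdata.rx_stride), and register a service to drive any
 * adapter that cannot use an internal port.
 */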
static void
init_rx_adapter(uint16_t nb_ports)
{
        int i;
        int ret;
        uint8_t evdev_id = 0;
        struct rx_adptr_services *adptr_services = NULL;
        struct rte_event_dev_info dev_info;

        ret = rte_event_dev_info_get(evdev_id, &dev_info);
        adptr_services = rte_zmalloc(NULL, sizeof(struct rx_adptr_services), 0);

        struct rte_event_port_conf rx_p_conf = {
                .dequeue_depth = 8,
                .enqueue_depth = 8,
                .new_event_threshold = 1200,
        };

        if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
                rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
        if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
                rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;

        struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
                .ev.sched_type = cdata.queue_type,
        };

        for (i = 0; i < nb_ports; i++) {
                uint32_t cap;
                uint32_t service_id;

                ret = rte_event_eth_rx_adapter_create(i, evdev_id, &rx_p_conf);
                if (ret)
                        rte_exit(EXIT_FAILURE,
                                        "failed to create rx adapter[%d]",
                                        cdata.rx_adapter_id);

                ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
                if (ret)
                        rte_exit(EXIT_FAILURE,
                                        "failed to get event rx adapter "
                                        "capabilities");

                queue_conf.ev.queue_id = cdata.rx_stride ?
                        (i * cdata.rx_stride)
                        : (uint8_t)cdata.qid[0];

                ret = rte_event_eth_rx_adapter_queue_add(i, i, -1, &queue_conf);
                if (ret)
                        rte_exit(EXIT_FAILURE,
                                        "Failed to add queues to Rx adapter");

                /* Producer needs to be scheduled. */
                if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
                        ret = rte_event_eth_rx_adapter_service_id_get(i,
                                        &service_id);
                        if (ret != -ESRCH && ret != 0) {
                                rte_exit(EXIT_FAILURE,
                                "Error getting the service ID for rx adptr\n");
                        }

                        rte_service_runstate_set(service_id, 1);
                        rte_service_set_runstate_mapped_check(service_id, 0);

                        adptr_services->nb_rx_adptrs++;
                        adptr_services->rx_adpt_arr = rte_realloc(
                                        adptr_services->rx_adpt_arr,
                                        adptr_services->nb_rx_adptrs *
                                        sizeof(uint32_t), 0);
                        adptr_services->rx_adpt_arr[
                                adptr_services->nb_rx_adptrs - 1] =
                                        service_id;
                }

                ret = rte_event_eth_rx_adapter_start(i);
                if (ret)
                        rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
                                        cdata.rx_adapter_id);
        }

        if (adptr_services->nb_rx_adptrs) {
                struct rte_service_spec service;

                memset(&service, 0, sizeof(struct rte_service_spec));
                snprintf(service.name, sizeof(service.name), "rx_service");
                service.callback = service_rx_adapter;
                service.callback_userdata = (void *)adptr_services;

                int32_t ret = rte_service_component_register(&service,
                                &fdata->rxadptr_service_id);
                if (ret)
                        rte_exit(EXIT_FAILURE,
                                "Rx adapter[%d] service register failed",
                                cdata.rx_adapter_id);

                rte_service_runstate_set(fdata->rxadptr_service_id, 1);
                rte_service_component_runstate_set(fdata->rxadptr_service_id,
                                1);
                rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id,
                                0);
        } else {
                memset(fdata->rx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);
        }

        if (!adptr_services->nb_rx_adptrs && fdata->cap.consumer == NULL &&
                        (dev_info.event_dev_cap &
                         RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))
                fdata->cap.scheduler = NULL;

        if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)
                memset(fdata->sched_core, 0,
                                sizeof(unsigned int) * MAX_NUM_CORE);

        /* The registered service keeps using adptr_services; only free it
         * when no Rx adapter service was needed.
         */
        if (!adptr_services->nb_rx_adptrs)
                rte_free(adptr_services);
}
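
/*
 * Validate the lcore masks: workers are always required, an Rx core is only
 * required when at least one Rx adapter needs a service core, and a scheduler
 * core is required unless the eventdev provides distributed scheduling.
 */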
static void
worker_tx_opt_check(void)
{
        int i;
        int ret;
        uint32_t cap = 0;
        uint8_t rx_needed = 0;
        struct rte_event_dev_info eventdev_info;

        memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
        rte_event_dev_info_get(0, &eventdev_info);

        for (i = 0; i < rte_eth_dev_count(); i++) {
                ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
                if (ret)
                        rte_exit(EXIT_FAILURE,
                                        "failed to get event rx adapter "
                                        "capabilities");
                rx_needed |=
                        !(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
        }

        if (cdata.worker_lcore_mask == 0 ||
                        (rx_needed && cdata.rx_lcore_mask == 0) ||
                        (cdata.sched_lcore_mask == 0 &&
                         !(eventdev_info.event_dev_cap &
                                 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))) {
                printf("Core part of pipeline was not assigned any cores. "
                        "This will stall the pipeline, please check core masks "
                        "(use -h for details on setting core masks):\n"
                        "\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
                        "\n\tworkers: %"PRIu64"\n",
                        cdata.rx_lcore_mask, cdata.tx_lcore_mask,
                        cdata.sched_lcore_mask,
                        cdata.worker_lcore_mask);
                rte_exit(-1, "Fix core masks\n");
        }
}
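
/* Select the worker functions and setup hooks for the worker-transmit
 * pipeline variant.
 */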
void
set_worker_tx_setup_data(struct setup_data *caps, bool burst)
{
        if (burst)
                caps->worker = worker_do_tx_burst;
        else
                caps->worker = worker_do_tx;

        memset(fdata->tx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);

        caps->check_opt = worker_tx_opt_check;
        caps->consumer = NULL;
        caps->scheduler = schedule_devices;
        caps->evdev_setup = setup_eventdev_worker_tx;
        caps->adptr_setup = init_rx_adapter;
}