/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */

#include "pipeline_common.h"
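
/*
 * Single-event worker loop: dequeue one event at a time, classify it
 * in the first stage, do the stage work, and forward it to the next
 * queue in the pipeline.
 */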
static __rte_always_inline int
worker_generic(void *arg)
{
	struct rte_event ev;
	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();

	while (!fdata->done) {

		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		/* Spin until this lcore is enabled as a worker core. */
		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				&ev, 1, 0);
		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received++;

		/* The first worker stage does classification */
		if (ev.queue_id == cdata.qid[0])
			ev.flow_id = ev.mbuf->hash.rss % cdata.num_fids;

		ev.queue_id = cdata.next_qid[ev.queue_id];
		ev.op = RTE_EVENT_OP_FORWARD;
		ev.sched_type = cdata.queue_type;

		work();

		/* Retry until the event is accepted for enqueue. */
		while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
			rte_pause();
		sent++;
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}
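
/*
 * Burst-mode worker loop: same per-stage logic as worker_generic(),
 * but dequeues and enqueues up to BATCH_SIZE events at a time to
 * amortize the per-call overhead.
 */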
static int
worker_generic_burst(void *arg)
{
	struct rte_event events[BATCH_SIZE];
	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();

	while (!fdata->done) {
		uint16_t i;

		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				events, RTE_DIM(events), 0);
		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received += nb_rx;

		for (i = 0; i < nb_rx; i++) {

			/* The first worker stage does classification */
			if (events[i].queue_id == cdata.qid[0])
				events[i].flow_id = events[i].mbuf->hash.rss
							% cdata.num_fids;

			events[i].queue_id = cdata.next_qid[events[i].queue_id];
			events[i].op = RTE_EVENT_OP_FORWARD;
			events[i].sched_type = cdata.queue_type;

			work();
		}

		uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
				events, nb_rx);
		/* Retry the remainder until everything is enqueued or the
		 * application is asked to stop.
		 */
		while (nb_tx < nb_rx && !fdata->done)
			nb_tx += rte_event_enqueue_burst(dev_id, port_id,
							events + nb_tx,
							nb_rx - nb_tx);
		sent += nb_tx;
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}
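
/*
 * Create and configure the event device: one load-balanced queue per
 * pipeline stage plus a SINGLE_LINK queue for the TX path, and one
 * event port per worker linked to all stage queues.
 */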
static int
setup_eventdev_generic(struct worker_data *worker_data)
{
	const uint8_t dev_id = 0;
	/* +1 stages is for a SINGLE_LINK TX stage */
	const uint8_t nb_queues = cdata.num_stages + 1;
	const uint8_t nb_ports = cdata.num_workers;
	struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = 1024,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};
	struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = cdata.worker_cq_depth,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
	};
	struct rte_event_queue_conf wkr_q_conf = {
			.schedule_type = cdata.queue_type,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};
	struct rte_event_queue_conf tx_q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};

	struct port_link worker_queues[MAX_NUM_STAGES];
	uint8_t disable_implicit_release;
	unsigned int i;

	int ret, ndev = rte_event_dev_count();
	if (ndev < 1) {
		printf("%d: No Eventdev Devices Found\n", __LINE__);
		return -1;
	}

	struct rte_event_dev_info dev_info;
	ret = rte_event_dev_info_get(dev_id, &dev_info);
	printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);

	/* Disable implicit release on the worker ports when the PMD
	 * supports doing so.
	 */
	disable_implicit_release = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);

	wkr_p_conf.disable_implicit_release = disable_implicit_release;

	if (dev_info.max_event_port_dequeue_depth <
			config.nb_event_port_dequeue_depth)
		config.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;
	if (dev_info.max_event_port_enqueue_depth <
			config.nb_event_port_enqueue_depth)
		config.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	ret = rte_event_dev_configure(dev_id, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}

	/* Q creation - one load balanced per pipeline stage */
	printf("  Stages:\n");
	for (i = 0; i < cdata.num_stages; i++) {
		if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		cdata.qid[i] = i;
		cdata.next_qid[i] = i+1;
		worker_queues[i].queue_id = i;
		if (cdata.enable_queue_priorities) {
			/* calculate priority stepping for each stage, leaving
			 * headroom of 1 for the SINGLE_LINK TX below
			 */
			const uint32_t prio_delta =
				(RTE_EVENT_DEV_PRIORITY_LOWEST-1) / nb_queues;

			/* higher priority for queues closer to tx */
			wkr_q_conf.priority =
				RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
		}

		const char *type_str = "Atomic";
		switch (wkr_q_conf.schedule_type) {
		case RTE_SCHED_TYPE_ORDERED:
			type_str = "Ordered";
			break;
		case RTE_SCHED_TYPE_PARALLEL:
			type_str = "Parallel";
			break;
		}
		printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
				wkr_q_conf.priority);
	}
	printf("\n");

	/* final queue for sending to TX core */
	if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
		printf("%d: error creating qid %d\n", __LINE__, i);
		return -1;
	}
	cdata.tx_queue_id = i;

	if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;

	/* set up one port per worker, linking to all stage queues */
	for (i = 0; i < cdata.num_workers; i++) {
		struct worker_data *w = &worker_data[i];
		w->dev_id = dev_id;
		if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
			printf("Error setting up port %d\n", i);
			return -1;
		}

		uint32_t s;
		for (s = 0; s < cdata.num_stages; s++) {
			if (rte_event_port_link(dev_id, i,
						&worker_queues[s].queue_id,
						&worker_queues[s].priority,
						1) != 1) {
				printf("%d: error creating link for port %d\n",
						__LINE__, i);
				return -1;
			}
		}
		w->port_id = i;
	}

	/* -ESRCH simply means this eventdev needs no service core. */
	ret = rte_event_dev_service_id_get(dev_id,
				&fdata->evdev_service_id);
	if (ret != -ESRCH && ret != 0) {
		printf("Error getting the service ID for sw eventdev\n");
		return -1;
	}
	rte_service_runstate_set(fdata->evdev_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);

	return dev_id;
}
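
/*
 * Create and start the Rx and Tx ethdev event adapters, add every
 * ethernet port to both, link the Tx adapter's event port to the TX
 * queue, and finally start the event device.
 */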
static void
init_adapters(uint16_t nb_ports)
{
	int i;
	int ret;
	uint8_t tx_port_id = 0;
	uint8_t evdev_id = 0;
	struct rte_event_dev_info dev_info;

	ret = rte_event_dev_info_get(evdev_id, &dev_info);

	struct rte_event_port_conf adptr_p_conf = {
		.dequeue_depth = cdata.worker_cq_depth,
		.enqueue_depth = 64,
		.new_event_threshold = 4096,
	};

	if (adptr_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
		adptr_p_conf.dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	if (adptr_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
		adptr_p_conf.enqueue_depth =
			dev_info.max_event_port_enqueue_depth;

	/* Create one adapter for all the ethernet ports. */
	ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
			&adptr_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
				cdata.rx_adapter_id);

	ret = rte_event_eth_tx_adapter_create(cdata.tx_adapter_id, evdev_id,
			&adptr_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create tx adapter[%d]",
				cdata.tx_adapter_id);

	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
	memset(&queue_conf, 0, sizeof(queue_conf));
	queue_conf.ev.sched_type = cdata.queue_type;
	queue_conf.ev.queue_id = cdata.qid[0];
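
	/* Every NIC Rx queue injects its packets into the first pipeline
	 * stage queue, where the classification stage picks them up.
	 */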
	for (i = 0; i < nb_ports; i++) {
		/* A queue id of -1 adds all Rx/Tx queues of port i. */
		ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
				-1, &queue_conf);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Rx adapter");

		ret = rte_event_eth_tx_adapter_queue_add(cdata.tx_adapter_id, i,
				-1);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Tx adapter");
	}

	ret = rte_event_eth_tx_adapter_event_port_get(cdata.tx_adapter_id,
			&tx_port_id);
	if (ret)
		rte_exit(EXIT_FAILURE,
				"Failed to get Tx adapter port id");

	/* Link the Tx adapter's event port to the SINGLE_LINK TX queue. */
	ret = rte_event_port_link(evdev_id, tx_port_id, &cdata.tx_queue_id,
			NULL, 1);
	if (ret != 1)
		rte_exit(EXIT_FAILURE,
				"Unable to link Tx adapter port to Tx queue");

	ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
				&fdata->rxadptr_service_id);
	if (ret != -ESRCH && ret != 0) {
		rte_exit(EXIT_FAILURE,
			"Error getting the service ID for Rx adapter\n");
	}
	rte_service_runstate_set(fdata->rxadptr_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);

	ret = rte_event_eth_tx_adapter_service_id_get(cdata.tx_adapter_id,
				&fdata->txadptr_service_id);
	if (ret != -ESRCH && ret != 0) {
		rte_exit(EXIT_FAILURE,
			"Error getting the service ID for Tx adapter\n");
	}
	rte_service_runstate_set(fdata->txadptr_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->txadptr_service_id, 0);

	ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
				cdata.rx_adapter_id);

	ret = rte_event_eth_tx_adapter_start(cdata.tx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Tx adapter[%d] start failed",
				cdata.tx_adapter_id);

	if (rte_event_dev_start(evdev_id) < 0)
		rte_exit(EXIT_FAILURE, "Error starting eventdev");
}
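
/*
 * Validate the configured core masks against the capabilities the
 * device reports: dedicated Rx and scheduler cores are only required
 * when the eventdev and adapters cannot do that work internally.
 */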
static void
generic_opt_check(void)
{
	int i;
	int ret;
	uint32_t cap = 0;
	uint8_t rx_needed = 0;
	uint8_t sched_needed = 0;
	struct rte_event_dev_info eventdev_info;

	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
	rte_event_dev_info_get(0, &eventdev_info);

	if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
				RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
		rte_exit(EXIT_FAILURE,
				"Event dev doesn't support all type queues\n");

	/* A dedicated scheduler core is only needed when the eventdev
	 * cannot schedule internally or across the worker cores.
	 */
	sched_needed = !(eventdev_info.event_dev_cap &
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED);

	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
		if (ret)
			rte_exit(EXIT_FAILURE,
				"failed to get event rx adapter capabilities");
		/* An Rx service core is needed if any port's adapter lacks
		 * an internal event port.
		 */
		rx_needed |=
			!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
	}

	if (cdata.worker_lcore_mask == 0 ||
			(rx_needed && cdata.rx_lcore_mask == 0) ||
			(cdata.tx_lcore_mask == 0) ||
			(sched_needed && cdata.sched_lcore_mask == 0)) {
		printf("Core part of pipeline was not assigned any cores. "
			"This will stall the pipeline, please check core masks "
			"(use -h for details on setting core masks):\n"
			"\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
			"\n\tworkers: %"PRIu64"\n",
			cdata.rx_lcore_mask, cdata.tx_lcore_mask,
			cdata.sched_lcore_mask,
			cdata.worker_lcore_mask);
		rte_exit(-1, "Fix core masks\n");
	}

	/* Clear the core masks for services the device provides itself. */
	if (!sched_needed)
		memset(fdata->sched_core, 0,
				sizeof(unsigned int) * MAX_NUM_CORE);
	if (!rx_needed)
		memset(fdata->rx_core, 0,
				sizeof(unsigned int) * MAX_NUM_CORE);
}
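
/*
 * Entry point for the generic pipeline model: fill in the function
 * pointers the application will invoke, selecting the single-event
 * or burst worker variant as requested.
 */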
void
set_worker_generic_setup_data(struct setup_data *caps, bool burst)
{
	if (burst)
		caps->worker = worker_generic_burst;
	else
		caps->worker = worker_generic;

	caps->adptr_setup = init_adapters;
	caps->scheduler = schedule_devices;
	caps->evdev_setup = setup_eventdev_generic;
	caps->check_opt = generic_opt_check;
}
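
/*
 * Rough usage sketch (assumed caller shape, for illustration only;
 * the exact wiring lives in the example's main code, and the names
 * burst, worker_data and nb_eth_ports below are placeholders). After
 * parsing options, the application is expected to do roughly:
 *
 *	struct setup_data caps;
 *	set_worker_generic_setup_data(&caps, burst);
 *	caps.check_opt();                // validate core masks vs caps
 *	caps.evdev_setup(worker_data);   // queues, ports, links
 *	caps.adptr_setup(nb_eth_ports);  // Rx/Tx adapters, dev start
 *	// then launch caps.worker on every worker lcore
 */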