/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */

#include "pipeline_common.h"
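
/*
 * Generic pipeline worker. Each worker dequeues events from its event
 * port, classifies packets on the first stage, and forwards events to
 * the next stage queue until they reach the single-link TX queue.
 */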
static __rte_always_inline int
worker_generic(void *arg)
{
	struct rte_event ev;

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();
	while (!fdata->done) {

		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		/* Spin while this lcore is not an active worker core */
		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				&ev, 1, 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received++;
		/* The first worker stage does classification */
		if (ev.queue_id == cdata.qid[0])
			ev.flow_id = ev.mbuf->hash.rss
						% cdata.num_fids;

		ev.queue_id = cdata.next_qid[ev.queue_id];
		ev.op = RTE_EVENT_OP_FORWARD;
		ev.sched_type = cdata.queue_type;

		work();

		/* Retry until the event is accepted by the port */
		while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
			rte_pause();
		sent++;
	}
	printf(" worker %u thread done. RX=%zu TX=%zu\n",
			rte_lcore_id(), received, sent);

	return 0;
}
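
/*
 * Burst variant of the generic worker: identical pipeline logic, but
 * events are dequeued and enqueued up to BATCH_SIZE at a time to
 * amortize per-call overhead.
 */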
static int
worker_generic_burst(void *arg)
{
	struct rte_event events[BATCH_SIZE];

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();
	while (!fdata->done) {
		uint16_t i;

		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		/* Spin while this lcore is not an active worker core */
		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				events, RTE_DIM(events), 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received += nb_rx;
		for (i = 0; i < nb_rx; i++) {

			/* The first worker stage does classification */
			if (events[i].queue_id == cdata.qid[0])
				events[i].flow_id = events[i].mbuf->hash.rss
							% cdata.num_fids;

			events[i].queue_id = cdata.next_qid[events[i].queue_id];
			events[i].op = RTE_EVENT_OP_FORWARD;
			events[i].sched_type = cdata.queue_type;

			work();
		}
		uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
				events, nb_rx);
		/* Retry the remainder until all events are enqueued or the
		 * application is asked to stop.
		 */
		while (nb_tx < nb_rx && !fdata->done)
			nb_tx += rte_event_enqueue_burst(dev_id, port_id,
							events + nb_tx,
							nb_rx - nb_tx);
		sent += nb_tx;
	}
	printf(" worker %u thread done. RX=%zu TX=%zu\n",
			rte_lcore_id(), received, sent);

	return 0;
}
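
/*
 * Create and configure the event device: one load-balanced queue per
 * pipeline stage plus a single-link TX queue, and one event port per
 * worker linked to all stage queues.
 */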
static int
setup_eventdev_generic(struct worker_data *worker_data)
{
	const uint8_t dev_id = 0;
	/* +1 stages is for a SINGLE_LINK TX stage */
	const uint8_t nb_queues = cdata.num_stages + 1;
	const uint8_t nb_ports = cdata.num_workers;
	struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_single_link_event_port_queues = 1,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = 1024,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};
	struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = cdata.worker_cq_depth,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
			.event_port_cfg = RTE_EVENT_PORT_CFG_HINT_WORKER,
	};
	struct rte_event_queue_conf wkr_q_conf = {
			.schedule_type = cdata.queue_type,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};
	struct rte_event_queue_conf tx_q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};
	struct port_link worker_queues[MAX_NUM_STAGES];
	uint8_t disable_implicit_release;
	unsigned int i;

	int ret, ndev = rte_event_dev_count();
	if (ndev < 1) {
		printf("%d: No Eventdev Devices Found\n", __LINE__);
		return -1;
	}
	struct rte_event_dev_info dev_info;
	ret = rte_event_dev_info_get(dev_id, &dev_info);
	printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);

	/* Disable implicit release on worker ports when the device
	 * supports it.
	 */
	disable_implicit_release = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);

	wkr_p_conf.event_port_cfg = disable_implicit_release ?
		RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
	/* Clamp the requested configuration to the device's limits */
	if (dev_info.max_num_events < config.nb_events_limit)
		config.nb_events_limit = dev_info.max_num_events;
	if (dev_info.max_event_port_dequeue_depth <
			config.nb_event_port_dequeue_depth)
		config.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;
	if (dev_info.max_event_port_enqueue_depth <
			config.nb_event_port_enqueue_depth)
		config.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;
	ret = rte_event_dev_configure(dev_id, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}
	/* Queue creation - one load-balanced queue per pipeline stage */
	printf(" Stages:\n");
	for (i = 0; i < cdata.num_stages; i++) {
		if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		cdata.qid[i] = i;
		cdata.next_qid[i] = i+1;
		worker_queues[i].queue_id = i;
		if (cdata.enable_queue_priorities) {
			/* calculate priority stepping for each stage, leaving
			 * headroom of 1 for the SINGLE_LINK TX below
			 */
			const uint32_t prio_delta =
				(RTE_EVENT_DEV_PRIORITY_LOWEST - 1) / nb_queues;

			/* higher priority for queues closer to tx */
			wkr_q_conf.priority =
				RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
		}
		const char *type_str = "Atomic";
		switch (wkr_q_conf.schedule_type) {
		case RTE_SCHED_TYPE_ORDERED:
			type_str = "Ordered";
			break;
		case RTE_SCHED_TYPE_PARALLEL:
			type_str = "Parallel";
			break;
		}
		printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
				wkr_q_conf.priority);
	}
	/* final queue for sending to TX core; after the loop above,
	 * i == cdata.num_stages, the id of the last (TX) queue
	 */
	if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
		printf("%d: error creating qid %d\n", __LINE__, i);
		return -1;
	}
	cdata.tx_queue_id = i;
	if (wkr_p_conf.new_event_threshold > config.nb_events_limit)
		wkr_p_conf.new_event_threshold = config.nb_events_limit;
	if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
	/* set up one port per worker, linking to all stage queues */
	for (i = 0; i < cdata.num_workers; i++) {
		struct worker_data *w = &worker_data[i];

		if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
			printf("Error setting up port %d\n", i);
			return -1;
		}

		uint32_t s;
		for (s = 0; s < cdata.num_stages; s++) {
			if (rte_event_port_link(dev_id, i,
					&worker_queues[s].queue_id,
					&worker_queues[s].priority,
					1) != 1) {
				printf("%d: error creating link for port %d\n",
						__LINE__, i);
				return -1;
			}
		}
		w->dev_id = dev_id;
		w->port_id = i;
	}
	ret = rte_event_dev_service_id_get(dev_id,
				&fdata->evdev_service_id);
	if (ret != -ESRCH && ret != 0) {
		printf("Error getting the service ID for sw eventdev\n");
		return -1;
	}
	/* This application runs the eventdev's scheduling service itself,
	 * so keep it out of the service-core mapping check.
	 */
	rte_service_runstate_set(fdata->evdev_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);

	return dev_id;
}
/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_rxconf rx_conf;
	static const struct rte_eth_conf port_conf_default = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = ETH_RSS_IP |
					  ETH_RSS_TCP |
					  ETH_RSS_UDP,
			}
		}
	};
	const uint16_t rx_rings = 1, tx_rings = 1;
	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
	struct rte_eth_conf port_conf = port_conf_default;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port, strerror(-retval));
		return retval;
	}
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_RSS_HASH)
		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	rx_conf = dev_info.default_rxconf;
	rx_conf.offloads = port_conf.rxmode.offloads;
	port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (port_conf.rx_adv_conf.rss_conf.rss_hf !=
			port_conf_default.rx_adv_conf.rss_conf.rss_hf) {
		printf("Port %u modified RSS hash function based on hardware support, "
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				port,
				port_conf_default.rx_adv_conf.rss_conf.rss_hf,
				port_conf.rx_adv_conf.rss_conf.rss_hf);
	}
	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;
	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), &rx_conf,
				mbuf_pool);
		if (retval < 0)
			return retval;
	}
	txconf = dev_info.default_txconf;
	/* Use port_conf (not the default) so offloads negotiated above,
	 * e.g. mbuf fast free, are applied to the TX queue.
	 */
	txconf.offloads = port_conf.txmode.offloads;
	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}
	/* Display the port MAC address. */
	struct rte_ether_addr addr;
	retval = rte_eth_macaddr_get(port, &addr);
	if (retval != 0) {
		printf("Failed to get MAC address (port %u): %s\n",
				port, rte_strerror(-retval));
		return retval;
	}

	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port, RTE_ETHER_ADDR_BYTES(&addr));
	/* Enable RX in promiscuous mode for the Ethernet device. */
	retval = rte_eth_promiscuous_enable(port);
	if (retval != 0)
		return retval;

	return 0;
}
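
/*
 * Create one mbuf pool sized for all ports, then initialize every
 * available ethdev port from it.
 */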
static int
init_ports(uint16_t num_ports)
{
	uint16_t portid;

	if (!cdata.num_mbuf)
		cdata.num_mbuf = 16384 * num_ports;

	struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
			/* mbufs */ cdata.num_mbuf,
			/* cache_size */ 512,
			/* priv_size */ 0,
			/* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());

	RTE_ETH_FOREACH_DEV(portid)
		if (port_init(portid, mp) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
					portid);

	return 0;
}
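
/*
 * Create the Rx and Tx adapters that move packets between the ethdev
 * ports and the event device, and link the Tx adapter's event port to
 * the single-link TX queue.
 */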
static void
init_adapters(uint16_t nb_ports)
{
	int i;
	int ret;
	uint8_t tx_port_id = 0;
	uint8_t evdev_id = 0;
	struct rte_event_dev_info dev_info;

	ret = rte_event_dev_info_get(evdev_id, &dev_info);
	struct rte_event_port_conf adptr_p_conf = {
		.dequeue_depth = cdata.worker_cq_depth,
		.enqueue_depth = 64,
		.new_event_threshold = 4096,
		.event_port_cfg = RTE_EVENT_PORT_CFG_HINT_PRODUCER,
	};

	if (adptr_p_conf.new_event_threshold > dev_info.max_num_events)
		adptr_p_conf.new_event_threshold = dev_info.max_num_events;
	if (adptr_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
		adptr_p_conf.dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	if (adptr_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
		adptr_p_conf.enqueue_depth =
			dev_info.max_event_port_enqueue_depth;
	init_ports(nb_ports);
	/* Create one adapter for all the ethernet ports. */
	ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
			&adptr_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
				cdata.rx_adapter_id);

	ret = rte_event_eth_tx_adapter_create(cdata.tx_adapter_id, evdev_id,
			&adptr_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create tx adapter[%d]",
				cdata.tx_adapter_id);
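
	/* Feed every Rx queue of every port into the first stage queue and
	 * register every Tx queue with the Tx adapter (-1 selects all
	 * queues of a port).
	 */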
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
	memset(&queue_conf, 0, sizeof(queue_conf));
	queue_conf.ev.sched_type = cdata.queue_type;
	queue_conf.ev.queue_id = cdata.qid[0];

	for (i = 0; i < nb_ports; i++) {
		ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
				-1, &queue_conf);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Rx adapter");

		ret = rte_event_eth_tx_adapter_queue_add(cdata.tx_adapter_id, i,
				-1);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Tx adapter");
	}
	ret = rte_event_eth_tx_adapter_event_port_get(cdata.tx_adapter_id,
			&tx_port_id);
	if (ret)
		rte_exit(EXIT_FAILURE,
				"Failed to get Tx adapter port id");
	ret = rte_event_port_link(evdev_id, tx_port_id, &cdata.tx_queue_id,
			NULL, 1);
	if (ret != 1)
		rte_exit(EXIT_FAILURE,
				"Unable to link Tx adapter port to Tx queue");
	ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
			&fdata->rxadptr_service_id);
	if (ret != -ESRCH && ret != 0) {
		rte_exit(EXIT_FAILURE,
			"Error getting the service ID for Rx adapter\n");
	}
	rte_service_runstate_set(fdata->rxadptr_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);
	ret = rte_event_eth_tx_adapter_service_id_get(cdata.tx_adapter_id,
			&fdata->txadptr_service_id);
	if (ret != -ESRCH && ret != 0) {
		rte_exit(EXIT_FAILURE,
			"Error getting the service ID for Tx adapter\n");
	}
	rte_service_runstate_set(fdata->txadptr_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->txadptr_service_id, 0);
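
	/* Everything is wired up; start the adapters, then the device. */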
	ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
				cdata.rx_adapter_id);

	ret = rte_event_eth_tx_adapter_start(cdata.tx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Tx adapter[%d] start failed",
				cdata.tx_adapter_id);

	if (rte_event_dev_start(evdev_id) < 0)
		rte_exit(EXIT_FAILURE, "Error starting eventdev");
}
static void
generic_opt_check(void)
{
	int i;
	int ret;
	uint32_t cap = 0;
	uint8_t rx_needed = 0;
	uint8_t sched_needed = 0;
	struct rte_event_dev_info eventdev_info;

	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
	rte_event_dev_info_get(0, &eventdev_info);

	if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
				RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
		rte_exit(EXIT_FAILURE,
				"Event dev doesn't support all type queues\n");
	sched_needed = !(eventdev_info.event_dev_cap &
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED);
	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
		if (ret)
			rte_exit(EXIT_FAILURE,
				"failed to get event rx adapter capabilities");
		rx_needed |=
			!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
	}
	if (cdata.worker_lcore_mask == 0 ||
			(rx_needed && cdata.rx_lcore_mask == 0) ||
			(cdata.tx_lcore_mask == 0) ||
			(sched_needed && cdata.sched_lcore_mask == 0)) {
		printf("A required part of the pipeline was not assigned any "
			"cores. This will stall the pipeline; please check "
			"the core masks (use -h for details):\n"
			"\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
			"\n\tworkers: %"PRIu64"\n",
			cdata.rx_lcore_mask, cdata.tx_lcore_mask,
			cdata.sched_lcore_mask,
			cdata.worker_lcore_mask);
		rte_exit(-1, "Fix core masks\n");
	}
	/* Clear core masks for services the devices handle internally */
	if (!sched_needed)
		memset(fdata->sched_core, 0,
			sizeof(unsigned int) * MAX_NUM_CORE);
	if (!rx_needed)
		memset(fdata->rx_core, 0,
			sizeof(unsigned int) * MAX_NUM_CORE);
}
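
/*
 * Publish the generic-pipeline entry points; the application invokes
 * these callbacks to check options, set up devices, and run workers.
 */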
void
set_worker_generic_setup_data(struct setup_data *caps, bool burst)
{
	if (burst)
		caps->worker = worker_generic_burst;
	else
		caps->worker = worker_generic;

	caps->adptr_setup = init_adapters;
	caps->scheduler = schedule_devices;
	caps->evdev_setup = setup_eventdev_generic;
	caps->check_opt = generic_opt_check;
}
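
/*
 * Usage sketch (hypothetical caller; worker_data, num_ports and
 * burst_mode are assumed to be prepared elsewhere, as in this
 * sample's main()):
 *
 *	set_worker_generic_setup_data(&fdata->cap, burst_mode);
 *	fdata->cap.check_opt();
 *	fdata->cap.evdev_setup(worker_data);
 *	fdata->cap.adptr_setup(num_ports);
 */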