/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */
#include <stdio.h>
#include <stdbool.h>

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_launch.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_cycles.h>
#include <rte_pause.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_service.h>
#include <rte_service_component.h>
#define MAX_NUM_STAGES 8
#define MAX_NUM_CORE 64
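/* Per-worker context: the event device and the event port this worker
 * lcore dequeues from and enqueues to.
 */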
struct worker_data {
	uint8_t dev_id;
	uint8_t port_id;
} __rte_cache_aligned;
typedef int (*worker_loop)(void *);
typedef void (*schedule_loop)(unsigned int);
typedef int (*eventdev_setup)(struct worker_data *);
typedef void (*adapter_setup)(uint16_t nb_ports);
typedef void (*opt_check)(void);
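/* Callbacks that select a pipeline model: the worker loop to run, the
 * in-line scheduling hook, and the matching eventdev setup, adapter setup
 * and option-check routines.
 */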
struct setup_data {
	worker_loop worker;
	schedule_loop scheduler;
	eventdev_setup evdev_setup;
	adapter_setup adptr_setup;
	opt_check check_opt;
};
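/* Runtime state shared by all lcores: service ids for the rx/tx adapters
 * and the eventdev scheduler, whether each of those services runs on a
 * single dedicated core, and the per-lcore role assignments.
 */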
struct fastpath_data {
	uint32_t evdev_service_id;
	uint32_t rxadptr_service_id;
	uint32_t txadptr_service_id;
	/* true when the corresponding service runs on one dedicated core */
	bool rx_single;
	bool tx_single;
	bool sched_single;
	unsigned int rx_core[MAX_NUM_CORE];
	unsigned int tx_core[MAX_NUM_CORE];
	unsigned int sched_core[MAX_NUM_CORE];
	unsigned int worker_core[MAX_NUM_CORE];
	struct setup_data cap;
} __rte_cache_aligned;
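/* Application-wide pipeline configuration, filled in at init time. */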
struct config_data {
	unsigned int active_cores;
	unsigned int num_workers;
	unsigned int num_fids;
	int worker_cycles;	/* busy-work TSC cycles spent per packet */
	int enable_queue_priorities;
	int dump_dev_signal;	/* request an eventdev stats dump */
	unsigned int num_stages;
	unsigned int worker_cq_depth;
	unsigned int rx_stride;
	/* Use rx stride value to reduce congestion in entry queue when using
	 * multiple eth ports by forming multiple event queue pipelines
	 * (see the note after this struct).
	 */
	int16_t next_qid[MAX_NUM_STAGES+2];
	int16_t qid[MAX_NUM_STAGES];
	uint8_t rx_adapter_id;
	uint8_t tx_adapter_id;
	uint64_t worker_lcore_mask;
	uint64_t rx_lcore_mask;
	uint64_t tx_lcore_mask;
	uint64_t sched_lcore_mask;
};
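/*
 * Illustrative note on rx_stride (an assumption about how the adapter
 * setup uses it, not something defined in this header): instead of every
 * eth port feeding the same first-stage event queue, a stride lets
 * successive ports start at different event queues, so each port
 * effectively owns its own chain of stage queues and a single entry queue
 * stops being the bottleneck.
 */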
struct fastpath_data *fdata;
struct config_data cdata;
static __rte_always_inline void
exchange_mac(struct rte_mbuf *m)
{
	struct ether_hdr *eth;
	struct ether_addr addr;

	/* change mac addresses on packet (to use mbuf data) */
	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
	ether_addr_copy(&eth->d_addr, &addr);
	ether_addr_copy(&addr, &eth->d_addr);
}
static __rte_always_inline void
work(void)
{
	/* do a number of cycles of work per packet */
	volatile uint64_t start_tsc = rte_rdtsc();
	while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
		rte_pause();
}
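/* Run whichever services this lcore has been assigned in addition to its
 * worker role: the ethdev rx adapter, the eventdev scheduler (plus an
 * on-demand stats dump), and the ethdev tx adapter. The second argument
 * to rte_service_run_iter_on_app_lcore() asks for serialized execution
 * only when the service is shared by more than one lcore.
 */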
static __rte_always_inline void
schedule_devices(unsigned int lcore_id)
{
	if (fdata->rx_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
				!fdata->rx_single);
	}

	if (fdata->sched_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
				!fdata->sched_single);
		if (cdata.dump_dev_signal) {
			rte_event_dev_dump(0, stdout);
			cdata.dump_dev_signal = 0;
		}
	}

	if (fdata->tx_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->txadptr_service_id,
				!fdata->tx_single);
	}
}
void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
void set_worker_tx_enq_setup_data(struct setup_data *caps, bool burst);
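/*
 * Usage sketch (illustrative only, not part of this header): the
 * application picks one of the two worker models above, then drives
 * everything through the fdata->cap callbacks. Names such as
 * burst_mode_supported, worker_data, nb_eth_ports and lcore_id are
 * placeholders here.
 *
 *	set_worker_generic_setup_data(&fdata->cap, burst_mode_supported);
 *	fdata->cap.check_opt();
 *	fdata->cap.evdev_setup(worker_data);
 *	fdata->cap.adptr_setup(nb_eth_ports);
 *	rte_eal_remote_launch(fdata->cap.worker, &worker_data[i], lcore_id);
 */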