/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */
10 #include <rte_mempool.h>
12 #include <rte_launch.h>
13 #include <rte_malloc.h>
14 #include <rte_random.h>
15 #include <rte_cycles.h>
16 #include <rte_ethdev.h>
17 #include <rte_eventdev.h>
18 #include <rte_event_eth_rx_adapter.h>
19 #include <rte_event_eth_tx_adapter.h>
20 #include <rte_service.h>
21 #include <rte_service_component.h>
23 #define MAX_NUM_STAGES 8
25 #define MAX_NUM_CORE 64
30 } __rte_cache_aligned;
32 typedef int (*worker_loop)(void *);
33 typedef void (*schedule_loop)(unsigned int);
34 typedef int (*eventdev_setup)(struct worker_data *);
35 typedef void (*adapter_setup)(uint16_t nb_ports);
36 typedef void (*opt_check)(void);
40 schedule_loop scheduler;
41 eventdev_setup evdev_setup;
42 adapter_setup adptr_setup;
46 struct fastpath_data {
48 uint32_t evdev_service_id;
49 uint32_t rxadptr_service_id;
50 uint32_t txadptr_service_id;
54 uint64_t rx_core[MAX_NUM_CORE];
55 uint64_t tx_core[MAX_NUM_CORE];
56 uint64_t sched_core[MAX_NUM_CORE];
57 uint64_t worker_core[MAX_NUM_CORE];
58 struct setup_data cap;
59 } __rte_cache_aligned;
62 unsigned int active_cores;
63 unsigned int num_workers;
66 unsigned int num_fids;
69 int enable_queue_priorities;
74 unsigned int num_stages;
75 unsigned int worker_cq_depth;
76 unsigned int rx_stride;
/* Use rx stride value to reduce congestion in entry queue when using
 * multiple eth ports by forming multiple event queue pipelines.
 */
80 int16_t next_qid[MAX_NUM_STAGES+2];
81 int16_t qid[MAX_NUM_STAGES];
82 uint8_t rx_adapter_id;
83 uint8_t tx_adapter_id;
85 uint64_t worker_lcore_mask;
86 uint64_t rx_lcore_mask;
87 uint64_t tx_lcore_mask;
88 uint64_t sched_lcore_mask;
96 extern struct fastpath_data *fdata;
97 extern struct config_data cdata;
99 static __rte_always_inline void
100 exchange_mac(struct rte_mbuf *m)
102 struct rte_ether_hdr *eth;
103 struct rte_ether_addr addr;
105 /* change mac addresses on packet (to use mbuf data) */
106 eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
107 rte_ether_addr_copy(ð->dst_addr, &addr);
108 rte_ether_addr_copy(&addr, ð->dst_addr);
111 static __rte_always_inline void
114 /* do a number of cycles of work per packet */
115 volatile uint64_t start_tsc = rte_rdtsc();
116 while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
120 static __rte_always_inline void
121 schedule_devices(unsigned int lcore_id)
123 if (fdata->rx_core[lcore_id]) {
124 rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
128 if (fdata->sched_core[lcore_id]) {
129 rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
130 !fdata->sched_single);
131 if (cdata.dump_dev_signal) {
132 rte_event_dev_dump(0, stdout);
133 cdata.dump_dev_signal = 0;
137 if (fdata->tx_core[lcore_id]) {
138 rte_service_run_iter_on_app_lcore(fdata->txadptr_service_id,
144 event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
145 void *args __rte_unused)
147 rte_mempool_put(args, ev.event_ptr);
151 worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
152 uint16_t nb_enq, uint16_t nb_deq)
156 if (!(nb_deq - nb_enq))
160 for (i = nb_enq; i < nb_deq; i++) {
161 if (events[i].op == RTE_EVENT_OP_RELEASE)
163 rte_pktmbuf_free(events[i].mbuf);
166 for (i = 0; i < nb_deq; i++)
167 events[i].op = RTE_EVENT_OP_RELEASE;
168 rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
171 rte_event_port_quiesce(dev_id, port_id, event_port_flush, NULL);
174 void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
175 void set_worker_tx_enq_setup_data(struct setup_data *caps, bool burst);