2 * SPDX-License-Identifier: BSD-3-Clause
3 * Copyright 2016 Intel Corporation.
4 * Copyright 2017 Cavium, Inc.
10 #include <rte_mempool.h>
12 #include <rte_launch.h>
13 #include <rte_malloc.h>
14 #include <rte_random.h>
15 #include <rte_cycles.h>
16 #include <rte_ethdev.h>
17 #include <rte_eventdev.h>
18 #include <rte_event_eth_rx_adapter.h>
19 #include <rte_service.h>
20 #include <rte_service_component.h>
/* Maximum number of pipeline stages; bounds the qid[]/next_qid[] tables below. */
#define MAX_NUM_STAGES 8
/* Upper bound on lcore ids usable as indices into the per-core role arrays. */
#define MAX_NUM_CORE 64
30 } __rte_cache_aligned;
35 } __rte_cache_aligned;
/* Per-worker packet-processing loop (presumably launched per lcore via
 * rte_eal_remote_launch() — not visible in this chunk, confirm). */
typedef int (*worker_loop)(void *);
/* Consumer (Tx-side) loop; invoked from schedule_devices() via fdata->cap.consumer. */
typedef int (*consumer_loop)(void);
/* Scheduler hook, given the calling lcore id. */
typedef void (*schedule_loop)(unsigned int);
/* Eventdev configuration hook; fills in the consumer and worker contexts. */
typedef int (*eventdev_setup)(struct cons_data *, struct worker_data *);
/* Rx adapter configuration hook, given the number of ethdev ports in use. */
typedef void (*rx_adapter_setup)(uint16_t nb_ports);
/* Command-line option sanity-check hook. */
typedef void (*opt_check)(void);
	/* NOTE(review): the enclosing "struct setup_data {" header (and any
	 * preceding members, e.g. a worker_loop) are not visible in this
	 * chunk — confirm against the full file. */
	consumer_loop consumer;     /* Tx loop, run under schedule_devices(). */
	schedule_loop scheduler;    /* per-iteration scheduling hook. */
	eventdev_setup evdev_setup; /* eventdev init for this capability mode. */
	rx_adapter_setup adptr_setup; /* Rx adapter init for this mode. */
struct fastpath_data {
	/* Service id used to run the eventdev scheduler from app lcores
	 * (see schedule_devices()). */
	uint32_t evdev_service_id;
	/* Service id of the ethdev Rx adapter feeding the eventdev. */
	uint32_t rxadptr_service_id;
	/* Per-lcore role flags, indexed by lcore id (< MAX_NUM_CORE):
	 * non-zero means this lcore performs the given role. */
	unsigned int rx_core[MAX_NUM_CORE];
	unsigned int tx_core[MAX_NUM_CORE];
	unsigned int sched_core[MAX_NUM_CORE];
	unsigned int worker_core[MAX_NUM_CORE];
	/* Per-ethdev-port Tx buffer for batching transmitted mbufs. */
	struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
	/* Callbacks selected at init time for the detected capability mode. */
	struct setup_data cap;
	/* NOTE(review): members referenced elsewhere in this file
	 * (rx_single, tx_single, sched_single, tx_lock) are not visible
	 * in this chunk — confirm against the full file. */
} __rte_cache_aligned;
	/* NOTE(review): the enclosing "struct config_data {" header is not
	 * visible in this chunk — confirm against the full file. */
	unsigned int active_cores;   /* lcores participating in the pipeline. */
	unsigned int num_workers;    /* worker lcore count. */
	unsigned int num_fids;       /* flow-id count (presumably for atomic
				      * flow distribution — confirm). */
	int enable_queue_priorities; /* non-zero: assign per-queue priorities. */
	unsigned int num_stages;     /* pipeline stages in use (<= MAX_NUM_STAGES). */
	unsigned int worker_cq_depth; /* event port dequeue-queue depth. */
	int16_t next_qid[MAX_NUM_STAGES+2]; /* qid each stage forwards to. */
	int16_t qid[MAX_NUM_STAGES]; /* eventdev queue id per stage. */
	uint8_t rx_adapter_id;       /* id of the Rx adapter created at init. */
	/* NOTE(review): members referenced elsewhere in this file
	 * (worker_cycles, dump_dev_signal) are not visible in this chunk. */
/* Consumer-side context shared by all cores. */
struct cons_data cons_data;

/* Fast-path shared state; pointer — presumably allocated at init
 * (allocation site not visible in this chunk). */
struct fastpath_data *fdata;
/* Runtime configuration, filled from the command line. */
struct config_data cdata;
97 static __rte_always_inline void
98 work(struct rte_mbuf *m)
100 struct ether_hdr *eth;
101 struct ether_addr addr;
103 /* change mac addresses on packet (to use mbuf data) */
105 * FIXME Swap mac address properly and also handle the
106 * case for both odd and even number of stages that the
107 * addresses end up the same at the end of the pipeline
109 eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
110 ether_addr_copy(ð->d_addr, &addr);
111 ether_addr_copy(&addr, ð->d_addr);
113 /* do a number of cycles of work per packet */
114 volatile uint64_t start_tsc = rte_rdtsc();
115 while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
119 static __rte_always_inline void
120 schedule_devices(unsigned int lcore_id)
122 if (fdata->rx_core[lcore_id]) {
123 rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
127 if (fdata->sched_core[lcore_id]) {
128 rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
129 !fdata->sched_single);
130 if (cdata.dump_dev_signal) {
131 rte_event_dev_dump(0, stdout);
132 cdata.dump_dev_signal = 0;
136 if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
137 rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
138 fdata->cap.consumer();
139 rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
143 void set_worker_generic_setup_data(struct setup_data *caps, bool burst);