/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */
#include <stdio.h>
#include <stdbool.h>

#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_launch.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_cycles.h>
#include <rte_pause.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_service.h>
#include <rte_service_component.h>
#define MAX_NUM_STAGES 8
#define BATCH_SIZE 16
#define MAX_NUM_CORE 64
struct cons_data {
	uint8_t dev_id;
	uint8_t port_id;
	uint8_t release;
} __rte_cache_aligned;

struct worker_data {
	uint8_t dev_id;
	uint8_t port_id;
} __rte_cache_aligned;
/* Function pointer types collected in the setup_data capability table below. */
typedef int (*worker_loop)(void *);
typedef int (*consumer_loop)(void);
typedef void (*schedule_loop)(unsigned int);
typedef int (*eventdev_setup)(struct cons_data *, struct worker_data *);
typedef void (*rx_adapter_setup)(uint16_t nb_ports);
typedef void (*opt_check)(void);
struct setup_data {
	worker_loop worker;
	consumer_loop consumer;
	schedule_loop scheduler;
	eventdev_setup evdev_setup;
	rx_adapter_setup adptr_setup;
	opt_check check_opt;
};
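
/*
 * Usage sketch (hedged: burst, nb_ports, worker_data[] and lcore_id are
 * assumed to come from the sample's init code, not from this file). One of
 * the set_worker_*_setup_data() helpers declared at the bottom of this file
 * fills in the table; afterwards, callers only go through fdata->cap:
 *
 *	set_worker_generic_setup_data(&fdata->cap, burst);
 *	fdata->cap.evdev_setup(&cons_data, worker_data);
 *	fdata->cap.adptr_setup(nb_ports);
 *	rte_eal_remote_launch(fdata->cap.worker, &worker_data[i], lcore_id);
 */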
struct fastpath_data {
	volatile int done;
	uint32_t tx_lock;
	uint32_t evdev_service_id;
	uint32_t rxadptr_service_id;
	bool rx_single;
	bool tx_single;
	bool sched_single;
	unsigned int rx_core[MAX_NUM_CORE];
	unsigned int tx_core[MAX_NUM_CORE];
	unsigned int sched_core[MAX_NUM_CORE];
	unsigned int worker_core[MAX_NUM_CORE];
	struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
	struct setup_data cap;
} __rte_cache_aligned;
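
/*
 * The *_core[] arrays are indexed by lcore id and used as booleans: a
 * non-zero entry gives that lcore the corresponding role. The *_single
 * flags record that exactly one lcore owns a role, which lets
 * schedule_devices() below skip multi-thread serialization for that role
 * (the service library's serialization for rx/sched, tx_lock for tx).
 */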
struct config_data {
	unsigned int active_cores;
	unsigned int num_workers;
	int64_t num_packets;
	unsigned int num_fids;
	int queue_type;
	int worker_cycles;
	int enable_queue_priorities;
	int quiet;
	int dump_dev;
	int dump_dev_signal;
	unsigned int num_stages;
	unsigned int worker_cq_depth;
	/* Use the rx stride value to reduce congestion on the entry queue when
	 * using multiple eth ports, by forming multiple event queue pipelines.
	 */
	unsigned int rx_stride;
	int16_t next_qid[MAX_NUM_STAGES + 2];
	int16_t qid[MAX_NUM_STAGES];
	uint8_t rx_adapter_id;
	uint64_t worker_lcore_mask;
	uint64_t rx_lcore_mask;
	uint64_t tx_lcore_mask;
	uint64_t sched_lcore_mask;
};
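
/*
 * Sketch of how the lcore masks are consumed (assumed helper, not part of
 * this file): each mask is expanded into the matching per-core role array
 * in fastpath_data, e.g.
 *
 *	for (i = 0; i < MAX_NUM_CORE; i++)
 *		fdata->worker_core[i] =
 *			!!(cdata.worker_lcore_mask & (1ULL << i));
 */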
struct cons_data cons_data;

struct fastpath_data *fdata;
struct config_data cdata;
static __rte_always_inline void
exchange_mac(struct rte_mbuf *m)
{
	struct ether_hdr *eth;
	struct ether_addr addr;

	/* read the destination mac address and write it straight back,
	 * touching the mbuf data without changing the header
	 */
	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
	ether_addr_copy(&eth->d_addr, &addr);
	ether_addr_copy(&addr, &eth->d_addr);
}
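
/*
 * Note: the copy-out/copy-in above leaves the header unchanged; its point
 * is to pull the packet data into the worker. An application doing a real
 * MAC swap would instead do something like:
 *
 *	ether_addr_copy(&eth->s_addr, &addr);
 *	ether_addr_copy(&eth->d_addr, &eth->s_addr);
 *	ether_addr_copy(&addr, &eth->d_addr);
 */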
static __rte_always_inline void
work(void)
{
	/* spin for a fixed number of cycles of work per packet; with
	 * worker_cycles == 0 the loop body is never entered
	 */
	volatile uint64_t start_tsc = rte_rdtsc();

	while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
		rte_pause();
}
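
/*
 * Sketch of how the two helpers above combine in a worker loop (w, ev, i
 * and nb are assumed locals; the real loops live in the sample's worker
 * implementations):
 *
 *	nb = rte_event_dequeue_burst(w->dev_id, w->port_id, ev,
 *			BATCH_SIZE, 0);
 *	for (i = 0; i < nb; i++) {
 *		work();
 *		exchange_mac(ev[i].mbuf);
 *		ev[i].queue_id = cdata.next_qid[ev[i].queue_id];
 *		ev[i].op = RTE_EVENT_OP_FORWARD;
 *	}
 *	rte_event_enqueue_burst(w->dev_id, w->port_id, ev, nb);
 */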
static __rte_always_inline void
schedule_devices(unsigned int lcore_id)
{
	/* run the eth rx adapter service if this lcore has the rx role;
	 * serialize only when more than one core shares that role
	 */
	if (fdata->rx_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
				!fdata->rx_single);
	}

	/* likewise for the event device's scheduling service */
	if (fdata->sched_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
				!fdata->sched_single);
		if (cdata.dump_dev_signal) {
			rte_event_dev_dump(0, stdout);
			cdata.dump_dev_signal = 0;
		}
	}

	/* run the consumer, taking tx_lock only if tx is shared */
	if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
	    rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
		fdata->cap.consumer();
		rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
	}
}
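
/*
 * schedule_devices() is intended to be called once per iteration of each
 * lcore's main loop, so every core drives only the services matching its
 * role(s). Sketch, assuming fdata->done is set by a signal handler:
 *
 *	while (!fdata->done) {
 *		schedule_devices(rte_lcore_id());
 *		...
 *	}
 */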
/* Fill in the capability table for the two worker pipeline variants. */
void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
void set_worker_tx_setup_data(struct setup_data *caps, bool burst);
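
/*
 * Which variant gets installed is decided at init time from the event
 * device's capabilities (assumption based on the sample's design: the tx
 * variant lets workers transmit directly, while the generic one funnels
 * packets through a dedicated consumer core).
 */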