/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Ericsson AB
 */

#ifndef _DSW_EVDEV_H_
#define _DSW_EVDEV_H_

#include <rte_event_ring.h>
#include <rte_eventdev.h>

#define DSW_PMD_NAME RTE_STR(event_dsw)

#define DSW_MAX_PORTS (64)
#define DSW_MAX_PORT_DEQUEUE_DEPTH (128)
#define DSW_MAX_PORT_ENQUEUE_DEPTH (128)
#define DSW_MAX_PORT_OUT_BUFFER (32)

#define DSW_MAX_QUEUES (16)

#define DSW_MAX_EVENTS (16384)

/* Multiple 24-bit flow ids will map to the same DSW-level flow. The
 * number of DSW flows should be high enough to make it unlikely that
 * flow ids of several large flows hash to the same DSW-level flow.
 * Such collisions will limit parallelism and thus the number of cores
 * that may be utilized. However, configuring a large number of DSW
 * flows might potentially, depending on traffic and actual
 * application flow id value range, result in each such DSW-level flow
 * being very small. The effect of migrating such flows will be small,
 * in terms of the amount of processing load redistributed. This will
 * in turn reduce the load balancing speed, since the flow migration
 * rate has an upper limit. Code changes are required to allow more
 * than 32k DSW-level flows.
 */
#define DSW_MAX_FLOWS_BITS (13)
#define DSW_MAX_FLOWS (1<<(DSW_MAX_FLOWS_BITS))
#define DSW_MAX_FLOWS_MASK (DSW_MAX_FLOWS-1)

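/* A minimal sketch (not necessarily the PMD's actual hash; see
 * dsw_event.c) of how a 24-bit eventdev flow id may be folded into a
 * DSW-level flow hash: XOR together DSW_MAX_FLOWS_BITS-wide chunks of
 * the flow id.
 */
static inline uint16_t
dsw_example_flow_hash(uint32_t flow_id)
{
	uint16_t hash = 0;
	uint16_t offset = 0;

	/* Fold all 24 flow id bits down to DSW_MAX_FLOWS_BITS bits. */
	do {
		hash ^= (flow_id >> offset) & DSW_MAX_FLOWS_MASK;
		offset += DSW_MAX_FLOWS_BITS;
	} while (offset < 24);

	return hash;
}
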
/* Eventdev RTE_SCHED_TYPE_PARALLEL doesn't have a concept of flows,
 * but the 'dsw' scheduler (more or less) randomly assigns flow ids to
 * events on parallel queues, to be able to reuse some of the
 * migration mechanism and scheduling logic from
 * RTE_SCHED_TYPE_ATOMIC. By moving one of the parallel "flows" from a
 * particular port, the likelihood of events being scheduled to this
 * port is reduced, and thus a kind of statistical load balancing is
 * achieved.
 */
#define DSW_PARALLEL_FLOWS (1024)

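/* A sketch of the idea, assuming the next_parallel_flow_id port field
 * below acts as a simple round-robin counter (the actual logic lives
 * in dsw_event.c):
 *
 *	flow_id = port->next_parallel_flow_id;
 *	port->next_parallel_flow_id =
 *		(port->next_parallel_flow_id + 1) % DSW_PARALLEL_FLOWS;
 */
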
/* 'Background tasks' include polling the control rings for
 * migration-related messages and flushing the output buffer (so that
 * buffered events don't linger too long). This value shouldn't be too
 * low, since the system then won't benefit from the 'batching'
 * effects of the output buffer, and shouldn't be too high, since that
 * will make buffered events linger too long in case the port goes
 * idle.
 */
#define DSW_MAX_PORT_OPS_PER_BG_TASK (128)

/* Avoid making small 'loans' from the central in-flight event credit
 * pool, to improve efficiency.
 */
#define DSW_MIN_CREDIT_LOAN (64)
#define DSW_PORT_MAX_CREDITS (2*DSW_MIN_CREDIT_LOAN)
#define DSW_PORT_MIN_CREDITS (DSW_MIN_CREDIT_LOAN)

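/* A sketch of the intended loan pattern, with hypothetical variable
 * names (the actual logic lives in dsw_event.c): a port first spends
 * its locally cached credits, and only when those run out does it
 * attempt a loan of at least DSW_MIN_CREDIT_LOAN credits from the
 * central pool:
 *
 *	int32_t missing = needed - port->inflight_credits;
 *	int32_t loan = RTE_MAX(missing, DSW_MIN_CREDIT_LOAN);
 *
 *	if (rte_atomic32_read(&dsw->credits_on_loan) + loan >
 *	    dsw->max_inflight)
 *		return false;
 *	rte_atomic32_add(&dsw->credits_on_loan, loan);
 *	port->inflight_credits += loan;
 */
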
/* The rings are dimensioned so that all in-flight events can reside
 * on any one of the port rings, to avoid the trouble of having to
 * care about the case where there's no room on the destination port's
 * input ring.
 */
#define DSW_IN_RING_SIZE (DSW_MAX_EVENTS)

#define DSW_MAX_LOAD (INT16_MAX)
#define DSW_LOAD_FROM_PERCENT(x) ((int16_t)(((x)*DSW_MAX_LOAD)/100))
#define DSW_LOAD_TO_PERCENT(x) ((100*(x))/DSW_MAX_LOAD)

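/* For example, DSW_LOAD_FROM_PERCENT(70) = (70*32767)/100 = 22936,
 * and DSW_LOAD_TO_PERCENT(22936) = (100*22936)/32767 = 69, the
 * round trip losing a percentage point to integer truncation.
 */
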
/* The thought behind keeping the load update interval shorter than
 * the migration interval is that the load from newly migrated flows
 * should 'show up' on the load measurement before new migrations are
 * considered. This is to avoid having too many flows, from too many
 * source ports, being migrated too quickly to a lightly loaded port -
 * in particular since this might cause the system to oscillate.
 */
#define DSW_LOAD_UPDATE_INTERVAL (DSW_MIGRATION_INTERVAL/4)
#define DSW_OLD_LOAD_WEIGHT (1)

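/* A sketch of the intended exponentially weighted moving average,
 * assuming 'period_load' holds the load measured over the last
 * update interval (the actual calculation is in dsw_event.c):
 *
 *	new_load = (old_load * DSW_OLD_LOAD_WEIGHT + period_load) /
 *		(DSW_OLD_LOAD_WEIGHT + 1);
 *
 * With DSW_OLD_LOAD_WEIGHT at 1, the old estimate and the new
 * measurement carry equal weight.
 */
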
/* The minimum time (in us) between two flow migrations. What puts an
 * upper limit on the actual migration rate is primarily the pace at
 * which the ports send and receive control messages, which in turn
 * is largely a function of how many cycles are spent on the
 * processing of received events.
 */
#define DSW_MIGRATION_INTERVAL (1000)
#define DSW_MIN_SOURCE_LOAD_FOR_MIGRATION (DSW_LOAD_FROM_PERCENT(70))
#define DSW_MAX_TARGET_LOAD_FOR_MIGRATION (DSW_LOAD_FROM_PERCENT(95))
#define DSW_REBALANCE_THRESHOLD (DSW_LOAD_FROM_PERCENT(3))

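/* Roughly, with 'source_load' and 'target_load' on the DSW_MAX_LOAD
 * scale, a flow migration is only considered when (a sketch; the
 * precise conditions are in dsw_event.c):
 *
 *	source_load >= DSW_MIN_SOURCE_LOAD_FOR_MIGRATION &&
 *	target_load <= DSW_MAX_TARGET_LOAD_FOR_MIGRATION &&
 *	(source_load - target_load) > DSW_REBALANCE_THRESHOLD
 */
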
#define DSW_MAX_EVENTS_RECORDED (128)

#define DSW_MAX_FLOWS_PER_MIGRATION (8)

/* Only one outstanding migration per port is allowed. */
#define DSW_MAX_PAUSED_FLOWS (DSW_MAX_PORTS*DSW_MAX_FLOWS_PER_MIGRATION)

/* Enough room for pause request/confirm and unpause request/confirm
 * for all possible senders.
 */
#define DSW_CTL_IN_RING_SIZE ((DSW_MAX_PORTS-1)*4)

/* With DSW_SORT_DEQUEUED enabled, the scheduler will, at the point of
 * dequeue(), arrange events so that events with the same flow id on
 * the same queue form a back-to-back "burst", and also so that such
 * bursts of different flow ids, but on the same queue, come
 * consecutively. All this in an attempt to improve data and
 * instruction cache usage for the application, at the cost of a
 * scheduler overhead increase.
 */

/* #define DSW_SORT_DEQUEUED */

struct dsw_queue_flow {
	uint8_t queue_id;
	uint16_t flow_hash;
};

enum dsw_migration_state {
	DSW_MIGRATION_STATE_IDLE,
	DSW_MIGRATION_STATE_PAUSING,
	DSW_MIGRATION_STATE_FORWARDING,
	DSW_MIGRATION_STATE_UNPAUSING
};

struct dsw_port {
	uint16_t id;

	/* Keeping a pointer here to avoid container_of() calls, which
	 * are expensive since they are very frequent and will result
	 * in an integer multiplication (since the port id is an index
	 * into the dsw_evdev port array).
	 */
	struct dsw_evdev *dsw;

	uint16_t dequeue_depth;
	uint16_t enqueue_depth;

	int32_t inflight_credits;

	int32_t new_event_threshold;

	uint16_t pending_releases;

	uint16_t next_parallel_flow_id;

	uint16_t ops_since_bg_task;

	/* Timestamp of the most recent 'background' processing. */
	uint64_t last_bg;

	/* For port load measurement. */
	uint64_t next_load_update;
	uint64_t load_update_interval;
	uint64_t measurement_start;
	uint64_t busy_start;
	uint64_t busy_cycles;
	uint64_t total_busy_cycles;

	/* For the ctl interface and flow migration mechanism. */
	uint64_t next_emigration;
	uint64_t migration_interval;
	enum dsw_migration_state migration_state;

	uint64_t emigration_start;
	uint64_t emigrations;
	uint64_t emigration_latency;

	uint8_t emigration_target_port_ids[DSW_MAX_FLOWS_PER_MIGRATION];
	struct dsw_queue_flow
		emigration_target_qfs[DSW_MAX_FLOWS_PER_MIGRATION];
	uint8_t emigration_targets_len;

	uint64_t immigrations;

	uint16_t paused_flows_len;
	struct dsw_queue_flow paused_flows[DSW_MAX_PAUSED_FLOWS];

	/* In a very contrived worst case, all in-flight events can be
	 * lying around paused here.
	 */
	uint16_t paused_events_len;
	struct rte_event paused_events[DSW_MAX_EVENTS];

	uint16_t seen_events_len;
	uint16_t seen_events_idx;
	struct dsw_queue_flow seen_events[DSW_MAX_EVENTS_RECORDED];

	uint64_t enqueue_calls;
	uint64_t new_enqueued;
	uint64_t forward_enqueued;
	uint64_t release_enqueued;
	uint64_t queue_enqueued[DSW_MAX_QUEUES];

	uint64_t dequeue_calls;
	uint64_t dequeued;
	uint64_t queue_dequeued[DSW_MAX_QUEUES];

	uint16_t out_buffer_len[DSW_MAX_PORTS];
	struct rte_event out_buffer[DSW_MAX_PORTS][DSW_MAX_PORT_OUT_BUFFER];

	uint16_t in_buffer_len;
	uint16_t in_buffer_start;
	/* This buffer may contain events that were read up from the
	 * in_ring during the flow migration process.
	 */
	struct rte_event in_buffer[DSW_MAX_EVENTS];

	struct rte_event_ring *in_ring __rte_cache_aligned;

	struct rte_ring *ctl_in_ring __rte_cache_aligned;

	/* Estimate of current port load. */
	rte_atomic16_t load __rte_cache_aligned;
	/* Estimate of flows currently migrating to this port. */
	rte_atomic32_t immigration_load __rte_cache_aligned;
} __rte_cache_aligned;

struct dsw_queue {
	uint8_t schedule_type;
	uint8_t serving_ports[DSW_MAX_PORTS];
	uint16_t num_serving_ports;

	uint8_t flow_to_port_map[DSW_MAX_FLOWS] __rte_cache_aligned;
};

struct dsw_evdev {
	struct rte_eventdev_data *data;

	struct dsw_port ports[DSW_MAX_PORTS];
	uint16_t num_ports;
	struct dsw_queue queues[DSW_MAX_QUEUES];
	uint8_t num_queues;
	int32_t max_inflight;

	rte_atomic32_t credits_on_loan __rte_cache_aligned;
};

#define DSW_CTL_PAUS_REQ (0)
#define DSW_CTL_UNPAUS_REQ (1)
#define DSW_CTL_CFM (2)

struct dsw_ctl_msg {
	uint8_t type;
	uint8_t originating_port_id;
	uint8_t qfs_len;
	struct dsw_queue_flow qfs[DSW_MAX_FLOWS_PER_MIGRATION];
};

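/* A sketch, with hypothetical names, of how a pause request could be
 * sent to a peer port's control ring (the actual send/receive logic
 * lives in dsw_event.c):
 *
 *	struct dsw_ctl_msg msg = {
 *		.type = DSW_CTL_PAUS_REQ,
 *		.originating_port_id = source_port->id,
 *		.qfs_len = qfs_len
 *	};
 *
 *	memcpy(msg.qfs, qfs, sizeof(struct dsw_queue_flow) * qfs_len);
 *	rte_ring_enqueue_elem(peer_port->ctl_in_ring, &msg, sizeof(msg));
 */
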
uint16_t dsw_event_enqueue(void *port, const struct rte_event *event);
uint16_t dsw_event_enqueue_burst(void *port,
				 const struct rte_event events[],
				 uint16_t events_len);
uint16_t dsw_event_enqueue_new_burst(void *port,
				     const struct rte_event events[],
				     uint16_t events_len);
uint16_t dsw_event_enqueue_forward_burst(void *port,
					 const struct rte_event events[],
					 uint16_t events_len);

uint16_t dsw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
uint16_t dsw_event_dequeue_burst(void *port, struct rte_event *events,
				 uint16_t num, uint64_t wait);

int dsw_xstats_get_names(const struct rte_eventdev *dev,
			 enum rte_event_dev_xstats_mode mode,
			 uint8_t queue_port_id,
			 struct rte_event_dev_xstats_name *xstats_names,
			 unsigned int *ids, unsigned int size);
int dsw_xstats_get(const struct rte_eventdev *dev,
		   enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		   const unsigned int ids[], uint64_t values[], unsigned int n);
uint64_t dsw_xstats_get_by_name(const struct rte_eventdev *dev,
				const char *name, unsigned int *id);

static inline struct dsw_evdev *
dsw_pmd_priv(const struct rte_eventdev *eventdev)
{
	return eventdev->data->dev_private;
}

#define DSW_LOG_DP(level, fmt, args...)					\
	RTE_LOG_DP(level, EVENTDEV, "[%s] %s() line %u: " fmt,		\
		   DSW_PMD_NAME,					\
		   __func__, __LINE__, ## args)

#define DSW_LOG_DP_PORT(level, port_id, fmt, args...)			\
	DSW_LOG_DP(level, "<Port %d> " fmt, port_id, ## args)

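/* Example usage (illustrative only):
 *
 *	DSW_LOG_DP_PORT(DEBUG, port->id, "Buffered %d events.\n",
 *			num_events);
 */

#endif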