/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Ericsson AB
 */

#ifndef _DSW_EVDEV_H_
#define _DSW_EVDEV_H_

#include <rte_event_ring.h>
#include <rte_eventdev.h>

#define DSW_PMD_NAME RTE_STR(event_dsw)

#define DSW_MAX_PORTS (64)
#define DSW_MAX_PORT_DEQUEUE_DEPTH (128)
#define DSW_MAX_PORT_ENQUEUE_DEPTH (128)
#define DSW_MAX_PORT_OUT_BUFFER (32)

#define DSW_MAX_QUEUES (16)

#define DSW_MAX_EVENTS (16384)

/* Multiple 24-bit flow ids will map to the same DSW-level flow. The
 * number of DSW flows should be high enough to make it unlikely that
 * the flow ids of several large flows hash to the same DSW-level
 * flow. Such collisions will limit parallelism and thus the number
 * of cores that may be utilized. However, configuring a large number
 * of DSW flows might potentially, depending on traffic and the
 * actual application flow id value range, result in each such
 * DSW-level flow being very small. The effect of migrating such
 * flows will be small, in terms of the amount of processing load
 * redistributed. This will in turn reduce the load balancing speed,
 * since the flow migration rate has an upper limit. Code changes are
 * required to allow more than 32k DSW-level flows.
 */
#define DSW_MAX_FLOWS_BITS (13)
#define DSW_MAX_FLOWS (1<<(DSW_MAX_FLOWS_BITS))
#define DSW_MAX_FLOWS_MASK (DSW_MAX_FLOWS-1)
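
/* As an illustration of the mapping described above (a sketch only;
 * the driver's actual hash function may well differ), a 24-bit event
 * flow id could be folded into one of the DSW_MAX_FLOWS DSW-level
 * flows like this:
 *
 *   static inline uint16_t
 *   example_flow_hash(uint32_t flow_id)
 *   {
 *           uint32_t mixed = (flow_id * 2654435761U) >> 16;
 *
 *           return mixed & DSW_MAX_FLOWS_MASK;
 *   }
 */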

/* Eventdev RTE_SCHED_TYPE_PARALLEL doesn't have a concept of flows,
 * but the 'dsw' scheduler (more or less) randomly assigns flow ids
 * to events on parallel queues, to be able to reuse some of the
 * migration mechanism and scheduling logic from
 * RTE_SCHED_TYPE_ATOMIC. By moving one of the parallel "flows" away
 * from a particular port, the likelihood of events being scheduled
 * to this port is reduced, and thus a kind of statistical load
 * balancing is achieved.
 */
#define DSW_PARALLEL_FLOWS (1024)
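
/* A sketch of how such pseudo-flow ids could be handed out
 * (illustrative only; the function name is hypothetical, but the
 * counter corresponds to dsw_port's next_parallel_flow_id field
 * below):
 *
 *   static inline uint16_t
 *   example_parallel_flow_id(struct dsw_port *port)
 *   {
 *           return port->next_parallel_flow_id++ % DSW_PARALLEL_FLOWS;
 *   }
 */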

/* 'Background tasks' are polling the control rings for
 * migration-related messages, and flushing the output buffers (so
 * that buffered events don't linger too long). The value shouldn't
 * be too low, since the system then won't benefit from the
 * 'batching' effects of the output buffer, and shouldn't be too
 * high, since that will make buffered events linger too long in case
 * the port goes idle.
 */
#define DSW_MAX_PORT_OPS_PER_BG_TASK (128)

/* Avoid making small 'loans' from the central in-flight event credit
 * pool, to improve efficiency.
 */
#define DSW_MIN_CREDIT_LOAN (64)
#define DSW_PORT_MAX_CREDITS (2*DSW_MIN_CREDIT_LOAN)
#define DSW_PORT_MIN_CREDITS (DSW_MIN_CREDIT_LOAN)
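
/* A sketch of how such a loan could be taken (illustrative only; the
 * actual logic lives in the driver's enqueue path, and the function
 * name below is hypothetical):
 *
 *   static inline bool
 *   example_acquire_credits(struct dsw_evdev *dsw, struct dsw_port *port,
 *                           int32_t required)
 *   {
 *           if (port->inflight_credits < required) {
 *                   int32_t loan = RTE_MAX(DSW_MIN_CREDIT_LOAN,
 *                                          required - port->inflight_credits);
 *
 *                   if (rte_atomic32_add_return(&dsw->credits_on_loan,
 *                                               loan) > dsw->max_inflight) {
 *                           rte_atomic32_sub(&dsw->credits_on_loan, loan);
 *                           return false;
 *                   }
 *                   port->inflight_credits += loan;
 *           }
 *           port->inflight_credits -= required;
 *           return true;
 *   }
 *
 * Correspondingly, a port holding more than DSW_PORT_MAX_CREDITS
 * would return the surplus to the central pool.
 */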

/* The rings are dimensioned so that all in-flight events can reside
 * on any one of the port rings, to avoid the trouble of having to
 * care about the case where there's no room on the destination
 * port's in_ring.
 */
#define DSW_IN_RING_SIZE (DSW_MAX_EVENTS)

#define DSW_MAX_LOAD (INT16_MAX)
#define DSW_LOAD_FROM_PERCENT(x) ((int16_t)(((x)*DSW_MAX_LOAD)/100))
#define DSW_LOAD_TO_PERCENT(x) ((100*(x))/DSW_MAX_LOAD)
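
/* For example, DSW_LOAD_FROM_PERCENT(70) evaluates to
 * (70*32767)/100 = 22936, and DSW_LOAD_TO_PERCENT() converts such a
 * fixed-point load value back into an integer percentage.
 */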

/* The thought behind keeping the load update interval shorter than
 * the migration interval is that the load from newly migrated flows
 * should 'show up' on the load measurement before new migrations are
 * considered. This is to avoid having too many flows, from too many
 * source ports, being migrated too quickly to a lightly loaded port -
 * in particular since this might cause the system to oscillate.
 */
#define DSW_LOAD_UPDATE_INTERVAL (DSW_MIGRATION_INTERVAL/4)
#define DSW_OLD_LOAD_WEIGHT (1)
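
/* A sketch of how a port's load estimate could be smoothed with
 * these constants (illustrative; the actual update happens in the
 * driver's load measurement code):
 *
 *   new_load = (old_load * DSW_OLD_LOAD_WEIGHT + measured_load) /
 *              (DSW_OLD_LOAD_WEIGHT + 1);
 *
 * With DSW_OLD_LOAD_WEIGHT at 1, this amounts to a simple average of
 * the previous estimate and the newly measured load.
 */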

/* The minimum time (in us) between two flow migrations. What puts an
 * upper limit on the actual migration rate is primarily the pace at
 * which the ports send and receive control messages, which in turn
 * is largely a function of how many cycles are spent on the
 * processing of an event burst.
 */
#define DSW_MIGRATION_INTERVAL (1000)
#define DSW_MIN_SOURCE_LOAD_FOR_MIGRATION (DSW_LOAD_FROM_PERCENT(70))
#define DSW_MAX_TARGET_LOAD_FOR_MIGRATION (DSW_LOAD_FROM_PERCENT(95))

#define DSW_MAX_EVENTS_RECORDED (128)

/* Only one outstanding migration per port is allowed. */
#define DSW_MAX_PAUSED_FLOWS (DSW_MAX_PORTS)

/* Enough room for pause request/confirm and unpause request/confirm
 * for all possible senders.
 */
#define DSW_CTL_IN_RING_SIZE ((DSW_MAX_PORTS-1)*4)
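
/* For example, with DSW_MAX_PORTS at 64, each control ring is
 * dimensioned for (64-1)*4 = 252 messages.
 */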

/* With DSW_SORT_DEQUEUED enabled, the scheduler will, at the point of
 * dequeue(), arrange events so that events with the same flow id on
 * the same queue form a back-to-back "burst", and also so that such
 * bursts of different flow ids, but on the same queue, also come
 * consecutively. All this in an attempt to improve data and
 * instruction cache usage for the application, at the cost of a
 * scheduler overhead increase.
 */

/* #define DSW_SORT_DEQUEUED */
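
/* A sketch of an event ordering that would produce such bursts
 * (illustrative only; the driver's actual sort implementation may
 * differ):
 *
 *   static int
 *   example_dsw_event_cmp(const struct rte_event *a,
 *                         const struct rte_event *b)
 *   {
 *           if (a->queue_id != b->queue_id)
 *                   return (int)a->queue_id - (int)b->queue_id;
 *           return (int)a->flow_id - (int)b->flow_id;
 *   }
 */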

struct dsw_queue_flow {
	uint8_t queue_id;
	uint16_t flow_hash;
};

enum dsw_migration_state {
	DSW_MIGRATION_STATE_IDLE,
	DSW_MIGRATION_STATE_PAUSING,
	DSW_MIGRATION_STATE_FORWARDING,
	DSW_MIGRATION_STATE_UNPAUSING
};

struct dsw_port {
	uint16_t id;

	/* Keeping a pointer here to avoid container_of() calls, which
	 * are expensive since they are very frequent and will result
	 * in an integer multiplication (since the port id is an index
	 * into the dsw_evdev port array).
	 */
	struct dsw_evdev *dsw;

	uint16_t dequeue_depth;
	uint16_t enqueue_depth;

	int32_t inflight_credits;

	int32_t new_event_threshold;

	uint16_t pending_releases;

	uint16_t next_parallel_flow_id;

	uint16_t ops_since_bg_task;

	/* Timestamp of the most recent 'background' processing. */
	uint64_t last_bg;

	/* For port load measurement. */
	uint64_t next_load_update;
	uint64_t load_update_interval;
	uint64_t measurement_start;
	uint64_t busy_start;
	uint64_t busy_cycles;
	uint64_t total_busy_cycles;

	/* For the ctl interface and flow migration mechanism. */
	uint64_t next_emigration;
	uint64_t migration_interval;
	enum dsw_migration_state migration_state;

	uint64_t emigration_start;
	uint64_t emigrations;
	uint64_t emigration_latency;

	uint8_t emigration_target_port_id;
	struct dsw_queue_flow emigration_target_qf;

	uint64_t immigrations;

	uint16_t paused_flows_len;
	struct dsw_queue_flow paused_flows[DSW_MAX_PAUSED_FLOWS];

	/* In a very contrived worst case, all inflight events can be
	 * lying around paused here.
	 */
	uint16_t paused_events_len;
	struct rte_event paused_events[DSW_MAX_EVENTS];

	uint16_t seen_events_len;
	uint16_t seen_events_idx;
	struct dsw_queue_flow seen_events[DSW_MAX_EVENTS_RECORDED];

	uint64_t enqueue_calls;
	uint64_t new_enqueued;
	uint64_t forward_enqueued;
	uint64_t release_enqueued;
	uint64_t queue_enqueued[DSW_MAX_QUEUES];

	uint64_t dequeue_calls;
	uint64_t dequeued;
	uint64_t queue_dequeued[DSW_MAX_QUEUES];

	uint16_t out_buffer_len[DSW_MAX_PORTS];
	struct rte_event out_buffer[DSW_MAX_PORTS][DSW_MAX_PORT_OUT_BUFFER];

	uint16_t in_buffer_len;
	uint16_t in_buffer_start;
	/* This buffer may contain events that were read from the
	 * in_ring during the flow migration process.
	 */
	struct rte_event in_buffer[DSW_MAX_EVENTS];

	struct rte_event_ring *in_ring __rte_cache_aligned;

	struct rte_ring *ctl_in_ring __rte_cache_aligned;

	/* Estimate of current port load. */
	rte_atomic16_t load __rte_cache_aligned;
} __rte_cache_aligned;

struct dsw_queue {
	uint8_t schedule_type;
	uint8_t serving_ports[DSW_MAX_PORTS];
	uint16_t num_serving_ports;

	uint8_t flow_to_port_map[DSW_MAX_FLOWS] __rte_cache_aligned;
};

struct dsw_evdev {
	struct rte_eventdev_data *data;

	struct dsw_port ports[DSW_MAX_PORTS];
	uint16_t num_ports;
	struct dsw_queue queues[DSW_MAX_QUEUES];
	uint8_t num_queues;
	int32_t max_inflight;

	rte_atomic32_t credits_on_loan __rte_cache_aligned;
};

#define DSW_CTL_PAUS_REQ (0)
#define DSW_CTL_UNPAUS_REQ (1)
#define DSW_CTL_CFM (2)

struct dsw_ctl_msg {
	uint8_t type;
	uint8_t originating_port_id;
	uint8_t queue_id;
	uint16_t flow_hash;
} __rte_packed;

uint16_t dsw_event_enqueue(void *port, const struct rte_event *event);
uint16_t dsw_event_enqueue_burst(void *port,
				 const struct rte_event events[],
				 uint16_t events_len);
uint16_t dsw_event_enqueue_new_burst(void *port,
				     const struct rte_event events[],
				     uint16_t events_len);
uint16_t dsw_event_enqueue_forward_burst(void *port,
					 const struct rte_event events[],
					 uint16_t events_len);

uint16_t dsw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
uint16_t dsw_event_dequeue_burst(void *port, struct rte_event *events,
				 uint16_t num, uint64_t wait);

int dsw_xstats_get_names(const struct rte_eventdev *dev,
			 enum rte_event_dev_xstats_mode mode,
			 uint8_t queue_port_id,
			 struct rte_event_dev_xstats_name *xstats_names,
			 unsigned int *ids, unsigned int size);
int dsw_xstats_get(const struct rte_eventdev *dev,
		   enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		   const unsigned int ids[], uint64_t values[], unsigned int n);
uint64_t dsw_xstats_get_by_name(const struct rte_eventdev *dev,
				const char *name, unsigned int *id);

static inline struct dsw_evdev *
dsw_pmd_priv(const struct rte_eventdev *eventdev)
{
	return eventdev->data->dev_private;
}
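
/* Example usage (illustrative): inside an eventdev op
 * implementation, the driver's private data can be retrieved with:
 *
 *   struct dsw_evdev *dsw = dsw_pmd_priv(dev);
 */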

#define DSW_LOG_DP(level, fmt, args...)					\
	RTE_LOG_DP(level, EVENTDEV, "[%s] %s() line %u: " fmt,		\
		   DSW_PMD_NAME,					\
		   __func__, __LINE__, ## args)

#define DSW_LOG_DP_PORT(level, port_id, fmt, args...)			\
	DSW_LOG_DP(level, "<Port %d> " fmt, port_id, ## args)

#endif