1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Ericsson AB
8 #include <rte_event_ring.h>
9 #include <rte_eventdev.h>
11 #define DSW_PMD_NAME RTE_STR(event_dsw)
13 #define DSW_MAX_PORTS (64)
14 #define DSW_MAX_PORT_DEQUEUE_DEPTH (128)
15 #define DSW_MAX_PORT_ENQUEUE_DEPTH (128)
16 #define DSW_MAX_PORT_OUT_BUFFER (32)
18 #define DSW_MAX_QUEUES (16)
20 #define DSW_MAX_EVENTS (16384)
22 /* Code changes are required to allow more flows than 32k. */
23 #define DSW_MAX_FLOWS_BITS (15)
24 #define DSW_MAX_FLOWS (1<<(DSW_MAX_FLOWS_BITS))
25 #define DSW_MAX_FLOWS_MASK (DSW_MAX_FLOWS-1)
27 /* Eventdev RTE_SCHED_TYPE_PARALLEL doesn't have a concept of flows,
28 * but the 'dsw' scheduler (more or less) randomly assigns flow ids to
29 * events on parallel queues, to be able to reuse some of the
30 * migration mechanism and scheduling logic from
31 * RTE_SCHED_TYPE_ATOMIC. By moving one of the parallel "flows" from a
32 * particular port, the likelihood of events being scheduled to this
33 * port is reduced, and thus a kind of statistical load balancing is
36 #define DSW_PARALLEL_FLOWS (1024)
38 /* 'Background tasks' are polling the control rings for
39 * migration-related messages, or flush the output buffer (so
40 * buffered events don't linger too long). Shouldn't be too low,
41 * since the system won't benefit from the 'batching' effects from
42 * the output buffer, and shouldn't be too high, since it will make
43 * buffered events linger too long in case the port goes idle.
45 #define DSW_MAX_PORT_OPS_PER_BG_TASK (128)
47 /* Avoid making small 'loans' from the central in-flight event credit
48 * pool, to improve efficiency.
50 #define DSW_MIN_CREDIT_LOAN (64)
51 #define DSW_PORT_MAX_CREDITS (2*DSW_MIN_CREDIT_LOAN)
52 #define DSW_PORT_MIN_CREDITS (DSW_MIN_CREDIT_LOAN)
54 /* The rings are dimensioned so that all in-flight events can reside
55 * on any one of the port rings, to avoid the trouble of having to
56 * care about the case where there's no room on the destination port's
59 #define DSW_IN_RING_SIZE (DSW_MAX_EVENTS)
61 #define DSW_MAX_LOAD (INT16_MAX)
62 #define DSW_LOAD_FROM_PERCENT(x) ((int16_t)(((x)*DSW_MAX_LOAD)/100))
63 #define DSW_LOAD_TO_PERCENT(x) ((100*x)/DSW_MAX_LOAD)
65 /* The thought behind keeping the load update interval shorter than
66 * the migration interval is that the load from newly migrated flows
67 * should 'show up' on the load measurement before new migrations are
68 * considered. This is to avoid having too many flows, from too many
69 * source ports, to be migrated too quickly to a lightly loaded port -
70 * in particular since this might cause the system to oscillate.
72 #define DSW_LOAD_UPDATE_INTERVAL (DSW_MIGRATION_INTERVAL/4)
73 #define DSW_OLD_LOAD_WEIGHT (1)
75 /* The minimum time (in us) between two flow migrations. What puts an
76 * upper limit on the actual migration rate is primarily the pace in
77 * which the ports send and receive control messages, which in turn is
78 * largely a function of how many cycles are spent on the processing of
81 #define DSW_MIGRATION_INTERVAL (1000)
82 #define DSW_MIN_SOURCE_LOAD_FOR_MIGRATION (DSW_LOAD_FROM_PERCENT(70))
83 #define DSW_MAX_TARGET_LOAD_FOR_MIGRATION (DSW_LOAD_FROM_PERCENT(95))
85 #define DSW_MAX_EVENTS_RECORDED (128)
87 /* Only one outstanding migration per port is allowed */
88 #define DSW_MAX_PAUSED_FLOWS (DSW_MAX_PORTS)
90 /* Enough room for paus request/confirm and unpaus request/confirm for
91 * all possible senders.
93 #define DSW_CTL_IN_RING_SIZE ((DSW_MAX_PORTS-1)*4)
95 /* With DSW_SORT_DEQUEUED enabled, the scheduler will, at the point of
96 * dequeue(), arrange events so that events with the same flow id on
97 * the same queue forms a back-to-back "burst", and also so that such
98 * bursts of different flow ids, but on the same queue, also come
99 * consecutively. All this in an attempt to improve data and
100 * instruction cache usage for the application, at the cost of a
101 * scheduler overhead increase.
104 /* #define DSW_SORT_DEQUEUED */
/* Identifies a flow on a particular event queue — NOTE(review): the
 * members are not visible in this chunk; presumably a queue id plus a
 * flow hash, given its use in migration_target_qf/paused_flows below.
 */
106 struct dsw_queue_flow {
/* Per-port state machine for migrating a flow to another port.
 * NOTE(review): the progression IDLE -> PAUSING -> FORWARDING ->
 * UNPAUSING -> IDLE is inferred from the state names and the
 * pause/unpause ctl messages below — confirm against the event path
 * implementation.
 */
111 enum dsw_migration_state {
112 DSW_MIGRATION_STATE_IDLE,
113 DSW_MIGRATION_STATE_PAUSING,
114 DSW_MIGRATION_STATE_FORWARDING,
115 DSW_MIGRATION_STATE_UNPAUSING
121 /* Keeping a pointer here to avoid container_of() calls, which
122 * are expensive since they are very frequent and will result
123 * in an integer multiplication (since the port id is an index
124 * into the dsw_evdev port array).
126 struct dsw_evdev *dsw;
/* Configured per-port burst limits (bounded by
 * DSW_MAX_PORT_DEQUEUE_DEPTH/DSW_MAX_PORT_ENQUEUE_DEPTH).
 */
128 uint16_t dequeue_depth;
129 uint16_t enqueue_depth;
/* Credits this port currently holds from the central in-flight
 * credit pool (credits_on_loan) — presumably kept within
 * [DSW_PORT_MIN_CREDITS, DSW_PORT_MAX_CREDITS]; TODO confirm in the
 * event path.
 */
131 int32_t inflight_credits;
133 int32_t new_event_threshold;
/* NOTE(review): name suggests releases owed from earlier dequeues;
 * verify against the dequeue implementation.
 */
135 uint16_t pending_releases;
/* Next flow id to assign to events on parallel queues (see the
 * DSW_PARALLEL_FLOWS comment above).
 */
137 uint16_t next_parallel_flow_id;
/* Operations performed since the last round of 'background'
 * processing (see DSW_MAX_PORT_OPS_PER_BG_TASK).
 */
139 uint16_t ops_since_bg_task;
141 /* most recent 'background' processing */
144 /* For port load measurement. */
145 uint64_t next_load_update;
146 uint64_t load_update_interval;
147 uint64_t measurement_start;
149 uint64_t busy_cycles;
150 uint64_t total_busy_cycles;
152 /* For the ctl interface and flow migration mechanism. */
153 uint64_t next_migration;
154 uint64_t migration_interval;
155 enum dsw_migration_state migration_state;
157 uint64_t migration_start;
159 uint64_t migration_latency;
161 uint8_t migration_target_port_id;
162 struct dsw_queue_flow migration_target_qf;
/* Flows currently paused on this port (bounded by
 * DSW_MAX_PAUSED_FLOWS; see comment on that macro).
 */
165 uint16_t paused_flows_len;
166 struct dsw_queue_flow paused_flows[DSW_MAX_PAUSED_FLOWS];
168 /* In a very contrived worst case all inflight events can be
169 * lying around paused here.
171 uint16_t paused_events_len;
172 struct rte_event paused_events[DSW_MAX_EVENTS];
/* Ring of recently seen queue/flow pairs — NOTE(review): presumably
 * used to pick migration candidates, given DSW_MAX_EVENTS_RECORDED;
 * confirm in the migration logic.
 */
174 uint16_t seen_events_len;
175 uint16_t seen_events_idx;
176 struct dsw_queue_flow seen_events[DSW_MAX_EVENTS_RECORDED];
/* Per-port event counters — presumably exported via the xstats
 * functions declared below.
 */
178 uint64_t new_enqueued;
179 uint64_t forward_enqueued;
180 uint64_t release_enqueued;
181 uint64_t queue_enqueued[DSW_MAX_QUEUES];
184 uint64_t queue_dequeued[DSW_MAX_QUEUES];
/* Events buffered per destination port (up to
 * DSW_MAX_PORT_OUT_BUFFER each), flushed as part of the background
 * tasks described above.
 */
186 uint16_t out_buffer_len[DSW_MAX_PORTS];
187 struct rte_event out_buffer[DSW_MAX_PORTS][DSW_MAX_PORT_OUT_BUFFER];
189 uint16_t in_buffer_len;
190 uint16_t in_buffer_start;
191 /* This buffer may contain events that were read up from the
192 * in_ring during the flow migration process.
194 struct rte_event in_buffer[DSW_MAX_EVENTS];
/* Input event ring, sized DSW_IN_RING_SIZE so that all in-flight
 * events fit on any one port's ring (see comment above).
 */
196 struct rte_event_ring *in_ring __rte_cache_aligned;
/* Control ring for pause/unpause requests and confirmations (sized
 * DSW_CTL_IN_RING_SIZE).
 */
198 struct rte_ring *ctl_in_ring __rte_cache_aligned;
200 /* Estimate of current port load. */
201 rte_atomic16_t load __rte_cache_aligned;
202 } __rte_cache_aligned;
/* Eventdev schedule type (RTE_SCHED_TYPE_*) configured for this
 * queue.
 */
205 uint8_t schedule_type;
/* Ports serving (linked to) this queue — NOTE(review): presumably a
 * dense array of port ids of length num_serving_ports; confirm at
 * the link/unlink implementation.
 */
206 uint8_t serving_ports[DSW_MAX_PORTS];
207 uint16_t num_serving_ports;
/* Current flow-id -> port-id assignment; updated when flows are
 * migrated. Cache-aligned as it is read on the fast path — TODO
 * confirm writer/reader synchronization.
 */
209 uint8_t flow_to_port_map[DSW_MAX_FLOWS] __rte_cache_aligned;
/* Back-pointer to the generic eventdev data for this device. */
213 struct rte_eventdev_data *data;
215 struct dsw_port ports[DSW_MAX_PORTS];
217 struct dsw_queue queues[DSW_MAX_QUEUES];
/* Upper bound on in-flight events (size of the central credit
 * pool).
 */
219 int32_t max_inflight;
/* Credits currently lent out to ports from the central in-flight
 * credit pool (loans are at least DSW_MIN_CREDIT_LOAN; see above).
 * Cache-aligned to keep this hot counter off the read-mostly lines.
 */
221 rte_atomic32_t credits_on_loan __rte_cache_aligned;
/* Control message types carried on the ctl rings: pause request,
 * unpause request, and the corresponding confirmation (see the
 * DSW_CTL_IN_RING_SIZE comment above).
 */
224 #define DSW_CTL_PAUS_REQ (0)
225 #define DSW_CTL_UNPAUS_REQ (1)
226 #define DSW_CTL_CFM (2)
/* Port id of the message sender — NOTE(review): the enclosing struct
 * declaration is not fully visible in this chunk; presumably used so
 * the receiver knows which port to confirm back to.
 */
230 uint8_t originating_port_id;
/* Fast-path enqueue/dequeue entry points. 'port' is the opaque
 * eventdev port handle — presumably a struct dsw_port *, given the
 * eventdev PMD convention; return value is the number of events
 * actually enqueued/dequeued, per the rte_eventdev API contract.
 */
235 uint16_t dsw_event_enqueue(void *port, const struct rte_event *event);
236 uint16_t dsw_event_enqueue_burst(void *port,
237 const struct rte_event events[],
238 uint16_t events_len);
239 uint16_t dsw_event_enqueue_new_burst(void *port,
240 const struct rte_event events[],
241 uint16_t events_len);
242 uint16_t dsw_event_enqueue_forward_burst(void *port,
243 const struct rte_event events[],
244 uint16_t events_len);
246 uint16_t dsw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
247 uint16_t dsw_event_dequeue_burst(void *port, struct rte_event *events,
248 uint16_t num, uint64_t wait);
/* Extended-statistics (xstats) hooks; signatures follow the
 * rte_event_dev_xstats_* driver interface (name listing, bulk get by
 * id, and single-value lookup by name).
 */
250 int dsw_xstats_get_names(const struct rte_eventdev *dev,
251 enum rte_event_dev_xstats_mode mode,
252 uint8_t queue_port_id,
253 struct rte_event_dev_xstats_name *xstats_names,
254 unsigned int *ids, unsigned int size);
255 int dsw_xstats_get(const struct rte_eventdev *dev,
256 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
257 const unsigned int ids[], uint64_t values[], unsigned int n);
258 uint64_t dsw_xstats_get_by_name(const struct rte_eventdev *dev,
259 const char *name, unsigned int *id);
/* Convert a generic eventdev handle into this PMD's private device
 * struct, which is stored in dev_private.
 */
261 static inline struct dsw_evdev *
262 dsw_pmd_priv(const struct rte_eventdev *eventdev)
264 return eventdev->data->dev_private;
/* Data-path logging wrapper around RTE_LOG_DP, prefixing the PMD
 * name, function and line. NOTE(review): RTE_LOG_DP compiles the
 * call out below RTE_LOG_DP_LEVEL, per the DPDK log API — so this is
 * safe on the fast path.
 */
267 #define DSW_LOG_DP(level, fmt, args...) \
268 RTE_LOG_DP(level, EVENTDEV, "[%s] %s() line %u: " fmt, \
270 __func__, __LINE__, ## args)
/* Same as DSW_LOG_DP, with the originating port id prefixed. */
272 #define DSW_LOG_DP_PORT(level, port_id, fmt, args...) \
273 DSW_LOG_DP(level, "<Port %d> " fmt, port_id, ## args)