/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Ericsson AB
 */

#ifndef _DSW_EVDEV_H_
#define _DSW_EVDEV_H_

#include <rte_event_ring.h>
#include <rte_eventdev.h>

#define DSW_PMD_NAME RTE_STR(event_dsw)
/* Code changes are required to allow more ports. */
#define DSW_MAX_PORTS (64)
#define DSW_MAX_PORT_DEQUEUE_DEPTH (128)
#define DSW_MAX_PORT_ENQUEUE_DEPTH (128)
#define DSW_MAX_PORT_OUT_BUFFER (32)

#define DSW_MAX_QUEUES (16)

#define DSW_MAX_EVENTS (16384)

/* Code changes are required to allow more flows than 32k. */
#define DSW_MAX_FLOWS_BITS (15)
#define DSW_MAX_FLOWS (1<<(DSW_MAX_FLOWS_BITS))
#define DSW_MAX_FLOWS_MASK (DSW_MAX_FLOWS-1)
/* Eventdev RTE_SCHED_TYPE_PARALLEL doesn't have a concept of flows,
 * but the 'dsw' scheduler (more or less) randomly assigns flow ids to
 * events on parallel queues, to be able to reuse some of the
 * migration mechanism and scheduling logic from
 * RTE_SCHED_TYPE_ATOMIC. By moving one of the parallel "flows" from a
 * particular port, the likelihood of events being scheduled to this
 * port is reduced, and thus a kind of statistical load balancing is
 * achieved.
 */
#define DSW_PARALLEL_FLOWS (1024)
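/* A minimal sketch (an assumption, not the driver's actual code) of
 * how an event on a parallel queue could be handed a pseudo-random
 * flow id: a simple per-port counter spreads events over the
 * DSW_PARALLEL_FLOWS artificial "flows".
 */
static inline uint16_t
dsw_example_parallel_flow_id(uint16_t *next_parallel_flow_id)
{
	return (*next_parallel_flow_id)++ % DSW_PARALLEL_FLOWS;
}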
/* 'Background tasks' are polling the control rings for
 * migration-related messages, or flushing the output buffer (so that
 * buffered events don't linger too long). This value shouldn't be too
 * low, since the system then won't benefit from the 'batching' effects
 * of the output buffer, and shouldn't be too high, since that will
 * make buffered events linger too long in case the port goes idle.
 */
#define DSW_MAX_PORT_OPS_PER_BG_TASK (128)
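/* A sketch (an assumption, not the driver's exact logic) of how the
 * per-port op counter could pace the background task: every
 * enqueue/dequeue operation bumps the counter, and once it reaches
 * DSW_MAX_PORT_OPS_PER_BG_TASK the port polls its control ring and
 * flushes its output buffers.
 */
static inline int
dsw_example_bg_task_due(uint16_t *ops_since_bg_task, uint16_t new_ops)
{
	*ops_since_bg_task += new_ops;

	if (*ops_since_bg_task < DSW_MAX_PORT_OPS_PER_BG_TASK)
		return 0;

	*ops_since_bg_task = 0;
	return 1;
}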
/* Avoid making small 'loans' from the central in-flight event credit
 * pool, to improve efficiency.
 */
#define DSW_MIN_CREDIT_LOAN (64)
#define DSW_PORT_MAX_CREDITS (2*DSW_MIN_CREDIT_LOAN)
#define DSW_PORT_MIN_CREDITS (DSW_MIN_CREDIT_LOAN)
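/* Illustrative helper (an assumption, not the driver's actual code):
 * when a port holds fewer credits than it needs, it takes a loan from
 * the central in-flight credit pool that is never smaller than
 * DSW_MIN_CREDIT_LOAN, so that the cost of the atomic operation on
 * the shared pool is amortized over many events.
 */
static inline int32_t
dsw_example_credit_loan_size(int32_t held_credits, int32_t required_credits)
{
	int32_t missing = required_credits - held_credits;

	if (missing <= 0)
		return 0;

	return missing > DSW_MIN_CREDIT_LOAN ? missing : DSW_MIN_CREDIT_LOAN;
}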
/* The rings are dimensioned so that all in-flight events can reside
 * on any one of the port rings, to avoid the trouble of having to
 * care about the case where there's no room on the destination port's
 * in_ring.
 */
#define DSW_IN_RING_SIZE (DSW_MAX_EVENTS)
#define DSW_MAX_LOAD (INT16_MAX)
#define DSW_LOAD_FROM_PERCENT(x) ((int16_t)(((x)*DSW_MAX_LOAD)/100))
#define DSW_LOAD_TO_PERCENT(x) ((100*(x))/DSW_MAX_LOAD)
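/* For example, DSW_LOAD_FROM_PERCENT(70) evaluates to (70*32767)/100 =
 * 22936, and DSW_LOAD_TO_PERCENT(22936) maps back to 69; both
 * divisions truncate, so a round trip may lose a percentage point.
 */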
/* The thought behind keeping the load update interval shorter than
 * the migration interval is that the load from newly migrated flows
 * should 'show up' on the load measurement before new migrations are
 * considered. This is to avoid having too many flows, from too many
 * source ports, being migrated too quickly to a lightly loaded port -
 * in particular since this might cause the system to oscillate.
 */
#define DSW_LOAD_UPDATE_INTERVAL (DSW_MIGRATION_INTERVAL/4)
#define DSW_OLD_LOAD_WEIGHT (1)
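/* A sketch (an assumption about how DSW_OLD_LOAD_WEIGHT is meant to be
 * used): the port load estimate is updated as a weighted average in
 * which the previous estimate carries DSW_OLD_LOAD_WEIGHT parts and
 * the newly measured load one part.
 */
static inline int16_t
dsw_example_blend_load(int16_t old_load, int16_t measured_load)
{
	return (DSW_OLD_LOAD_WEIGHT * old_load + measured_load) /
		(DSW_OLD_LOAD_WEIGHT + 1);
}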
/* The minimum time (in us) between two flow migrations. What puts an
 * upper limit on the actual migration rate is primarily the pace at
 * which the ports send and receive control messages, which in turn is
 * largely a function of how many cycles are spent on the processing
 * of each enqueue and dequeue call.
 */
#define DSW_MIGRATION_INTERVAL (1000)
#define DSW_MIN_SOURCE_LOAD_FOR_MIGRATION (DSW_LOAD_FROM_PERCENT(70))
#define DSW_MAX_TARGET_LOAD_FOR_MIGRATION (DSW_LOAD_FROM_PERCENT(95))
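/* Illustrative predicate (an assumption about how the two thresholds
 * combine): a flow migration is only considered when the source port
 * is loaded above DSW_MIN_SOURCE_LOAD_FOR_MIGRATION and the candidate
 * target port is below DSW_MAX_TARGET_LOAD_FOR_MIGRATION.
 */
static inline int
dsw_example_migration_worthwhile(int16_t source_load, int16_t target_load)
{
	return source_load > DSW_MIN_SOURCE_LOAD_FOR_MIGRATION &&
		target_load < DSW_MAX_TARGET_LOAD_FOR_MIGRATION;
}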
#define DSW_MAX_EVENTS_RECORDED (128)

/* Only one outstanding migration per port is allowed. */
#define DSW_MAX_PAUSED_FLOWS (DSW_MAX_PORTS)
/* Enough room for pause request/confirm and unpause request/confirm
 * for all possible senders.
 */
#define DSW_CTL_IN_RING_SIZE ((DSW_MAX_PORTS-1)*4)
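/* With DSW_MAX_PORTS at 64, this works out to (64-1)*4 = 252 slots per
 * control ring: one pause request, pause confirmation, unpause request
 * and unpause confirmation from each possible peer port.
 */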
struct dsw_queue_flow {
	uint8_t queue_id;
	uint16_t flow_hash;
};

enum dsw_migration_state {
	DSW_MIGRATION_STATE_IDLE,
	DSW_MIGRATION_STATE_PAUSING,
	DSW_MIGRATION_STATE_FORWARDING,
	DSW_MIGRATION_STATE_UNPAUSING
};
struct dsw_port {
	uint16_t id;

	/* Keeping a pointer here to avoid container_of() calls, which
	 * are expensive since they are very frequent and will result
	 * in an integer multiplication (since the port id is an index
	 * into the dsw_evdev port array).
	 */
	struct dsw_evdev *dsw;

	uint16_t dequeue_depth;
	uint16_t enqueue_depth;

	int32_t inflight_credits;

	int32_t new_event_threshold;

	uint16_t pending_releases;

	uint16_t next_parallel_flow_id;

	uint16_t ops_since_bg_task;

	/* Most recent 'background' processing. */
	uint64_t last_bg;

	/* For port load measurement. */
	uint64_t next_load_update;
	uint64_t load_update_interval;
	uint64_t measurement_start;
	uint64_t busy_start;
	uint64_t busy_cycles;
	uint64_t total_busy_cycles;
	/* For the ctl interface and flow migration mechanism. */
	uint64_t next_migration;
	uint64_t migration_interval;
	enum dsw_migration_state migration_state;

	uint64_t migration_start;
	uint64_t migration_latency;

	uint8_t migration_target_port_id;
	struct dsw_queue_flow migration_target_qf;

	uint16_t paused_flows_len;
	struct dsw_queue_flow paused_flows[DSW_MAX_PAUSED_FLOWS];
	/* In a very contrived worst case all inflight events can be
	 * lying around paused here.
	 */
	uint16_t paused_events_len;
	struct rte_event paused_events[DSW_MAX_EVENTS];
	uint16_t seen_events_len;
	uint16_t seen_events_idx;
	struct dsw_queue_flow seen_events[DSW_MAX_EVENTS_RECORDED];

	uint16_t out_buffer_len[DSW_MAX_PORTS];
	struct rte_event out_buffer[DSW_MAX_PORTS][DSW_MAX_PORT_OUT_BUFFER];

	uint16_t in_buffer_len;
	uint16_t in_buffer_start;
	/* This buffer may contain events that were read from the
	 * in_ring during the flow migration process.
	 */
	struct rte_event in_buffer[DSW_MAX_EVENTS];

	struct rte_event_ring *in_ring __rte_cache_aligned;

	struct rte_ring *ctl_in_ring __rte_cache_aligned;

	/* Estimate of current port load. */
	rte_atomic16_t load __rte_cache_aligned;
} __rte_cache_aligned;
struct dsw_queue {
	uint8_t schedule_type;
	uint8_t serving_ports[DSW_MAX_PORTS];
	uint16_t num_serving_ports;

	uint8_t flow_to_port_map[DSW_MAX_FLOWS] __rte_cache_aligned;
};
struct dsw_evdev {
	struct rte_eventdev_data *data;

	struct dsw_port ports[DSW_MAX_PORTS];
	uint16_t num_ports;
	struct dsw_queue queues[DSW_MAX_QUEUES];
	uint8_t num_queues;
	int32_t max_inflight;

	rte_atomic32_t credits_on_loan __rte_cache_aligned;
};
#define DSW_CTL_PAUS_REQ (0)
#define DSW_CTL_UNPAUS_REQ (1)
#define DSW_CTL_CFM (2)
/* sizeof(struct dsw_ctl_msg) must be less than or equal to
 * sizeof(void *), to fit on the control ring.
 */
struct dsw_ctl_msg {
	uint8_t type:2;
	uint8_t originating_port_id:6;
	uint8_t queue_id;
	uint16_t flow_hash;
};
uint16_t dsw_event_enqueue(void *port, const struct rte_event *event);
uint16_t dsw_event_enqueue_burst(void *port,
				 const struct rte_event events[],
				 uint16_t events_len);
uint16_t dsw_event_enqueue_new_burst(void *port,
				     const struct rte_event events[],
				     uint16_t events_len);
uint16_t dsw_event_enqueue_forward_burst(void *port,
					 const struct rte_event events[],
					 uint16_t events_len);

uint16_t dsw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
uint16_t dsw_event_dequeue_burst(void *port, struct rte_event *events,
				 uint16_t num, uint64_t wait);
static inline struct dsw_evdev *
dsw_pmd_priv(const struct rte_eventdev *eventdev)
{
	return eventdev->data->dev_private;
}
#define DSW_LOG_DP(level, fmt, args...) \
	RTE_LOG_DP(level, EVENTDEV, "[%s] %s() line %u: " fmt, \
		   DSW_PMD_NAME, \
		   __func__, __LINE__, ## args)
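/* Example use (illustrative only; the format string and arguments are
 * hypothetical):
 *
 *	DSW_LOG_DP(DEBUG, "Port %d requested %d credits.\n", port_id, credits);
 */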
#define DSW_LOG_DP_PORT(level, port_id, fmt, args...) \
	DSW_LOG_DP(level, "<Port %d> " fmt, port_id, ## args)

#endif /* _DSW_EVDEV_H_ */