/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
5 #ifndef __INCLUDE_RTE_SCHED_H__
6 #define __INCLUDE_RTE_SCHED_H__
/**
 * @file
 * RTE Hierarchical Scheduler
 *
 * The hierarchical scheduler prioritizes the transmission of packets
 * from different users and traffic classes according to the Service
 * Level Agreements (SLAs) defined for the current network node.
 *
 * The scheduler supports thousands of packet queues grouped under a
 * multi-level hierarchy:
 *     1. Port:
 *           - Typical usage: output Ethernet port;
 *           - Multiple ports are scheduled in round robin order with
 *             equal priority;
 *     2. Subport:
 *           - Typical usage: group of users;
 *           - Traffic shaping using the token bucket algorithm
 *             (one bucket per subport);
 *           - Upper limit enforced per traffic class at subport level;
 *           - Lower priority traffic classes able to reuse subport
 *             bandwidth currently unused by higher priority traffic
 *             classes of the same subport;
 *           - When any subport traffic class is oversubscribed
 *             (configuration time event), the usage of subport member
 *             pipes with high demand for that traffic class pipes is
 *             truncated to a dynamically adjusted value with no
 *             impact to low demand pipes;
 *     3. Pipe:
 *           - Typical usage: individual user/subscriber;
 *           - Traffic shaping using the token bucket algorithm
 *             (one bucket per pipe);
 *     4. Traffic class:
 *           - Traffic classes of the same pipe handled in strict
 *             priority order;
 *           - Upper limit enforced per traffic class at the pipe level;
 *           - Lower priority traffic classes able to reuse pipe
 *             bandwidth currently unused by higher priority traffic
 *             classes of the same pipe;
 *     5. Queue:
 *           - Typical usage: queue hosting packets from one or
 *             multiple connections of same traffic class belonging to
 *             the same user;
 *           - Weighted Round Robin (WRR) is used to service the
 *             queues within same pipe lowest priority traffic class (best-effort).
 */
59 #include <sys/types.h>
60 #include <rte_compat.h>
62 #include <rte_meter.h>
64 /** Random Early Detection (RED) */
/** Maximum number of queues per pipe.
 * Note that the multiple queues (power of 2) can only be assigned to
 * lowest priority (best-effort) traffic class. Other higher priority traffic
 * classes can only have one queue.
 *
 * @see struct rte_sched_port_params
 */
77 #define RTE_SCHED_QUEUES_PER_PIPE 16
/** Number of WRR queues for best-effort traffic class per pipe.
 *
 * @see struct rte_sched_pipe_params
 */
83 #define RTE_SCHED_BE_QUEUES_PER_PIPE 4
/** Number of traffic classes per pipe (as well as subport).
 * @see struct rte_sched_subport_params
 * @see struct rte_sched_pipe_params
 */
89 #define RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE \
90 (RTE_SCHED_QUEUES_PER_PIPE - RTE_SCHED_BE_QUEUES_PER_PIPE + 1)
/** Best-effort traffic class ID */
95 #define RTE_SCHED_TRAFFIC_CLASS_BE (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)
/**
 * Ethernet framing overhead. Overhead fields per Ethernet frame:
 * 1. Preamble: 7 bytes;
 * 2. Start of Frame Delimiter (SFD): 1 byte;
 * 3. Frame Check Sequence (FCS): 4 bytes;
 * 4. Inter Frame Gap (IFG): 12 bytes.
 *
 * The FCS is considered overhead only if not included in the packet
 * length (field pkt_len of struct rte_mbuf).
 *
 * @see struct rte_sched_port_params
 */
/* Default framing overhead, overridable at build time; the #ifndef
 * guard was left unterminated in this chunk — #endif restored. */
#ifndef RTE_SCHED_FRAME_OVERHEAD_DEFAULT
#define RTE_SCHED_FRAME_OVERHEAD_DEFAULT 24
#endif
114 * Pipe configuration parameters. The period and credits_per_period
115 * parameters are measured in bytes, with one byte meaning the time
116 * duration associated with the transmission of one byte on the
117 * physical medium of the output port, with pipe or pipe traffic class
118 * rate (measured as percentage of output port rate) determined as
119 * credits_per_period divided by period. One credit represents one
122 struct rte_sched_pipe_params {
123 /** Token bucket rate (measured in bytes per second) */
126 /** Token bucket size (measured in credits) */
129 /** Traffic class rates (measured in bytes per second) */
130 uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
132 /** Enforcement period (measured in milliseconds) */
135 /** Best-effort traffic class oversubscription weight */
136 uint8_t tc_ov_weight;
138 /** WRR weights of best-effort traffic class queues */
139 uint8_t wrr_weights[RTE_SCHED_BE_QUEUES_PER_PIPE];
143 * Subport configuration parameters. The period and credits_per_period
144 * parameters are measured in bytes, with one byte meaning the time
145 * duration associated with the transmission of one byte on the
146 * physical medium of the output port, with pipe or pipe traffic class
147 * rate (measured as percentage of output port rate) determined as
148 * credits_per_period divided by period. One credit represents one
151 struct rte_sched_subport_params {
152 /** Token bucket rate (measured in bytes per second) */
155 /** Token bucket size (measured in credits) */
158 /** Traffic class rates (measured in bytes per second) */
159 uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
161 /** Enforcement period for rates (measured in milliseconds) */
164 /** Number of subport pipes.
165 * The subport can enable/allocate fewer pipes than the maximum
166 * number set through struct port_params::n_max_pipes_per_subport,
167 * as needed, to avoid memory allocation for the queues of the
168 * pipes that are not really needed.
170 uint32_t n_pipes_per_subport_enabled;
172 /** Packet queue size for each traffic class.
173 * All the pipes within the same subport share the similar
174 * configuration for the queues.
176 uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
178 /** Pipe profile table.
179 * Every pipe is configured using one of the profiles from this table.
181 struct rte_sched_pipe_params *pipe_profiles;
183 /** Profiles in the pipe profile table */
184 uint32_t n_pipe_profiles;
186 /** Max allowed profiles in the pipe profile table */
187 uint32_t n_max_pipe_profiles;
190 /** RED parameters */
191 struct rte_red_params red_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
195 /** Subport statistics */
196 struct rte_sched_subport_stats {
197 /** Number of packets successfully written */
198 uint64_t n_pkts_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
200 /** Number of packets dropped */
201 uint64_t n_pkts_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
203 /** Number of bytes successfully written for each traffic class */
204 uint64_t n_bytes_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
206 /** Number of bytes dropped for each traffic class */
207 uint64_t n_bytes_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
210 /** Number of packets dropped by red */
211 uint64_t n_pkts_red_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
215 /** Queue statistics */
216 struct rte_sched_queue_stats {
217 /** Packets successfully written */
220 /** Packets dropped */
221 uint64_t n_pkts_dropped;
224 /** Packets dropped by RED */
225 uint64_t n_pkts_red_dropped;
228 /** Bytes successfully written */
232 uint64_t n_bytes_dropped;
235 /** Port configuration parameters. */
236 struct rte_sched_port_params {
237 /** Name of the port to be associated */
243 /** Output port rate (measured in bytes per second) */
246 /** Maximum Ethernet frame size (measured in bytes).
247 * Should not include the framing overhead.
251 /** Framing overhead per packet (measured in bytes) */
252 uint32_t frame_overhead;
254 /** Number of subports */
255 uint32_t n_subports_per_port;
257 /** Maximum number of subport pipes.
258 * This parameter is used to reserve a fixed number of bits
259 * in struct rte_mbuf::sched.queue_id for the pipe_id for all
260 * the subports of the same port.
262 uint32_t n_pipes_per_subport;
/**
 * Hierarchical scheduler port configuration
 *
 * @param params
 *   Port scheduler configuration parameter structure
 * @return
 *   Handle to port scheduler instance upon success or NULL otherwise.
 */
struct rte_sched_port *
rte_sched_port_config(struct rte_sched_port_params *params);
/**
 * Hierarchical scheduler port free
 *
 * @param port
 *   Handle to port scheduler instance
 */
/* NOTE(review): the return-type line (presumably void) is not visible
 * in this chunk — confirm against the full header. */
rte_sched_port_free(struct rte_sched_port *port);
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Hierarchical scheduler pipe profile add
 *
 * @param port
 *   Handle to port scheduler instance
 * @param params
 *   Pipe profile parameters
 * @param pipe_profile_id
 *   Set to valid profile id when profile is added successfully.
 * @return
 *   0 upon success, error code otherwise
 */
/* NOTE(review): the return-type line (presumably int, per the @return
 * text) and possibly a subport id parameter are not visible in this
 * chunk — confirm against the full header. */
rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,
	struct rte_sched_pipe_params *params,
	uint32_t *pipe_profile_id);
/**
 * Hierarchical scheduler subport configuration
 *
 * @param port
 *   Handle to port scheduler instance
 * @param params
 *   Subport configuration parameters
 * @return
 *   0 upon success, error code otherwise
 */
/* NOTE(review): the return-type line (presumably int, per the @return
 * text) and possibly a subport id parameter are not visible in this
 * chunk — confirm against the full header. */
rte_sched_subport_config(struct rte_sched_port *port,
	struct rte_sched_subport_params *params);
/**
 * Hierarchical scheduler pipe configuration
 *
 * @param port
 *   Handle to port scheduler instance
 * @param pipe_profile
 *   ID of subport-level pre-configured pipe profile
 * @return
 *   0 upon success, error code otherwise
 */
/* NOTE(review): the return-type line and the subport/pipe id parameter
 * lines (the visible doc text mentions "Pipe ID within subport") are
 * not visible in this chunk — confirm against the full header. */
rte_sched_pipe_config(struct rte_sched_port *port,
	int32_t pipe_profile);
/**
 * Hierarchical scheduler memory footprint size per port
 *
 * @param port_params
 *   Port scheduler configuration parameter structure
 * @param subport_params
 *   Array of subport parameter structures
 * @return
 *   Memory footprint size in bytes upon success, 0 otherwise
 */
/* NOTE(review): the return-type line (an unsigned byte count, per the
 * @return text) is not visible in this chunk — confirm against the
 * full header. */
rte_sched_port_get_memory_footprint(struct rte_sched_port_params *port_params,
	struct rte_sched_subport_params **subport_params);
/**
 * Hierarchical scheduler subport statistics read
 *
 * @param port
 *   Handle to port scheduler instance
 * @param stats
 *   Pointer to pre-allocated subport statistics structure where the statistics
 *   counters should be stored
 * @param tc_ov
 *   Pointer to pre-allocated RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE-entry array
 *   where the oversubscription status for each of the subport traffic classes
 *   should be stored
 * @return
 *   0 upon success, error code otherwise
 */
/* NOTE(review): the declaration below ends with a comma — the return
 * type and the trailing parameter(s) (e.g. a subport id and the
 * oversubscription-status array described above) are not visible in
 * this chunk — confirm against the full header. */
rte_sched_subport_read_stats(struct rte_sched_port *port,
	struct rte_sched_subport_stats *stats,
/**
 * Hierarchical scheduler queue statistics read
 *
 * @param port
 *   Handle to port scheduler instance
 * @param queue_id
 *   Queue ID within port scheduler
 * @param stats
 *   Pointer to pre-allocated subport statistics structure where the statistics
 *   counters should be stored
 * @param qlen
 *   Pointer to pre-allocated variable where the current queue length
 *   should be stored
 * @return
 *   0 upon success, error code otherwise
 */
/* NOTE(review): the declaration below ends with a comma — the return
 * type, the queue id parameter and the queue-length output parameter
 * described above are not visible in this chunk — confirm against the
 * full header. */
rte_sched_queue_read_stats(struct rte_sched_port *port,
	struct rte_sched_queue_stats *stats,
/**
 * Scheduler hierarchy path write to packet descriptor. Typically
 * called by the packet classification stage.
 *
 * @param port
 *   Handle to port scheduler instance
 * @param pkt
 *   Packet descriptor handle
 * @param subport
 *   Subport ID
 * @param pipe
 *   Pipe ID within subport
 * @param traffic_class
 *   Traffic class ID within pipe (0 .. RTE_SCHED_TRAFFIC_CLASS_BE)
 * @param queue
 *   Queue ID within pipe traffic class, 0 for high priority TCs, and
 *   0 .. (RTE_SCHED_BE_QUEUES_PER_PIPE - 1) for best-effort TC
 * @param color
 *   Packet color
 */
/* NOTE(review): the return-type line (presumably void) is not visible
 * in this chunk — confirm against the full header. */
rte_sched_port_pkt_write(struct rte_sched_port *port,
	struct rte_mbuf *pkt,
	uint32_t subport, uint32_t pipe, uint32_t traffic_class,
	uint32_t queue, enum rte_color color);
/**
 * Scheduler hierarchy path read from packet descriptor (struct
 * rte_mbuf). Typically called as part of the hierarchical scheduler
 * enqueue operation. The subport, pipe, traffic class and queue
 * parameters need to be pre-allocated by the caller.
 *
 * @param port
 *   Handle to port scheduler instance
 * @param pkt
 *   Packet descriptor handle
 * @param subport
 *   Subport ID
 * @param pipe
 *   Pipe ID within subport
 * @param traffic_class
 *   Traffic class ID within pipe (0 .. RTE_SCHED_TRAFFIC_CLASS_BE)
 * @param queue
 *   Queue ID within pipe traffic class, 0 for high priority TCs, and
 *   0 .. (RTE_SCHED_BE_QUEUES_PER_PIPE - 1) for best-effort TC
 */
/* NOTE(review): the return-type line (presumably void) is not visible
 * in this chunk — confirm against the full header. */
rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
	const struct rte_mbuf *pkt,
	uint32_t *subport, uint32_t *pipe,
	uint32_t *traffic_class, uint32_t *queue);
467 rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt);
/**
 * Hierarchical scheduler port enqueue. Writes up to n_pkts to port
 * scheduler and returns the number of packets actually written. For
 * each packet, the port scheduler queue to write the packet to is
 * identified by reading the hierarchy path from the packet
 * descriptor; if the queue is full or congested and the packet is not
 * written to the queue, then the packet is automatically dropped
 * without any action required from the caller.
 *
 * @param port
 *   Handle to port scheduler instance
 * @param pkts
 *   Array storing the packet descriptor handles
 * @param n_pkts
 *   Number of packets to enqueue from the pkts array into the port scheduler
 * @return
 *   Number of packets successfully enqueued
 */
/* NOTE(review): the return-type line (a packet count, per the @return
 * text) is not visible in this chunk — confirm against the full
 * header. */
rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts);
/**
 * Hierarchical scheduler port dequeue. Reads up to n_pkts from the
 * port scheduler and stores them in the pkts array and returns the
 * number of packets actually read. The pkts array needs to be
 * pre-allocated by the caller with at least n_pkts entries.
 *
 * @param port
 *   Handle to port scheduler instance
 * @param pkts
 *   Pre-allocated packet descriptor array where the packets dequeued
 *   from the port scheduler should be stored
 * @param n_pkts
 *   Number of packets to dequeue from the port scheduler
 * @return
 *   Number of packets successfully dequeued and placed in the pkts array
 */
/* NOTE(review): the return-type line (a packet count, per the @return
 * text) is not visible in this chunk — confirm against the full
 * header. */
rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts);
514 #endif /* __INCLUDE_RTE_SCHED_H__ */