/*-
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __INCLUDE_RTE_SCHED_H__
#define __INCLUDE_RTE_SCHED_H__
/**
 * @file
 * RTE Hierarchical Scheduler
 *
 * The hierarchical scheduler prioritizes the transmission of packets from different
 * users and traffic classes according to the Service Level Agreements (SLAs) defined
 * for the current network node.
 *
 * The scheduler supports thousands of packet queues grouped under a 5-level hierarchy:
 *     1. Port:
 *           - Typical usage: output Ethernet port;
 *           - Multiple ports are scheduled in round robin order with equal priority;
 *     2. Subport:
 *           - Typical usage: group of users;
 *           - Traffic shaping using the token bucket algorithm (one bucket per subport);
 *           - Upper limit enforced per traffic class at subport level;
 *           - Lower priority traffic classes able to reuse subport bandwidth currently
 *             unused by higher priority traffic classes of the same subport;
 *           - When any subport traffic class is oversubscribed (configuration time
 *             event), the usage of subport member pipes with high demand for that
 *             traffic class is truncated to a dynamically adjusted value with no
 *             impact to low demand pipes;
 *     3. Pipe:
 *           - Typical usage: individual user/subscriber;
 *           - Traffic shaping using the token bucket algorithm (one bucket per pipe);
 *     4. Traffic class:
 *           - Traffic classes of the same pipe handled in strict priority order;
 *           - Upper limit enforced per traffic class at the pipe level;
 *           - Lower priority traffic classes able to reuse pipe bandwidth currently
 *             unused by higher priority traffic classes of the same pipe;
 *     5. Queue:
 *           - Typical usage: queue hosting packets from one or multiple connections
 *             of the same traffic class belonging to the same user;
 *           - Weighted Round Robin (WRR) is used to service the queues within the same
 *             pipe traffic class. A sizing example is given below.
 */
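/*
 * Sizing note (worked example using only the constants defined below): each pipe has
 * 4 traffic classes x 4 queues = 16 queues. A hypothetical port with 8 subports and
 * 4096 pipes per subport would therefore manage 8 * 4096 * 16 = 524,288 packet queues.
 */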
#include <sys/types.h>
#include <rte_mbuf.h>
#include <rte_meter.h>

/** Random Early Detection (RED) */
#ifdef RTE_SCHED_RED
#include "rte_red.h"
#endif
/** Number of traffic classes per pipe (as well as subport). Cannot be changed. */
#define RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE    4

/** Number of queues per pipe traffic class. Cannot be changed. */
#define RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS    4

/** Number of queues per pipe. */
#define RTE_SCHED_QUEUES_PER_PIPE \
	(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * \
	RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
/** Maximum number of pipe profiles that can be defined per port. Compile-time configurable. */
#ifndef RTE_SCHED_PIPE_PROFILES_PER_PORT
#define RTE_SCHED_PIPE_PROFILES_PER_PORT      256
#endif
/** Ethernet framing overhead. Overhead fields per Ethernet frame:
    1. Preamble: 7 bytes;
    2. Start of Frame Delimiter (SFD): 1 byte;
    3. Frame Check Sequence (FCS): 4 bytes;
    4. Inter Frame Gap (IFG): 12 bytes.
    These fields add up to the 24-byte default below (7 + 1 + 4 + 12 = 24). The FCS is
    considered overhead only if not included in the packet length (field pkt.pkt_len of
    struct rte_mbuf). */
#ifndef RTE_SCHED_FRAME_OVERHEAD_DEFAULT
#define RTE_SCHED_FRAME_OVERHEAD_DEFAULT      24
#endif
/** Subport configuration parameters. The period and credits_per_period parameters are measured
    in bytes, with one byte meaning the time duration associated with the transmission of one byte
    on the physical medium of the output port. The subport or subport traffic class rate (measured
    as a percentage of the output port rate) is determined as credits_per_period divided by period.
    One credit represents one byte. */
struct rte_sched_subport_params {
	/* Subport token bucket */
	uint32_t tb_rate;                /**< Subport token bucket rate (measured in bytes per second) */
	uint32_t tb_size;                /**< Subport token bucket size (measured in credits) */

	/* Subport traffic classes */
	uint32_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Subport traffic class rates (measured in bytes per second) */
	uint32_t tc_period;              /**< Enforcement period for traffic class rates (measured in milliseconds) */
#ifdef RTE_SCHED_SUBPORT_TC_OV
	uint32_t tc_ov_period;           /**< Enforcement period for traffic class oversubscription (measured in milliseconds) */
#endif
};
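/*
 * Configuration sketch (illustrative only; the variable name and all rate values below
 * are assumptions made for the example, not values required by the API): a subport
 * shaped to 1.25 GB/s (10 GbE line rate), with every traffic class allowed up to the
 * full subport rate and a 10 ms enforcement period.
 *
 *	struct rte_sched_subport_params subport_params = {
 *		.tb_rate = 1250000000,           // bytes per second
 *		.tb_size = 1000000,              // credits (bytes)
 *		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
 *		.tc_period = 10,                 // milliseconds
 *	};
 */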
/** Subport statistics */
struct rte_sched_subport_stats {
	/* Packets */
	uint32_t n_pkts_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of packets successfully written to current
	                                     subport for each traffic class */
	uint32_t n_pkts_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of packets dropped by the current
	                                     subport for each traffic class due to subport queues being full or congested */

	/* Bytes */
	uint32_t n_bytes_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of bytes successfully written to current
	                                     subport for each traffic class */
	uint32_t n_bytes_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of bytes dropped by the current
	                                     subport for each traffic class due to subport queues being full or congested */
};
/** Pipe configuration parameters. The period and credits_per_period parameters are measured
    in bytes, with one byte meaning the time duration associated with the transmission of one byte
    on the physical medium of the output port. The pipe or pipe traffic class rate (measured as a
    percentage of the output port rate) is determined as credits_per_period divided by period.
    One credit represents one byte. */
struct rte_sched_pipe_params {
	/* Pipe token bucket */
	uint32_t tb_rate;                /**< Pipe token bucket rate (measured in bytes per second) */
	uint32_t tb_size;                /**< Pipe token bucket size (measured in credits) */

	/* Pipe traffic classes */
	uint32_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Pipe traffic class rates (measured in bytes per second) */
	uint32_t tc_period;              /**< Enforcement period for pipe traffic class rates (measured in milliseconds) */
#ifdef RTE_SCHED_SUBPORT_TC_OV
	uint8_t tc_ov_weight[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Traffic class weights to be used for the
	                                     current pipe in the event of subport traffic class oversubscription */
#endif

	/* Pipe queues */
	uint8_t wrr_weights[RTE_SCHED_QUEUES_PER_PIPE]; /**< WRR weights for the queues of the current pipe */
};
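/*
 * Pipe profile sketch (illustrative only; the variable name, rates and weights are
 * assumptions for the example): a 4 MB/s subscriber pipe whose traffic classes may each
 * use the full pipe rate, with equal WRR weights for all 16 queues.
 *
 *	struct rte_sched_pipe_params pipe_profile = {
 *		.tb_rate = 4000000,              // bytes per second
 *		.tb_size = 1000000,              // credits (bytes)
 *		.tc_rate = {4000000, 4000000, 4000000, 4000000},
 *		.tc_period = 40,                 // milliseconds
 *		.wrr_weights = {1, 1, 1, 1,  1, 1, 1, 1,  1, 1, 1, 1,  1, 1, 1, 1},
 *	};
 */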
/** Queue statistics */
struct rte_sched_queue_stats {
	/* Packets */
	uint32_t n_pkts;                 /**< Number of packets successfully written to current queue */
	uint32_t n_pkts_dropped;         /**< Number of packets dropped due to current queue being full or congested */

	/* Bytes */
	uint32_t n_bytes;                /**< Number of bytes successfully written to current queue */
	uint32_t n_bytes_dropped;        /**< Number of bytes dropped due to current queue being full or congested */
};
/** Port configuration parameters. */
struct rte_sched_port_params {
	const char *name;                /**< Literal string to be associated with the current port scheduler instance */
	int socket;                      /**< CPU socket ID where the memory for the port scheduler should be allocated */
	uint32_t rate;                   /**< Output port rate (measured in bytes per second) */
	uint32_t frame_overhead;         /**< Framing overhead per packet (measured in bytes) */
	uint32_t n_subports_per_port;    /**< Number of subports for the current port scheduler instance */
	uint32_t n_pipes_per_subport;    /**< Number of pipes for each port scheduler subport */
	uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Packet queue size for each traffic class. All queues
	                                     within the same pipe traffic class have the same size. Queues from
	                                     different pipes serving the same traffic class have the same size. */
	struct rte_sched_pipe_params *pipe_profiles; /**< Pipe profile table defined for the current port scheduler instance.
	                                     Every pipe of the current port scheduler is configured using one of the
	                                     profiles from this table. */
	uint32_t n_pipe_profiles;        /**< Number of profiles in the pipe profile table */
#ifdef RTE_SCHED_RED
	struct rte_red_params red_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][e_RTE_METER_COLORS]; /**< RED parameters */
#endif
};
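/*
 * Port configuration sketch (illustrative only; every numeric value, the port name and
 * the pipe_profile variable are assumptions carried over from the sketches above): a
 * 10 GbE output port with one subport, 4096 pipes per subport, 64-packet queues and a
 * single pipe profile.
 *
 *	struct rte_sched_port_params port_params = {
 *		.name = "port_sched_0",
 *		.socket = 0,
 *		.rate = 1250000000,              // bytes per second (10 Gbit/s)
 *		.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
 *		.n_subports_per_port = 1,
 *		.n_pipes_per_subport = 4096,
 *		.qsize = {64, 64, 64, 64},
 *		.pipe_profiles = &pipe_profile,  // e.g. the hypothetical profile sketched above
 *		.n_pipe_profiles = 1,
 *	};
 */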
/** Path through the scheduler hierarchy used by the scheduler enqueue operation to
    identify the destination queue for the current packet. Stored in the field pkt.hash.sched
    of struct rte_mbuf of each packet, typically written by the classification stage and read
    by the scheduler enqueue operation. */
struct rte_sched_port_hierarchy {
	uint32_t queue:2;                /**< Queue ID (0 .. 3) */
	uint32_t traffic_class:2;        /**< Traffic class ID (0 .. 3) */
	uint32_t pipe:20;                /**< Pipe ID */
	uint32_t subport:6;              /**< Subport ID */
	uint32_t color:2;                /**< Color */
};
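/*
 * Note: the bit-field widths above sum to 2 + 2 + 20 + 6 + 2 = 32 bits, which is what
 * allows the whole hierarchy path to be overlaid on the pkt.hash.sched field of
 * struct rte_mbuf (assuming that field is 32 bits wide, as the cast in
 * rte_sched_port_pkt_write() below relies on).
 */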
/**
 * Hierarchical scheduler port configuration
 *
 * @param params
 *   Port scheduler configuration parameter structure
 * @return
 *   Handle to port scheduler instance upon success or NULL otherwise.
 */
struct rte_sched_port *
rte_sched_port_config(struct rte_sched_port_params *params);
/**
 * Hierarchical scheduler port free
 *
 * @param port
 *   Handle to port scheduler instance
 */
void
rte_sched_port_free(struct rte_sched_port *port);
/**
 * Hierarchical scheduler subport configuration
 *
 * @param port
 *   Handle to port scheduler instance
 * @param subport_id
 *   Subport ID
 * @param params
 *   Subport configuration parameters
 * @return
 *   0 upon success, error code otherwise
 */
int
rte_sched_subport_config(struct rte_sched_port *port,
	uint32_t subport_id,
	struct rte_sched_subport_params *params);
/**
 * Hierarchical scheduler pipe configuration
 *
 * @param port
 *   Handle to port scheduler instance
 * @param subport_id
 *   Subport ID
 * @param pipe_id
 *   Pipe ID within subport
 * @param pipe_profile
 *   ID of port-level pre-configured pipe profile
 * @return
 *   0 upon success, error code otherwise
 */
int
rte_sched_pipe_config(struct rte_sched_port *port,
	uint32_t subport_id,
	uint32_t pipe_id,
	int32_t pipe_profile);
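/*
 * Configuration flow sketch (illustrative only; port_params and subport_params refer to
 * the hypothetical structures sketched above, pipe profile 0 is assumed to exist, and
 * error handling is reduced to rte_exit()):
 *
 *	struct rte_sched_port *port = rte_sched_port_config(&port_params);
 *	if (port == NULL)
 *		rte_exit(EXIT_FAILURE, "Port scheduler configuration failed\n");
 *
 *	for (uint32_t subport_id = 0; subport_id < port_params.n_subports_per_port; subport_id++) {
 *		if (rte_sched_subport_config(port, subport_id, &subport_params) != 0)
 *			rte_exit(EXIT_FAILURE, "Subport %u configuration failed\n", subport_id);
 *
 *		for (uint32_t pipe_id = 0; pipe_id < port_params.n_pipes_per_subport; pipe_id++)
 *			if (rte_sched_pipe_config(port, subport_id, pipe_id, 0) != 0)
 *				rte_exit(EXIT_FAILURE, "Pipe %u configuration failed\n", pipe_id);
 *	}
 */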
/**
 * Hierarchical scheduler memory footprint size per port
 *
 * @param params
 *   Port scheduler configuration parameter structure
 * @return
 *   Memory footprint size in bytes upon success, 0 otherwise
 */
uint32_t
rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params);
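/*
 * Usage sketch (illustrative only; port_params is the hypothetical structure sketched
 * above): validate a configuration and report its memory needs before creating the port.
 *
 *	uint32_t size = rte_sched_port_get_memory_footprint(&port_params);
 *	if (size == 0)
 *		rte_exit(EXIT_FAILURE, "Invalid port scheduler configuration\n");
 *	printf("Port scheduler requires %u bytes\n", size);
 */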
/**
 * Hierarchical scheduler subport statistics read
 *
 * @param port
 *   Handle to port scheduler instance
 * @param subport_id
 *   Subport ID
 * @param stats
 *   Pointer to pre-allocated subport statistics structure where the statistics
 *   counters should be stored
 * @param tc_ov
 *   Pointer to pre-allocated 4-entry array where the oversubscription status for
 *   each of the 4 subport traffic classes should be stored.
 * @return
 *   0 upon success, error code otherwise
 */
int
rte_sched_subport_read_stats(struct rte_sched_port *port,
	uint32_t subport_id,
	struct rte_sched_subport_stats *stats,
	uint32_t *tc_ov);
/**
 * Hierarchical scheduler queue statistics read
 *
 * @param port
 *   Handle to port scheduler instance
 * @param queue_id
 *   Queue ID within port scheduler
 * @param stats
 *   Pointer to pre-allocated queue statistics structure where the statistics
 *   counters should be stored
 * @param qlen
 *   Pointer to pre-allocated variable where the current queue length should be stored.
 * @return
 *   0 upon success, error code otherwise
 */
int
rte_sched_queue_read_stats(struct rte_sched_port *port,
	uint32_t queue_id,
	struct rte_sched_queue_stats *stats,
	uint16_t *qlen);
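/*
 * Statistics sketch (illustrative only; port is the handle returned by
 * rte_sched_port_config(), and subport/queue ID 0 are arbitrary example values):
 *
 *	struct rte_sched_subport_stats subport_stats;
 *	uint32_t tc_ov[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
 *	struct rte_sched_queue_stats queue_stats;
 *	uint16_t qlen;
 *
 *	rte_sched_subport_read_stats(port, 0, &subport_stats, tc_ov);
 *	rte_sched_queue_read_stats(port, 0, &queue_stats, &qlen);
 */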
/**
 * Scheduler hierarchy path write to packet descriptor. Typically called by the
 * packet classification stage.
 *
 * @param pkt
 *   Packet descriptor handle
 * @param subport
 *   Subport ID
 * @param pipe
 *   Pipe ID within subport
 * @param traffic_class
 *   Traffic class ID within pipe (0 .. 3)
 * @param queue
 *   Queue ID within pipe traffic class (0 .. 3)
 * @param color
 *   Packet color
 */
static inline void
rte_sched_port_pkt_write(struct rte_mbuf *pkt,
	uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue, enum rte_meter_color color)
{
	struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;

	sched->color = (uint32_t) color;
	sched->subport = subport;
	sched->pipe = pipe;
	sched->traffic_class = traffic_class;
	sched->queue = queue;
}
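/*
 * Classification sketch (illustrative only; pkt is a hypothetical struct rte_mbuf *, and
 * the subport/pipe/traffic class/queue values would normally be derived from packet
 * header fields by the classifier):
 *
 *	rte_sched_port_pkt_write(pkt, 0, 1000, 2, 3, e_RTE_METER_GREEN);
 */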
/**
 * Scheduler hierarchy path read from packet descriptor (struct rte_mbuf). Typically
 * called as part of the hierarchical scheduler enqueue operation. The subport,
 * pipe, traffic class and queue parameters need to be pre-allocated by the caller.
 *
 * @param pkt
 *   Packet descriptor handle
 * @param subport
 *   Subport ID
 * @param pipe
 *   Pipe ID within subport
 * @param traffic_class
 *   Traffic class ID within pipe (0 .. 3)
 * @param queue
 *   Queue ID within pipe traffic class (0 .. 3)
 */
static inline void
rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint32_t *pipe, uint32_t *traffic_class, uint32_t *queue)
{
	struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;

	*subport = sched->subport;
	*pipe = sched->pipe;
	*traffic_class = sched->traffic_class;
	*queue = sched->queue;
}
/** Read the packet color previously written to the packet descriptor by the classification stage. */
static inline enum rte_meter_color
rte_sched_port_pkt_read_color(struct rte_mbuf *pkt)
{
	struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;

	return (enum rte_meter_color) sched->color;
}
/**
 * Hierarchical scheduler port enqueue. Writes up to n_pkts to the port scheduler and
 * returns the number of packets actually written. For each packet, the port scheduler
 * queue to write the packet to is identified by reading the hierarchy path from the
 * packet descriptor; if the queue is full or congested and the packet is not written
 * to the queue, then the packet is automatically dropped without any action required
 * from the caller.
 *
 * @param port
 *   Handle to port scheduler instance
 * @param pkts
 *   Array storing the packet descriptor handles
 * @param n_pkts
 *   Number of packets to enqueue from the pkts array into the port scheduler
 * @return
 *   Number of packets successfully enqueued
 */
int
rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts);
/**
 * Hierarchical scheduler port dequeue. Reads up to n_pkts from the port scheduler,
 * stores them in the pkts array and returns the number of packets actually read.
 * The pkts array needs to be pre-allocated by the caller with at least n_pkts entries.
 *
 * @param port
 *   Handle to port scheduler instance
 * @param pkts
 *   Pre-allocated packet descriptor array where the packets dequeued from the port
 *   scheduler should be stored
 * @param n_pkts
 *   Number of packets to dequeue from the port scheduler
 * @return
 *   Number of packets successfully dequeued and placed in the pkts array
 */
int
rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts);
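/*
 * Run-time sketch (illustrative only; rx_burst() and tx_burst() stand in for whatever
 * produces and consumes packets, e.g. rte_eth_rx_burst()/rte_eth_tx_burst(), and
 * BURST_SIZE is a hypothetical constant):
 *
 *	struct rte_mbuf *pkts[BURST_SIZE];
 *	uint32_t n_rx, n_tx;
 *
 *	for ( ; ; ) {
 *		n_rx = rx_burst(pkts, BURST_SIZE);        // packets already carry their hierarchy path
 *		rte_sched_port_enqueue(port, pkts, n_rx); // non-enqueued packets are dropped internally
 *
 *		n_tx = rte_sched_port_dequeue(port, pkts, BURST_SIZE);
 *		tx_burst(pkts, n_tx);
 *	}
 */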
#endif /* __INCLUDE_RTE_SCHED_H__ */