X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_sched%2Frte_sched.h;h=8a5a93c98828580840e27606d8d7aa300e5829b1;hb=4bf2b36fd14da74f5dd5c111501698a5b7cb0f6d;hp=f78a311e2ddc399adec034f36d56377880594aea;hpb=835c5409a7bac3055b82bebee65d8ada7f20d332;p=dpdk.git diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h index f78a311e2d..8a5a93c988 100644 --- a/lib/librte_sched/rte_sched.h +++ b/lib/librte_sched/rte_sched.h @@ -1,35 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2013 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation */ #ifndef __INCLUDE_RTE_SCHED_H__ @@ -43,41 +13,51 @@ extern "C" { * @file * RTE Hierarchical Scheduler * - * The hierarchical scheduler prioritizes the transmission of packets from different - * users and traffic classes according to the Service Level Agreements (SLAs) defined - * for the current network node. + * The hierarchical scheduler prioritizes the transmission of packets + * from different users and traffic classes according to the Service + * Level Agreements (SLAs) defined for the current network node. * - * The scheduler supports thousands of packet queues grouped under a 5-level hierarchy: - * 1. Port: + * The scheduler supports thousands of packet queues grouped under a + * 5-level hierarchy: + * 1. Port: * - Typical usage: output Ethernet port; - * - Multiple ports are scheduled in round robin order with equal priority; + * - Multiple ports are scheduled in round robin order with + * equal priority; * 2. 
Subport: * - Typical usage: group of users; - * - Traffic shaping using the token bucket algorithm (one bucket per subport); + * - Traffic shaping using the token bucket algorithm + * (one bucket per subport); * - Upper limit enforced per traffic class at subport level; - * - Lower priority traffic classes able to reuse subport bandwidth currently - * unused by higher priority traffic classes of the same subport; - * - When any subport traffic class is oversubscribed (configuration time - * event), the usage of subport member pipes with high demand for that - * traffic class pipes is truncated to a dynamically adjusted value with no + * - Lower priority traffic classes able to reuse subport + * bandwidth currently unused by higher priority traffic + * classes of the same subport; + * - When any subport traffic class is oversubscribed + * (configuration time event), the usage of subport member + * pipes with high demand for that traffic class pipes is + * truncated to a dynamically adjusted value with no * impact to low demand pipes; - * 3. Pipe: + * 3. Pipe: * - Typical usage: individual user/subscriber; - * - Traffic shaping using the token bucket algorithm (one bucket per pipe); + * - Traffic shaping using the token bucket algorithm + * (one bucket per pipe); * 4. Traffic class: - * - Traffic classes of the same pipe handled in strict priority order; + * - Traffic classes of the same pipe handled in strict + * priority order; * - Upper limit enforced per traffic class at the pipe level; - * - Lower priority traffic classes able to reuse pipe bandwidth currently - * unused by higher priority traffic classes of the same pipe; + * - Lower priority traffic classes able to reuse pipe + * bandwidth currently unused by higher priority traffic + * classes of the same pipe; * 5. Queue: - * - Typical usage: queue hosting packets from one or multiple connections - * of same traffic class belonging to the same user; - * - Weighted Round Robin (WRR) is used to service the queues within same - * pipe traffic class. + * - Typical usage: queue hosting packets from one or + * multiple connections of same traffic class belonging to + * the same user; + * - Weighted Round Robin (WRR) is used to service the + * queues within same pipe lowest priority traffic class (best-effort). * - ***/ + */ #include +#include #include #include @@ -86,126 +66,200 @@ extern "C" { #include "rte_red.h" #endif -/** Number of traffic classes per pipe (as well as subport). Cannot be changed. */ -#define RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE 4 +/** Maximum number of queues per pipe. + * Note that the multiple queues (power of 2) can only be assigned to + * lowest priority (best-effort) traffic class. Other higher priority traffic + * classes can only have one queue. + * Can not change. + * + * @see struct rte_sched_port_params + */ +#define RTE_SCHED_QUEUES_PER_PIPE 16 -/** Number of queues per pipe traffic class. Cannot be changed. */ -#define RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS 4 +/** Number of WRR queues for best-effort traffic class per pipe. + * + * @see struct rte_sched_pipe_params + */ +#define RTE_SCHED_BE_QUEUES_PER_PIPE 4 -/** Number of queues per pipe. */ -#define RTE_SCHED_QUEUES_PER_PIPE \ - (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * \ - RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS) +/** Number of traffic classes per pipe (as well as subport). 
+ * @see struct rte_sched_subport_params + * @see struct rte_sched_pipe_params + */ +#define RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE \ +(RTE_SCHED_QUEUES_PER_PIPE - RTE_SCHED_BE_QUEUES_PER_PIPE + 1) -/** Maximum number of pipe profiles that can be defined per port. Compile-time configurable.*/ -#ifndef RTE_SCHED_PIPE_PROFILES_PER_PORT -#define RTE_SCHED_PIPE_PROFILES_PER_PORT 256 -#endif +/** Best-effort traffic class ID + * Can not change. + */ +#define RTE_SCHED_TRAFFIC_CLASS_BE (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) -/** Ethernet framing overhead. Overhead fields per Ethernet frame: - 1. Preamble: 7 bytes; - 2. Start of Frame Delimiter (SFD): 1 byte; - 3. Frame Check Sequence (FCS): 4 bytes; - 4. Inter Frame Gap (IFG): 12 bytes. -The FCS is considered overhead only if not included in the packet length (field pkt.pkt_len -of struct rte_mbuf). */ +/* + * Ethernet framing overhead. Overhead fields per Ethernet frame: + * 1. Preamble: 7 bytes; + * 2. Start of Frame Delimiter (SFD): 1 byte; + * 3. Frame Check Sequence (FCS): 4 bytes; + * 4. Inter Frame Gap (IFG): 12 bytes. + * + * The FCS is considered overhead only if not included in the packet + * length (field pkt_len of struct rte_mbuf). + * + * @see struct rte_sched_port_params + */ #ifndef RTE_SCHED_FRAME_OVERHEAD_DEFAULT #define RTE_SCHED_FRAME_OVERHEAD_DEFAULT 24 #endif -/** Subport configuration parameters. The period and credits_per_period parameters are measured -in bytes, with one byte meaning the time duration associated with the transmission of one byte -on the physical medium of the output port, with pipe or pipe traffic class rate (measured as -percentage of output port rate) determined as credits_per_period divided by period. One credit -represents one byte. */ +/* + * Pipe configuration parameters. The period and credits_per_period + * parameters are measured in bytes, with one byte meaning the time + * duration associated with the transmission of one byte on the + * physical medium of the output port, with pipe or pipe traffic class + * rate (measured as percentage of output port rate) determined as + * credits_per_period divided by period. One credit represents one + * byte. + */ +struct rte_sched_pipe_params { + /** Token bucket rate (measured in bytes per second) */ + uint64_t tb_rate; + + /** Token bucket size (measured in credits) */ + uint64_t tb_size; + + /** Traffic class rates (measured in bytes per second) */ + uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + + /** Enforcement period (measured in milliseconds) */ + uint64_t tc_period; + + /** Best-effort traffic class oversubscription weight */ + uint8_t tc_ov_weight; + + /** WRR weights of best-effort traffic class queues */ + uint8_t wrr_weights[RTE_SCHED_BE_QUEUES_PER_PIPE]; +}; + +/* + * Subport configuration parameters. The period and credits_per_period + * parameters are measured in bytes, with one byte meaning the time + * duration associated with the transmission of one byte on the + * physical medium of the output port, with pipe or pipe traffic class + * rate (measured as percentage of output port rate) determined as + * credits_per_period divided by period. One credit represents one + * byte. 
+ */ struct rte_sched_subport_params { - /* Subport token bucket */ - uint32_t tb_rate; /**< Subport token bucket rate (measured in bytes per second) */ - uint32_t tb_size; /**< Subport token bucket size (measured in credits) */ - - /* Subport traffic classes */ - uint32_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Subport traffic class rates (measured in bytes per second) */ - uint32_t tc_period; /**< Enforcement period for traffic class rates (measured in milliseconds) */ + /** Token bucket rate (measured in bytes per second) */ + uint64_t tb_rate; + + /** Token bucket size (measured in credits) */ + uint64_t tb_size; + + /** Traffic class rates (measured in bytes per second) */ + uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + + /** Enforcement period for rates (measured in milliseconds) */ + uint64_t tc_period; + + /** Number of subport pipes. + * The subport can enable/allocate fewer pipes than the maximum + * number set through struct port_params::n_max_pipes_per_subport, + * as needed, to avoid memory allocation for the queues of the + * pipes that are not really needed. + */ + uint32_t n_pipes_per_subport_enabled; + + /** Packet queue size for each traffic class. + * All the pipes within the same subport share the similar + * configuration for the queues. + */ + uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + + /** Pipe profile table. + * Every pipe is configured using one of the profiles from this table. + */ + struct rte_sched_pipe_params *pipe_profiles; + + /** Profiles in the pipe profile table */ + uint32_t n_pipe_profiles; + + /** Max allowed profiles in the pipe profile table */ + uint32_t n_max_pipe_profiles; + +#ifdef RTE_SCHED_RED + /** RED parameters */ + struct rte_red_params red_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS]; +#endif }; /** Subport statistics */ struct rte_sched_subport_stats { - /* Packets */ - uint32_t n_pkts_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of packets successfully written to current - subport for each traffic class */ - uint32_t n_pkts_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of packets dropped by the current - subport for each traffic class due to subport queues being full or congested*/ - - /* Bytes */ - uint32_t n_bytes_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of bytes successfully written to current - subport for each traffic class*/ - uint32_t n_bytes_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of bytes dropped by the current - subport for each traffic class due to subport queues being full or congested */ -}; + /** Number of packets successfully written */ + uint64_t n_pkts_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; -/** Pipe configuration parameters. The period and credits_per_period parameters are measured -in bytes, with one byte meaning the time duration associated with the transmission of one byte -on the physical medium of the output port, with pipe or pipe traffic class rate (measured as -percentage of output port rate) determined as credits_per_period divided by period. One credit -represents one byte. 
*/ -struct rte_sched_pipe_params { - /* Pipe token bucket */ - uint32_t tb_rate; /**< Pipe token bucket rate (measured in bytes per second) */ - uint32_t tb_size; /**< Pipe token bucket size (measured in credits) */ - - /* Pipe traffic classes */ - uint32_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Pipe traffic class rates (measured in bytes per second) */ - uint32_t tc_period; /**< Enforcement period for pipe traffic class rates (measured in milliseconds) */ -#ifdef RTE_SCHED_SUBPORT_TC_OV - uint8_t tc_ov_weight; /**< Weight for the current pipe in the event of subport traffic class 3 oversubscription */ + /** Number of packets dropped */ + uint64_t n_pkts_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + + /** Number of bytes successfully written for each traffic class */ + uint64_t n_bytes_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + + /** Number of bytes dropped for each traffic class */ + uint64_t n_bytes_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + +#ifdef RTE_SCHED_RED + /** Number of packets dropped by red */ + uint64_t n_pkts_red_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; #endif - - /* Pipe queues */ - uint8_t wrr_weights[RTE_SCHED_QUEUES_PER_PIPE]; /**< WRR weights for the queues of the current pipe */ }; /** Queue statistics */ struct rte_sched_queue_stats { - /* Packets */ - uint32_t n_pkts; /**< Number of packets successfully written to current queue */ - uint32_t n_pkts_dropped; /**< Number of packets dropped due to current queue being full or congested */ - - /* Bytes */ - uint32_t n_bytes; /**< Number of bytes successfully written to current queue */ - uint32_t n_bytes_dropped; /**< Number of bytes dropped due to current queue being full or congested */ -}; + /** Packets successfully written */ + uint64_t n_pkts; + + /** Packets dropped */ + uint64_t n_pkts_dropped; -/** Port configuration parameters. */ -struct rte_sched_port_params { - const char *name; /**< Literal string to be associated to the current port scheduler instance */ - int socket; /**< CPU socket ID where the memory for port scheduler should be allocated */ - uint32_t rate; /**< Output port rate (measured in bytes per second) */ - uint32_t mtu; /**< Maximum Ethernet frame size (measured in bytes). Should not include the framing overhead. */ - uint32_t frame_overhead; /**< Framing overhead per packet (measured in bytes) */ - uint32_t n_subports_per_port; /**< Number of subports for the current port scheduler instance*/ - uint32_t n_pipes_per_subport; /**< Number of pipes for each port scheduler subport */ - uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Packet queue size for each traffic class. All queues - within the same pipe traffic class have the same size. Queues from - different pipes serving the same traffic class have the same size. */ - struct rte_sched_pipe_params *pipe_profiles; /**< Pipe profile table defined for current port scheduler instance. - Every pipe of the current port scheduler is configured using one of the - profiles from this table. 
*/ - uint32_t n_pipe_profiles; /**< Number of profiles in the pipe profile table */ #ifdef RTE_SCHED_RED - struct rte_red_params red_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][e_RTE_METER_COLORS]; /**< RED parameters */ + /** Packets dropped by RED */ + uint64_t n_pkts_red_dropped; #endif + + /** Bytes successfully written */ + uint64_t n_bytes; + + /** Bytes dropped */ + uint64_t n_bytes_dropped; }; -/** Path through the scheduler hierarchy used by the scheduler enqueue operation to -identify the destination queue for the current packet. Stored in the field pkt.hash.sched -of struct rte_mbuf of each packet, typically written by the classification stage and read by -scheduler enqueue.*/ -struct rte_sched_port_hierarchy { - uint32_t queue:2; /**< Queue ID (0 .. 3) */ - uint32_t traffic_class:2; /**< Traffic class ID (0 .. 3)*/ - uint32_t pipe:20; /**< Pipe ID */ - uint32_t subport:6; /**< Subport ID */ - uint32_t color:2; /**< Color */ +/** Port configuration parameters. */ +struct rte_sched_port_params { + /** Name of the port to be associated */ + const char *name; + + /** CPU socket ID */ + int socket; + + /** Output port rate (measured in bytes per second) */ + uint64_t rate; + + /** Maximum Ethernet frame size (measured in bytes). + * Should not include the framing overhead. + */ + uint32_t mtu; + + /** Framing overhead per packet (measured in bytes) */ + uint32_t frame_overhead; + + /** Number of subports */ + uint32_t n_subports_per_port; + + /** Maximum number of subport pipes. + * This parameter is used to reserve a fixed number of bits + * in struct rte_mbuf::sched.queue_id for the pipe_id for all + * the subports of the same port. + */ + uint32_t n_pipes_per_subport; }; /* @@ -221,7 +275,7 @@ struct rte_sched_port_hierarchy { * @return * Handle to port scheduler instance upon success or NULL otherwise. */ -struct rte_sched_port * +struct rte_sched_port * rte_sched_port_config(struct rte_sched_port_params *params); /** @@ -233,6 +287,30 @@ rte_sched_port_config(struct rte_sched_port_params *params); void rte_sched_port_free(struct rte_sched_port *port); +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Hierarchical scheduler pipe profile add + * + * @param port + * Handle to port scheduler instance + * @param subport_id + * Subport ID + * @param params + * Pipe profile parameters + * @param pipe_profile_id + * Set to valid profile id when profile is added successfully. 
+ * @return + * 0 upon success, error code otherwise + */ +__rte_experimental +int +rte_sched_subport_pipe_profile_add(struct rte_sched_port *port, + uint32_t subport_id, + struct rte_sched_pipe_params *params, + uint32_t *pipe_profile_id); + /** * Hierarchical scheduler subport configuration * @@ -246,7 +324,7 @@ rte_sched_port_free(struct rte_sched_port *port); * 0 upon success, error code otherwise */ int -rte_sched_subport_config(struct rte_sched_port *port, +rte_sched_subport_config(struct rte_sched_port *port, uint32_t subport_id, struct rte_sched_subport_params *params); @@ -260,29 +338,31 @@ rte_sched_subport_config(struct rte_sched_port *port, * @param pipe_id * Pipe ID within subport * @param pipe_profile - * ID of port-level pre-configured pipe profile + * ID of subport-level pre-configured pipe profile * @return * 0 upon success, error code otherwise */ int rte_sched_pipe_config(struct rte_sched_port *port, - uint32_t subport_id, + uint32_t subport_id, uint32_t pipe_id, int32_t pipe_profile); /** * Hierarchical scheduler memory footprint size per port * - * @param params + * @param port_params * Port scheduler configuration parameter structure + * @param subport_params + * Array of subport parameter structures * @return * Memory footprint size in bytes upon success, 0 otherwise */ uint32_t -rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params); - +rte_sched_port_get_memory_footprint(struct rte_sched_port_params *port_params, + struct rte_sched_subport_params **subport_params); /* - * Statistics + * Statistics * ***/ @@ -294,11 +374,12 @@ rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params); * @param subport_id * Subport ID * @param stats - * Pointer to pre-allocated subport statistics structure where the statistics + * Pointer to pre-allocated subport statistics structure where the statistics * counters should be stored * @param tc_ov - * Pointer to pre-allocated 4-entry array where the oversubscription status for - * each of the 4 subport traffic classes should be stored. + * Pointer to pre-allocated RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE-entry array + * where the oversubscription status for each of the subport traffic classes + * should be stored. * @return * 0 upon success, error code otherwise */ @@ -316,10 +397,11 @@ rte_sched_subport_read_stats(struct rte_sched_port *port, * @param queue_id * Queue ID within port scheduler * @param stats - * Pointer to pre-allocated subport statistics structure where the statistics + * Pointer to pre-allocated subport statistics structure where the statistics * counters should be stored * @param qlen - * Pointer to pre-allocated variable where the current queue length should be stored. + * Pointer to pre-allocated variable where the current queue length + * should be stored. * @return * 0 upon success, error code otherwise */ @@ -329,15 +411,12 @@ rte_sched_queue_read_stats(struct rte_sched_port *port, struct rte_sched_queue_stats *stats, uint16_t *qlen); -/* - * Run-time - * - ***/ - /** - * Scheduler hierarchy path write to packet descriptor. Typically called by the - * packet classification stage. - * + * Scheduler hierarchy path write to packet descriptor. Typically + * called by the packet classification stage. 
+ * + * @param port + * Handle to port scheduler instance * @param pkt * Packet descriptor handle * @param subport @@ -345,28 +424,27 @@ rte_sched_queue_read_stats(struct rte_sched_port *port, * @param pipe * Pipe ID within subport * @param traffic_class - * Traffic class ID within pipe (0 .. 3) + * Traffic class ID within pipe (0 .. RTE_SCHED_TRAFFIC_CLASS_BE) * @param queue - * Queue ID within pipe traffic class (0 .. 3) + * Queue ID within pipe traffic class, 0 for high priority TCs, and + * 0 .. (RTE_SCHED_BE_QUEUES_PER_PIPE - 1) for best-effort TC + * @param color + * Packet color set */ -static inline void -rte_sched_port_pkt_write(struct rte_mbuf *pkt, - uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue, enum rte_meter_color color) -{ - struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched; - - sched->color = (uint32_t) color; - sched->subport = subport; - sched->pipe = pipe; - sched->traffic_class = traffic_class; - sched->queue = queue; -} +void +rte_sched_port_pkt_write(struct rte_sched_port *port, + struct rte_mbuf *pkt, + uint32_t subport, uint32_t pipe, uint32_t traffic_class, + uint32_t queue, enum rte_color color); /** - * Scheduler hierarchy path read from packet descriptor (struct rte_mbuf). Typically - * called as part of the hierarchical scheduler enqueue operation. The subport, - * pipe, traffic class and queue parameters need to be pre-allocated by the caller. + * Scheduler hierarchy path read from packet descriptor (struct + * rte_mbuf). Typically called as part of the hierarchical scheduler + * enqueue operation. The subport, pipe, traffic class and queue + * parameters need to be pre-allocated by the caller. * + * @param port + * Handle to port scheduler instance * @param pkt * Packet descriptor handle * @param subport @@ -374,37 +452,28 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt, * @param pipe * Pipe ID within subport * @param traffic_class - * Traffic class ID within pipe (0 .. 3) + * Traffic class ID within pipe (0 .. RTE_SCHED_TRAFFIC_CLASS_BE) * @param queue - * Queue ID within pipe traffic class (0 .. 3) - * + * Queue ID within pipe traffic class, 0 for high priority TCs, and + * 0 .. (RTE_SCHED_BE_QUEUES_PER_PIPE - 1) for best-effort TC */ -static inline void -rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint32_t *pipe, uint32_t *traffic_class, uint32_t *queue) -{ - struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched; - - *subport = sched->subport; - *pipe = sched->pipe; - *traffic_class = sched->traffic_class; - *queue = sched->queue; -} - -static inline enum rte_meter_color -rte_sched_port_pkt_read_color(struct rte_mbuf *pkt) -{ - struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched; +void +rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port, + const struct rte_mbuf *pkt, + uint32_t *subport, uint32_t *pipe, + uint32_t *traffic_class, uint32_t *queue); - return (enum rte_meter_color) sched->color; -} +enum rte_color +rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt); /** - * Hierarchical scheduler port enqueue. Writes up to n_pkts to port scheduler and - * returns the number of packets actually written. 
For each packet, the port scheduler - * queue to write the packet to is identified by reading the hierarchy path from the - * packet descriptor; if the queue is full or congested and the packet is not written - * to the queue, then the packet is automatically dropped without any action required - * from the caller. + * Hierarchical scheduler port enqueue. Writes up to n_pkts to port + * scheduler and returns the number of packets actually written. For + * each packet, the port scheduler queue to write the packet to is + * identified by reading the hierarchy path from the packet + * descriptor; if the queue is full or congested and the packet is not + * written to the queue, then the packet is automatically dropped + * without any action required from the caller. * * @param port * Handle to port scheduler instance @@ -419,14 +488,16 @@ int rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts); /** - * Hierarchical scheduler port dequeue. Reads up to n_pkts from the port scheduler - * and stores them in the pkts array and returns the number of packets actually read. - * The pkts array needs to be pre-allocated by the caller with at least n_pkts entries. + * Hierarchical scheduler port dequeue. Reads up to n_pkts from the + * port scheduler and stores them in the pkts array and returns the + * number of packets actually read. The pkts array needs to be + * pre-allocated by the caller with at least n_pkts entries. * * @param port * Handle to port scheduler instance * @param pkts - * Pre-allocated packet descriptor array where the packets dequeued from the port + * Pre-allocated packet descriptor array where the packets dequeued + * from the port * scheduler should be stored * @param n_pkts * Number of packets to dequeue from the port scheduler
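The hunks above cover the full configuration surface of the reworked API: port parameters, subport parameters (which now own the queue sizes and the pipe profile table), and pipe profiles with WRR only on the best-effort traffic class. The sketch below is not part of the patch; it is a minimal, illustrative example of how an application might wire these pieces together with the structures and prototypes declared in this header. The helper name app_sched_init and every numeric value (rates, periods, token bucket sizes, queue sizes, pipe count) are assumptions chosen for illustration, not values taken from the patch.

#include <rte_sched.h>

/*
 * Minimal configuration sketch (not part of the patch): one port, one
 * subport, 4096 pipes sharing a single pipe profile. All numeric values
 * are illustrative.
 */
static struct rte_sched_port *
app_sched_init(uint64_t port_rate, int socket_id)
{
	static struct rte_sched_pipe_params pipe_profile;
	static struct rte_sched_subport_params subport_params;
	struct rte_sched_port_params port_params = {
		.name = "sched_port_0",
		.socket = socket_id,
		.rate = port_rate,		/* bytes per second */
		.mtu = 1522,
		.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
		.n_subports_per_port = 1,
		.n_pipes_per_subport = 4096,
	};
	struct rte_sched_port *port;
	uint32_t i;

	/* Pipe profile: token bucket, per-TC rates, WRR weights for the
	 * best-effort traffic class (the only TC with multiple queues). */
	pipe_profile.tb_rate = port_rate / 4096;
	pipe_profile.tb_size = 1000000;
	pipe_profile.tc_period = 40;		/* milliseconds */
	pipe_profile.tc_ov_weight = 1;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		pipe_profile.tc_rate[i] = pipe_profile.tb_rate;
	for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++)
		pipe_profile.wrr_weights[i] = 1;

	/* Subport: owns the queue sizes and the pipe profile table in this
	 * version of the API. */
	subport_params.tb_rate = port_rate;
	subport_params.tb_size = 1000000;
	subport_params.tc_period = 10;		/* milliseconds */
	subport_params.n_pipes_per_subport_enabled = 4096;
	subport_params.pipe_profiles = &pipe_profile;
	subport_params.n_pipe_profiles = 1;
	subport_params.n_max_pipe_profiles = 1;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		subport_params.tc_rate[i] = port_rate;
		subport_params.qsize[i] = 64;	/* power of 2 */
	}

	port = rte_sched_port_config(&port_params);
	if (port == NULL)
		return NULL;

	if (rte_sched_subport_config(port, 0, &subport_params) != 0)
		goto fail;

	/* Every pipe of subport 0 uses profile 0 from the subport table. */
	for (i = 0; i < subport_params.n_pipes_per_subport_enabled; i++)
		if (rte_sched_pipe_config(port, 0, i, 0) != 0)
			goto fail;

	return port;

fail:
	rte_sched_port_free(port);
	return NULL;
}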
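For the run-time side documented in the last hunks (rte_sched_port_pkt_write(), rte_sched_port_enqueue(), and the dequeue call whose description closes this diff), a second sketch shows the expected call order: classification stamps the subport/pipe/traffic-class/queue path and color into each mbuf, enqueue consumes the burst (dropping internally on congestion, as the comments above state), and a later dequeue returns packets in scheduling order. The classification rule, burst size, and the app_sched_run helper are made up for illustration; rte_sched_port_dequeue() is assumed to mirror the enqueue signature (port, pkts, n_pkts), in line with the parameters documented in the final hunk.

/*
 * Run-time sketch (illustrative only), continuing the example above:
 * stamp the hierarchy path, enqueue, then dequeue. A real application
 * derives subport/pipe/tc/queue from packet headers or a lookup table.
 */
static void
app_sched_run(struct rte_sched_port *port,
	struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_mbuf *out[64];
	uint32_t i;
	int n_tx;

	for (i = 0; i < n_pkts; i++) {
		uint32_t subport = 0;
		uint32_t pipe = i % 4096;
		uint32_t tc = RTE_SCHED_TRAFFIC_CLASS_BE;
		/* Non-best-effort TCs have a single queue (queue must be 0);
		 * only the best-effort TC spreads over its 4 WRR queues. */
		uint32_t queue = i % RTE_SCHED_BE_QUEUES_PER_PIPE;

		rte_sched_port_pkt_write(port, pkts[i], subport, pipe,
			tc, queue, RTE_COLOR_GREEN);
	}

	/* Packets that do not fit are dropped inside enqueue; per the header
	 * comment, no action is required from the caller for them. */
	rte_sched_port_enqueue(port, pkts, n_pkts);

	/* Later, typically from the scheduling/TX loop: pull packets out in
	 * SLA order and hand them to the TX path, e.g. rte_eth_tx_burst(). */
	n_tx = rte_sched_port_dequeue(port, out, 64);
	(void)n_tx;
}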