+ struct rte_sched_pipe_params *params,
+ uint32_t *pipe_profile_id)
+{
+ struct rte_sched_subport *s;
+ struct rte_sched_pipe_profile *pp;
+ uint32_t i;
+ int status;
+
+ /* Port */
+ if (port == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter port\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Subport id must not exceed the max limit */
+ if (subport_id > port->n_subports_per_port) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for subport id\n", __func__);
+ return -EINVAL;
+ }
+
+ s = port->subports[subport_id];
+
+ /* Number of pipe profiles must not exceed the max limit */
+ if (s->n_pipe_profiles >= s->n_max_pipe_profiles) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Number of pipe profiles exceeds the max limit\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Pipe params */
+ status = pipe_profile_check(params, port->rate, &s->qsize[0]);
+ if (status != 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Pipe profile check failed(%d)\n", __func__, status);
+ return -EINVAL;
+ }
+
+ pp = &s->pipe_profiles[s->n_pipe_profiles];
+ rte_sched_pipe_profile_convert(s, params, pp, port->rate);
+
+ /* Pipe profile must not already exist */
+ for (i = 0; i < s->n_pipe_profiles; i++)
+ if (memcmp(s->pipe_profiles + i, pp, sizeof(*pp)) == 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Pipe profile exists\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Pipe profile commit */
+ *pipe_profile_id = s->n_pipe_profiles;
+ s->n_pipe_profiles++;
+
+ if (s->pipe_tc_be_rate_max < params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE])
+ s->pipe_tc_be_rate_max = params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];
+
+ rte_sched_port_log_pipe_profile(s, *pipe_profile_id);
+
+ return 0;
+}
+
+ /*
+  * Pack (subport, pipe, queue-within-pipe) into a single flat queue index.
+  * Layout: [subport | pipe | 4-bit pipe-queue]; the mask-based packing
+  * presumes power-of-two subport/pipe counts (cf. n_pipes_per_subport_log2)
+  * — NOTE(review): confirm against port configuration constraints.
+  */
+ static inline uint32_t
+ rte_sched_port_qindex(struct rte_sched_port *port,
+ 	uint32_t subport,
+ 	uint32_t pipe,
+ 	uint32_t traffic_class,
+ 	uint32_t queue)
+ {
+ 	uint32_t subport_field, pipe_field, queue_field;
+
+ 	/* Clamp each component to its valid range via masking. */
+ 	subport_field = subport & (port->n_subports_per_port - 1);
+ 	pipe_field = pipe &
+ 		(port->subports[subport]->n_pipes_per_subport_enabled - 1);
+ 	queue_field = (rte_sched_port_pipe_queue(port, traffic_class) + queue) &
+ 		(RTE_SCHED_QUEUES_PER_PIPE - 1);
+
+ 	return (subport_field << (port->n_pipes_per_subport_log2 + 4)) |
+ 		(pipe_field << 4) |
+ 		queue_field;
+ }
+
+ /*
+  * Encode the scheduling tree path (subport/pipe/traffic class/queue) and
+  * the packet color into the mbuf's sched field.
+  */
+ void
+ rte_sched_port_pkt_write(struct rte_sched_port *port,
+ 	struct rte_mbuf *pkt,
+ 	uint32_t subport, uint32_t pipe,
+ 	uint32_t traffic_class,
+ 	uint32_t queue, enum rte_color color)
+ {
+ 	uint32_t qid;
+
+ 	qid = rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
+ 	rte_mbuf_sched_set(pkt, qid, traffic_class, (uint8_t)color);
+ }
+
+ /*
+  * Decode the flat queue index stored in the mbuf's sched field back into
+  * its (subport, pipe, traffic class, queue) components — the inverse of
+  * rte_sched_port_pkt_write().
+  */
+ void
+ rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
+ 	const struct rte_mbuf *pkt,
+ 	uint32_t *subport, uint32_t *pipe,
+ 	uint32_t *traffic_class, uint32_t *queue)
+ {
+ 	uint32_t qid = rte_mbuf_sched_queue_get(pkt);
+ 	uint32_t pipe_mask;
+
+ 	*subport = qid >> (port->n_pipes_per_subport_log2 + 4);
+ 	pipe_mask = port->subports[*subport]->n_pipes_per_subport_enabled - 1;
+ 	*pipe = (qid >> 4) & pipe_mask;
+ 	*traffic_class = rte_sched_port_pipe_tc(port, qid);
+ 	*queue = rte_sched_port_tc_queue(port, qid);
+ }
+
+ /* Fetch the packet color previously stored in the mbuf's sched field. */
+ enum rte_color
+ rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
+ {
+ 	uint32_t color = rte_mbuf_sched_color_get(pkt);
+
+ 	return (enum rte_color)color;
+ }
+
+int
+rte_sched_subport_read_stats(struct rte_sched_port *port,
+ uint32_t subport_id,
+ struct rte_sched_subport_stats *stats,
+ uint32_t *tc_ov)