1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
8 #include <rte_common.h>
10 #include <rte_memory.h>
11 #include <rte_malloc.h>
12 #include <rte_cycles.h>
13 #include <rte_prefetch.h>
14 #include <rte_branch_prediction.h>
16 #include <rte_bitmap.h>
17 #include <rte_reciprocal.h>
19 #include "rte_sched.h"
20 #include "rte_sched_common.h"
21 #include "rte_approx.h"
23 #ifdef __INTEL_COMPILER
24 #pragma warning(disable:2259) /* conversion may lose significant bits */
27 #ifdef RTE_SCHED_VECTOR
31 #define SCHED_VECTOR_SSE4
32 #elif defined(__ARM_NEON)
33 #define SCHED_VECTOR_NEON
38 #define RTE_SCHED_TB_RATE_CONFIG_ERR (1e-7)
39 #define RTE_SCHED_WRR_SHIFT 3
40 #define RTE_SCHED_MAX_QUEUES_PER_TC RTE_SCHED_BE_QUEUES_PER_PIPE
41 #define RTE_SCHED_GRINDER_PCACHE_SIZE (64 / RTE_SCHED_QUEUES_PER_PIPE)
42 #define RTE_SCHED_PIPE_INVALID UINT32_MAX
43 #define RTE_SCHED_BMP_POS_INVALID UINT32_MAX
45 /* Scaling for cycles_per_byte calculation
46 * Chosen so that minimum rate is 480 bit/sec
48 #define RTE_SCHED_TIME_SHIFT 8
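/*
 * Worked example of the 480 bit/sec figure above (illustrative, assuming a
 * 1 GHz TSC): cycles_per_byte = (tsc_hz << RTE_SCHED_TIME_SHIFT) / rate is
 * stored as a 32-bit reciprocal, so it must fit in 32 bits:
 *   rate_min = tsc_hz * 2^8 / 2^32 = 10^9 / 2^24 ~= 60 bytes/sec ~= 480 bit/sec
 */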
50 struct rte_sched_pipe_profile {
51 /* Token bucket (TB) */
53 uint64_t tb_credits_per_period;
56 /* Pipe traffic classes */
58 uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
61 /* Pipe best-effort traffic class queues */
62 uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
65 struct rte_sched_pipe {
66 /* Token bucket (TB) */
67 uint64_t tb_time; /* time of last update */
70 /* Pipe profile and flags */
73 /* Traffic classes (TCs) */
74 uint64_t tc_time; /* time of next update */
75 uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
77 /* Weighted Round Robin (WRR) */
78 uint8_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
80 /* TC oversubscription */
81 uint64_t tc_ov_credits;
82 uint8_t tc_ov_period_id;
83 } __rte_cache_aligned;
85 struct rte_sched_queue {
90 struct rte_sched_queue_extra {
91 struct rte_sched_queue_stats stats;
102 e_GRINDER_PREFETCH_PIPE = 0,
103 e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
104 e_GRINDER_PREFETCH_MBUF,
108 struct rte_sched_subport_profile {
109 /* Token bucket (TB) */
111 uint64_t tb_credits_per_period;
114 uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
118 struct rte_sched_grinder {
120 uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
121 uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
126 enum grinder_state state;
129 struct rte_sched_subport *subport;
130 struct rte_sched_subport_profile *subport_params;
131 struct rte_sched_pipe *pipe;
132 struct rte_sched_pipe_profile *pipe_params;
135 uint8_t tccache_qmask[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
136 uint32_t tccache_qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
142 struct rte_sched_queue *queue[RTE_SCHED_MAX_QUEUES_PER_TC];
143 struct rte_mbuf **qbase[RTE_SCHED_MAX_QUEUES_PER_TC];
144 uint32_t qindex[RTE_SCHED_MAX_QUEUES_PER_TC];
148 struct rte_mbuf *pkt;
151 uint16_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
152 uint16_t wrr_mask[RTE_SCHED_BE_QUEUES_PER_PIPE];
153 uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
156 struct rte_sched_subport {
157 /* Token bucket (TB) */
158 uint64_t tb_time; /* time of last update */
161 /* Traffic classes (TCs) */
162 uint64_t tc_time; /* time of next update */
163 uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
165 /* TC oversubscription */
167 uint64_t tc_ov_wm_min;
168 uint64_t tc_ov_wm_max;
169 uint8_t tc_ov_period_id;
175 struct rte_sched_subport_stats stats __rte_cache_aligned;
177 /* subport profile */
180 uint32_t n_pipes_per_subport_enabled;
181 uint32_t n_pipe_profiles;
182 uint32_t n_max_pipe_profiles;
184 /* Pipe best-effort TC rate */
185 uint64_t pipe_tc_be_rate_max;
187 /* Pipe queues size */
188 uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
190 #ifdef RTE_SCHED_CMAN
192 enum rte_sched_cman_mode cman;
196 struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
197 struct rte_pie_config pie_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
201 /* Scheduling loop detection */
203 uint32_t pipe_exhaustion;
206 struct rte_bitmap *bmp;
207 uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;
210 struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
211 uint32_t busy_grinders;
213 /* Queue base calculation */
214 uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
217 struct rte_sched_pipe *pipe;
218 struct rte_sched_queue *queue;
219 struct rte_sched_queue_extra *queue_extra;
220 struct rte_sched_pipe_profile *pipe_profiles;
222 struct rte_mbuf **queue_array;
223 uint8_t memory[0] __rte_cache_aligned;
224 } __rte_cache_aligned;
226 struct rte_sched_port {
227 /* User parameters */
228 uint32_t n_subports_per_port;
229 uint32_t n_pipes_per_subport;
230 uint32_t n_pipes_per_subport_log2;
231 uint16_t pipe_queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
232 uint8_t pipe_tc[RTE_SCHED_QUEUES_PER_PIPE];
233 uint8_t tc_queue[RTE_SCHED_QUEUES_PER_PIPE];
234 uint32_t n_subport_profiles;
235 uint32_t n_max_subport_profiles;
238 uint32_t frame_overhead;
242 uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cycles */
243 uint64_t time_cpu_bytes; /* Current CPU time measured in bytes */
244 uint64_t time; /* Current NIC TX time measured in bytes */
245 struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */
246 uint64_t cycles_per_byte;
249 struct rte_mbuf **pkts_out;
253 /* Large data structures */
254 struct rte_sched_subport_profile *subport_profiles;
255 struct rte_sched_subport *subports[0] __rte_cache_aligned;
256 } __rte_cache_aligned;
258 enum rte_sched_subport_array {
259 e_RTE_SCHED_SUBPORT_ARRAY_PIPE = 0,
260 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE,
261 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA,
262 e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES,
263 e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY,
264 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY,
265 e_RTE_SCHED_SUBPORT_ARRAY_TOTAL,
268 static inline uint32_t
269 rte_sched_subport_pipe_queues(struct rte_sched_subport *subport)
271 return RTE_SCHED_QUEUES_PER_PIPE * subport->n_pipes_per_subport_enabled;
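/*
 * Queue index decomposition (sketch): the low 4 bits of qindex select one of
 * the RTE_SCHED_QUEUES_PER_PIPE = 16 queues within a pipe (qpos); the upper
 * bits select the pipe (pindex). Each pipe owns qsize_sum mbuf slots and
 * qsize_add[qpos] is the offset of queue qpos within its pipe, so e.g.
 * qindex = 35 -> pindex = 2, qpos = 3.
 */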
274 static inline struct rte_mbuf **
275 rte_sched_subport_pipe_qbase(struct rte_sched_subport *subport, uint32_t qindex)
277 uint32_t pindex = qindex >> 4;
278 uint32_t qpos = qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1);
280 return (subport->queue_array + pindex *
281 subport->qsize_sum + subport->qsize_add[qpos]);
284 static inline uint16_t
285 rte_sched_subport_pipe_qsize(struct rte_sched_port *port,
286 struct rte_sched_subport *subport, uint32_t qindex)
288 uint32_t tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];
290 return subport->qsize[tc];
293 static inline uint32_t
294 rte_sched_port_queues_per_port(struct rte_sched_port *port)
296 uint32_t n_queues = 0, i;
298 for (i = 0; i < port->n_subports_per_port; i++)
299 n_queues += rte_sched_subport_pipe_queues(port->subports[i]);
304 static inline uint16_t
305 rte_sched_port_pipe_queue(struct rte_sched_port *port, uint32_t traffic_class)
307 uint16_t pipe_queue = port->pipe_queue[traffic_class];
312 static inline uint8_t
313 rte_sched_port_pipe_tc(struct rte_sched_port *port, uint32_t qindex)
315 uint8_t pipe_tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];
320 static inline uint8_t
321 rte_sched_port_tc_queue(struct rte_sched_port *port, uint32_t qindex)
323 uint8_t tc_queue = port->tc_queue[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];
329 pipe_profile_check(struct rte_sched_pipe_params *params,
330 uint64_t rate, uint16_t *qsize)
334 /* Pipe parameters */
335 if (params == NULL) {
337 "%s: Incorrect value for parameter params\n", __func__);
341 /* TB rate: non-zero, not greater than port rate */
342 if (params->tb_rate == 0 ||
343 params->tb_rate > rate) {
345 "%s: Incorrect value for tb rate\n", __func__);
349 /* TB size: non-zero */
350 if (params->tb_size == 0) {
352 "%s: Incorrect value for tb size\n", __func__);
356 /* TC rate: non-zero if qsize non-zero, not greater than pipe rate */
357 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
358 if ((qsize[i] == 0 && params->tc_rate[i] != 0) ||
359 (qsize[i] != 0 && (params->tc_rate[i] == 0 ||
360 params->tc_rate[i] > params->tb_rate))) {
362 "%s: Incorrect value for qsize or tc_rate\n", __func__);
367 if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
368 qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
370 "%s: Incorrect value for be traffic class rate\n", __func__);
374 /* TC period: non-zero */
375 if (params->tc_period == 0) {
377 "%s: Incorrect value for tc period\n", __func__);
381 /* Best effort tc oversubscription weight: non-zero */
382 if (params->tc_ov_weight == 0) {
384 "%s: Incorrect value for tc ov weight\n", __func__);
388 /* Queue WRR weights: non-zero */
389 for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
390 if (params->wrr_weights[i] == 0) {
392 "%s: Incorrect value for wrr weight\n", __func__);
401 subport_profile_check(struct rte_sched_subport_profile_params *params,
406 /* Check user parameters */
407 if (params == NULL) {
408 RTE_LOG(ERR, SCHED, "%s: "
409 "Incorrect value for parameter params\n", __func__);
413 if (params->tb_rate == 0 || params->tb_rate > rate) {
414 RTE_LOG(ERR, SCHED, "%s: "
415 "Incorrect value for tb rate\n", __func__);
419 if (params->tb_size == 0) {
420 RTE_LOG(ERR, SCHED, "%s: "
421 "Incorrect value for tb size\n", __func__);
425 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
426 uint64_t tc_rate = params->tc_rate[i];
428 if (tc_rate == 0 || (tc_rate > params->tb_rate)) {
429 RTE_LOG(ERR, SCHED, "%s: "
430 "Incorrect value for tc rate\n", __func__);
435 if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
436 RTE_LOG(ERR, SCHED, "%s: "
437 "Incorrect tc rate(best effort)\n", __func__);
441 if (params->tc_period == 0) {
442 RTE_LOG(ERR, SCHED, "%s: "
443 "Incorrect value for tc period\n", __func__);
451 rte_sched_port_check_params(struct rte_sched_port_params *params)
455 if (params == NULL) {
457 "%s: Incorrect value for parameter params\n", __func__);
462 if (params->socket < 0) {
464 "%s: Incorrect value for socket id\n", __func__);
469 if (params->rate == 0) {
471 "%s: Incorrect value for rate\n", __func__);
476 if (params->mtu == 0) {
478 "%s: Incorrect value for mtu\n", __func__);
482 /* n_subports_per_port: non-zero, limited to 16 bits, power of 2 */
483 if (params->n_subports_per_port == 0 ||
484 params->n_subports_per_port > 1u << 16 ||
485 !rte_is_power_of_2(params->n_subports_per_port)) {
487 "%s: Incorrect value for number of subports\n", __func__);
491 if (params->subport_profiles == NULL ||
492 params->n_subport_profiles == 0 ||
493 params->n_max_subport_profiles == 0 ||
494 params->n_subport_profiles > params->n_max_subport_profiles) {
496 "%s: Incorrect value for subport profiles\n", __func__);
500 for (i = 0; i < params->n_subport_profiles; i++) {
501 struct rte_sched_subport_profile_params *p =
502 params->subport_profiles + i;
505 status = subport_profile_check(p, params->rate);
508 "%s: subport profile check failed(%d)\n",
514 /* n_pipes_per_subport: non-zero, power of 2 */
515 if (params->n_pipes_per_subport == 0 ||
516 !rte_is_power_of_2(params->n_pipes_per_subport)) {
518 "%s: Incorrect value for maximum pipes number\n", __func__);
526 rte_sched_subport_get_array_base(struct rte_sched_subport_params *params,
527 enum rte_sched_subport_array array)
529 uint32_t n_pipes_per_subport = params->n_pipes_per_subport_enabled;
530 uint32_t n_subport_pipe_queues =
531 RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport;
533 uint32_t size_pipe = n_pipes_per_subport * sizeof(struct rte_sched_pipe);
534 uint32_t size_queue =
535 n_subport_pipe_queues * sizeof(struct rte_sched_queue);
536 uint32_t size_queue_extra
537 = n_subport_pipe_queues * sizeof(struct rte_sched_queue_extra);
538 uint32_t size_pipe_profiles = params->n_max_pipe_profiles *
539 sizeof(struct rte_sched_pipe_profile);
540 uint32_t size_bmp_array =
541 rte_bitmap_get_memory_footprint(n_subport_pipe_queues);
542 uint32_t size_per_pipe_queue_array, size_queue_array;
546 size_per_pipe_queue_array = 0;
547 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
548 if (i < RTE_SCHED_TRAFFIC_CLASS_BE)
549 size_per_pipe_queue_array +=
550 params->qsize[i] * sizeof(struct rte_mbuf *);
552 size_per_pipe_queue_array += RTE_SCHED_MAX_QUEUES_PER_TC *
553 params->qsize[i] * sizeof(struct rte_mbuf *);
555 size_queue_array = n_pipes_per_subport * size_per_pipe_queue_array;
559 if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE)
561 base += RTE_CACHE_LINE_ROUNDUP(size_pipe);
563 if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE)
565 base += RTE_CACHE_LINE_ROUNDUP(size_queue);
567 if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA)
569 base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);
571 if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES)
573 base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);
575 if (array == e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY)
577 base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);
579 if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY)
581 base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);
587 rte_sched_subport_config_qsize(struct rte_sched_subport *subport)
591 subport->qsize_add[0] = 0;
593 /* Strict priority traffic classes */
594 for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
595 subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1];
597 /* Best-effort traffic class */
598 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] =
599 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE] +
600 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
601 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] =
602 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] +
603 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
604 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] =
605 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] +
606 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
608 subport->qsize_sum = subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] +
609 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
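/*
 * Example (illustrative): with all 13 TC queue sizes set to 64, qsize_add
 * becomes the prefix sum {0, 64, 128, ..., 768, 832, 896, 960}, covering the
 * 12 strict-priority queues plus the 4 best-effort queues, and
 * qsize_sum = 16 * 64 = 1024 mbuf slots per pipe.
 */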
613 rte_sched_port_log_pipe_profile(struct rte_sched_subport *subport, uint32_t i)
615 struct rte_sched_pipe_profile *p = subport->pipe_profiles + i;
617 RTE_LOG(DEBUG, SCHED, "Low level config for pipe profile %u:\n"
618 " Token bucket: period = %"PRIu64", credits per period = %"PRIu64", size = %"PRIu64"\n"
619 " Traffic classes: period = %"PRIu64",\n"
620 " credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
621 ", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
622 ", %"PRIu64", %"PRIu64", %"PRIu64"]\n"
623 " Best-effort traffic class oversubscription: weight = %hhu\n"
624 " WRR cost: [%hhu, %hhu, %hhu, %hhu]\n",
629 p->tb_credits_per_period,
632 /* Traffic classes */
634 p->tc_credits_per_period[0],
635 p->tc_credits_per_period[1],
636 p->tc_credits_per_period[2],
637 p->tc_credits_per_period[3],
638 p->tc_credits_per_period[4],
639 p->tc_credits_per_period[5],
640 p->tc_credits_per_period[6],
641 p->tc_credits_per_period[7],
642 p->tc_credits_per_period[8],
643 p->tc_credits_per_period[9],
644 p->tc_credits_per_period[10],
645 p->tc_credits_per_period[11],
646 p->tc_credits_per_period[12],
648 /* Best-effort traffic class oversubscription */
652 p->wrr_cost[0], p->wrr_cost[1], p->wrr_cost[2], p->wrr_cost[3]);
656 rte_sched_port_log_subport_profile(struct rte_sched_port *port, uint32_t i)
658 struct rte_sched_subport_profile *p = port->subport_profiles + i;
660 RTE_LOG(DEBUG, SCHED, "Low level config for subport profile %u:\n"
661 "Token bucket: period = %"PRIu64", credits per period = %"PRIu64","
663 "Traffic classes: period = %"PRIu64",\n"
664 "credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
665 " %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
666 " %"PRIu64", %"PRIu64", %"PRIu64"]\n",
671 p->tb_credits_per_period,
674 /* Traffic classes */
676 p->tc_credits_per_period[0],
677 p->tc_credits_per_period[1],
678 p->tc_credits_per_period[2],
679 p->tc_credits_per_period[3],
680 p->tc_credits_per_period[4],
681 p->tc_credits_per_period[5],
682 p->tc_credits_per_period[6],
683 p->tc_credits_per_period[7],
684 p->tc_credits_per_period[8],
685 p->tc_credits_per_period[9],
686 p->tc_credits_per_period[10],
687 p->tc_credits_per_period[11],
688 p->tc_credits_per_period[12]);
691 static inline uint64_t
692 rte_sched_time_ms_to_bytes(uint64_t time_ms, uint64_t rate)
694 uint64_t time = time_ms;
696 time = (time * rate) / 1000;
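/*
 * Example (illustrative): tc_period = 10 ms at rate = 1.25e9 bytes/sec
 * (10 GbE) converts to 10 * 1.25e9 / 1000 = 12,500,000 bytes.
 */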
702 rte_sched_pipe_profile_convert(struct rte_sched_subport *subport,
703 struct rte_sched_pipe_params *src,
704 struct rte_sched_pipe_profile *dst,
707 uint32_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
708 uint32_t lcd1, lcd2, lcd;
712 if (src->tb_rate == rate) {
713 dst->tb_credits_per_period = 1;
716 double tb_rate = (double) src->tb_rate
718 double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
720 rte_approx_64(tb_rate, d, &dst->tb_credits_per_period,
724 dst->tb_size = src->tb_size;
726 /* Traffic Classes */
727 dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period,
730 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
731 if (subport->qsize[i])
732 dst->tc_credits_per_period[i]
733 = rte_sched_time_ms_to_bytes(src->tc_period,
736 dst->tc_ov_weight = src->tc_ov_weight;
739 wrr_cost[0] = src->wrr_weights[0];
740 wrr_cost[1] = src->wrr_weights[1];
741 wrr_cost[2] = src->wrr_weights[2];
742 wrr_cost[3] = src->wrr_weights[3];
744 lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
745 lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
746 lcd = rte_get_lcd(lcd1, lcd2);
748 wrr_cost[0] = lcd / wrr_cost[0];
749 wrr_cost[1] = lcd / wrr_cost[1];
750 wrr_cost[2] = lcd / wrr_cost[2];
751 wrr_cost[3] = lcd / wrr_cost[3];
753 dst->wrr_cost[0] = (uint8_t) wrr_cost[0];
754 dst->wrr_cost[1] = (uint8_t) wrr_cost[1];
755 dst->wrr_cost[2] = (uint8_t) wrr_cost[2];
756 dst->wrr_cost[3] = (uint8_t) wrr_cost[3];
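/*
 * WRR cost example (illustrative): wrr_weights = {1, 2, 4, 8}
 *   lcd1 = lcd(1, 2) = 2, lcd2 = lcd(4, 8) = 8, lcd = lcd(2, 8) = 8
 *   wrr_cost = {8/1, 8/2, 8/4, 8/8} = {8, 4, 2, 1}
 * i.e. cost is inversely proportional to weight, so the heaviest queue
 * consumes its WRR tokens the slowest and is served most often.
 */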
760 rte_sched_subport_profile_convert(struct rte_sched_subport_profile_params *src,
761 struct rte_sched_subport_profile *dst,
767 if (src->tb_rate == rate) {
768 dst->tb_credits_per_period = 1;
771 double tb_rate = (double) src->tb_rate
773 double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
775 rte_approx_64(tb_rate, d, &dst->tb_credits_per_period,
779 dst->tb_size = src->tb_size;
781 /* Traffic Classes */
782 dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period, rate);
784 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
785 dst->tc_credits_per_period[i]
786 = rte_sched_time_ms_to_bytes(src->tc_period,
791 rte_sched_subport_config_pipe_profile_table(struct rte_sched_subport *subport,
792 struct rte_sched_subport_params *params, uint64_t rate)
796 for (i = 0; i < subport->n_pipe_profiles; i++) {
797 struct rte_sched_pipe_params *src = params->pipe_profiles + i;
798 struct rte_sched_pipe_profile *dst = subport->pipe_profiles + i;
800 rte_sched_pipe_profile_convert(subport, src, dst, rate);
801 rte_sched_port_log_pipe_profile(subport, i);
804 subport->pipe_tc_be_rate_max = 0;
805 for (i = 0; i < subport->n_pipe_profiles; i++) {
806 struct rte_sched_pipe_params *src = params->pipe_profiles + i;
807 uint64_t pipe_tc_be_rate = src->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];
809 if (subport->pipe_tc_be_rate_max < pipe_tc_be_rate)
810 subport->pipe_tc_be_rate_max = pipe_tc_be_rate;
815 rte_sched_port_config_subport_profile_table(struct rte_sched_port *port,
816 struct rte_sched_port_params *params,
821 for (i = 0; i < port->n_subport_profiles; i++) {
822 struct rte_sched_subport_profile_params *src
823 = params->subport_profiles + i;
824 struct rte_sched_subport_profile *dst
825 = port->subport_profiles + i;
827 rte_sched_subport_profile_convert(src, dst, rate);
828 rte_sched_port_log_subport_profile(port, i);
833 rte_sched_subport_check_params(struct rte_sched_subport_params *params,
834 uint32_t n_max_pipes_per_subport,
839 /* Check user parameters */
840 if (params == NULL) {
842 "%s: Incorrect value for parameter params\n", __func__);
846 /* qsize: if non-zero, power of 2,
847 * no bigger than 32K (due to 16-bit read/write pointers)
849 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
850 uint16_t qsize = params->qsize[i];
852 if (qsize != 0 && !rte_is_power_of_2(qsize)) {
854 "%s: Incorrect value for qsize\n", __func__);
859 if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
860 RTE_LOG(ERR, SCHED, "%s: Incorrect qsize\n", __func__);
864 /* n_pipes_per_subport: non-zero, not greater than max, power of 2 */
865 if (params->n_pipes_per_subport_enabled == 0 ||
866 params->n_pipes_per_subport_enabled > n_max_pipes_per_subport ||
867 !rte_is_power_of_2(params->n_pipes_per_subport_enabled)) {
869 "%s: Incorrect value for pipes number\n", __func__);
873 /* pipe_profiles and n_pipe_profiles */
874 if (params->pipe_profiles == NULL ||
875 params->n_pipe_profiles == 0 ||
876 params->n_max_pipe_profiles == 0 ||
877 params->n_pipe_profiles > params->n_max_pipe_profiles) {
879 "%s: Incorrect value for pipe profiles\n", __func__);
883 for (i = 0; i < params->n_pipe_profiles; i++) {
884 struct rte_sched_pipe_params *p = params->pipe_profiles + i;
887 status = pipe_profile_check(p, rate, &params->qsize[0]);
890 "%s: Pipe profile check failed(%d)\n", __func__, status);
899 rte_sched_port_get_memory_footprint(struct rte_sched_port_params *port_params,
900 struct rte_sched_subport_params **subport_params)
902 uint32_t size0 = 0, size1 = 0, i;
905 status = rte_sched_port_check_params(port_params);
908 "%s: Port scheduler port params check failed (%d)\n",
914 for (i = 0; i < port_params->n_subports_per_port; i++) {
915 struct rte_sched_subport_params *sp = subport_params[i];
917 status = rte_sched_subport_check_params(sp,
918 port_params->n_pipes_per_subport,
922 "%s: Port scheduler subport params check failed (%d)\n",
929 size0 = sizeof(struct rte_sched_port);
931 for (i = 0; i < port_params->n_subports_per_port; i++) {
932 struct rte_sched_subport_params *sp = subport_params[i];
934 size1 += rte_sched_subport_get_array_base(sp,
935 e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
938 return size0 + size1;
941 struct rte_sched_port *
942 rte_sched_port_config(struct rte_sched_port_params *params)
944 struct rte_sched_port *port = NULL;
945 uint32_t size0, size1, size2;
946 uint32_t cycles_per_byte;
950 status = rte_sched_port_check_params(params);
953 "%s: Port scheduler params check failed (%d)\n",
958 size0 = sizeof(struct rte_sched_port);
959 size1 = params->n_subports_per_port * sizeof(struct rte_sched_subport *);
960 size2 = params->n_max_subport_profiles *
961 sizeof(struct rte_sched_subport_profile);
963 /* Allocate memory to store the data structures */
964 port = rte_zmalloc_socket("qos_params", size0 + size1,
965 RTE_CACHE_LINE_SIZE, params->socket);
967 RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);
972 /* Allocate memory to store the subport profile */
973 port->subport_profiles = rte_zmalloc_socket("subport_profile", size2,
974 RTE_CACHE_LINE_SIZE, params->socket);
975 if (port->subport_profiles == NULL) {
976 RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);
981 /* User parameters */
982 port->n_subports_per_port = params->n_subports_per_port;
983 port->n_subport_profiles = params->n_subport_profiles;
984 port->n_max_subport_profiles = params->n_max_subport_profiles;
985 port->n_pipes_per_subport = params->n_pipes_per_subport;
986 port->n_pipes_per_subport_log2 =
987 __builtin_ctz(params->n_pipes_per_subport);
988 port->socket = params->socket;
990 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
991 port->pipe_queue[i] = i;
993 for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
994 port->pipe_tc[i] = j;
996 if (j < RTE_SCHED_TRAFFIC_CLASS_BE)
1000 for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
1001 port->tc_queue[i] = j;
1003 if (i >= RTE_SCHED_TRAFFIC_CLASS_BE)
1006 port->rate = params->rate;
1007 port->mtu = params->mtu + params->frame_overhead;
1008 port->frame_overhead = params->frame_overhead;
1011 port->time_cpu_cycles = rte_get_tsc_cycles();
1012 port->time_cpu_bytes = 0;
1015 /* Subport profile table */
1016 rte_sched_port_config_subport_profile_table(port, params, port->rate);
1018 cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT)
1020 port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte);
1021 port->cycles_per_byte = cycles_per_byte;
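/*
 * cycles_per_byte carries RTE_SCHED_TIME_SHIFT (8) fractional bits, i.e. it
 * is a 24.8 fixed-point value; the reciprocal above lets the hot path turn
 * elapsed TSC cycles into bytes with a multiply-shift instead of a divide.
 */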
1024 port->pkts_out = NULL;
1025 port->n_pkts_out = 0;
1026 port->subport_id = 0;
1032 rte_sched_subport_free(struct rte_sched_port *port,
1033 struct rte_sched_subport *subport)
1035 uint32_t n_subport_pipe_queues;
1038 if (subport == NULL)
1041 n_subport_pipe_queues = rte_sched_subport_pipe_queues(subport);
1043 /* Free enqueued mbufs */
1044 for (qindex = 0; qindex < n_subport_pipe_queues; qindex++) {
1045 struct rte_mbuf **mbufs =
1046 rte_sched_subport_pipe_qbase(subport, qindex);
1047 uint16_t qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
1049 struct rte_sched_queue *queue = subport->queue + qindex;
1050 uint16_t qr = queue->qr & (qsize - 1);
1051 uint16_t qw = queue->qw & (qsize - 1);
1053 for (; qr != qw; qr = (qr + 1) & (qsize - 1))
1054 rte_pktmbuf_free(mbufs[qr]);
1062 rte_sched_port_free(struct rte_sched_port *port)
1066 /* Check user parameters */
1070 for (i = 0; i < port->n_subports_per_port; i++)
1071 rte_sched_subport_free(port, port->subports[i]);
1073 rte_free(port->subport_profiles);
1078 rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)
1082 for (i = 0; i < n_subports; i++) {
1083 struct rte_sched_subport *subport = port->subports[i];
1085 rte_sched_subport_free(port, subport);
1088 rte_free(port->subport_profiles);
1092 #ifdef RTE_SCHED_CMAN
1094 rte_sched_red_config(struct rte_sched_port *port,
1095 struct rte_sched_subport *s,
1096 struct rte_sched_subport_params *params,
1097 uint32_t n_subports)
1101 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
1105 for (j = 0; j < RTE_COLORS; j++) {
1106 /* if min/max are both zero, then RED is disabled */
1107 if ((params->cman_params->red_params[i][j].min_th |
1108 params->cman_params->red_params[i][j].max_th) == 0) {
1112 if (rte_red_config_init(&s->red_config[i][j],
1113 params->cman_params->red_params[i][j].wq_log2,
1114 params->cman_params->red_params[i][j].min_th,
1115 params->cman_params->red_params[i][j].max_th,
1116 params->cman_params->red_params[i][j].maxp_inv) != 0) {
1117 rte_sched_free_memory(port, n_subports);
1119 RTE_LOG(NOTICE, SCHED,
1120 "%s: RED configuration init fails\n", __func__);
1125 s->cman = RTE_SCHED_CMAN_RED;
1130 rte_sched_pie_config(struct rte_sched_port *port,
1131 struct rte_sched_subport *s,
1132 struct rte_sched_subport_params *params,
1133 uint32_t n_subports)
1137 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
1138 if (params->cman_params->pie_params[i].tailq_th > params->qsize[i]) {
1139 RTE_LOG(NOTICE, SCHED,
1140 "%s: PIE tailq threshold incorrect\n", __func__);
1144 if (rte_pie_config_init(&s->pie_config[i],
1145 params->cman_params->pie_params[i].qdelay_ref,
1146 params->cman_params->pie_params[i].dp_update_interval,
1147 params->cman_params->pie_params[i].max_burst,
1148 params->cman_params->pie_params[i].tailq_th) != 0) {
1149 rte_sched_free_memory(port, n_subports);
1151 RTE_LOG(NOTICE, SCHED,
1152 "%s: PIE configuration init fails\n", __func__);
1156 s->cman = RTE_SCHED_CMAN_PIE;
1161 rte_sched_cman_config(struct rte_sched_port *port,
1162 struct rte_sched_subport *s,
1163 struct rte_sched_subport_params *params,
1164 uint32_t n_subports)
1166 if (params->cman_params->cman_mode == RTE_SCHED_CMAN_RED)
1167 return rte_sched_red_config(port, s, params, n_subports);
1169 else if (params->cman_params->cman_mode == RTE_SCHED_CMAN_PIE)
1170 return rte_sched_pie_config(port, s, params, n_subports);
1177 rte_sched_subport_config(struct rte_sched_port *port,
1178 uint32_t subport_id,
1179 struct rte_sched_subport_params *params,
1180 uint32_t subport_profile_id)
1182 struct rte_sched_subport *s = NULL;
1183 uint32_t n_subports = subport_id;
1184 struct rte_sched_subport_profile *profile;
1185 uint32_t n_subport_pipe_queues, i;
1186 uint32_t size0, size1, bmp_mem_size;
1190 /* Check user parameters */
1193 "%s: Incorrect value for parameter port\n", __func__);
1197 if (subport_id >= port->n_subports_per_port) {
1199 "%s: Incorrect value for subport id\n", __func__);
1204 if (subport_profile_id >= port->n_max_subport_profiles) {
1205 RTE_LOG(ERR, SCHED, "%s: "
1206 "Number of subport profile exceeds the max limit\n",
1212 /** Memory is allocated only on the first invocation of the API for a
1213 * given subport. Subsequent invocations on the same subport just
1214 * update the subport bandwidth parameters.
1216 if (port->subports[subport_id] == NULL) {
1218 status = rte_sched_subport_check_params(params,
1219 port->n_pipes_per_subport,
1222 RTE_LOG(NOTICE, SCHED,
1223 "%s: Port scheduler params check failed (%d)\n",
1229 /* Determine the amount of memory to allocate */
1230 size0 = sizeof(struct rte_sched_subport);
1231 size1 = rte_sched_subport_get_array_base(params,
1232 e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
1234 /* Allocate memory to store the data structures */
1235 s = rte_zmalloc_socket("subport_params", size0 + size1,
1236 RTE_CACHE_LINE_SIZE, port->socket);
1239 "%s: Memory allocation fails\n", __func__);
1246 subport_profile_id = 0;
1249 port->subports[subport_id] = s;
1251 s->tb_time = port->time;
1253 /* compile time checks */
1254 RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
1255 RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS &
1256 (RTE_SCHED_PORT_N_GRINDERS - 1));
1258 /* User parameters */
1259 s->n_pipes_per_subport_enabled =
1260 params->n_pipes_per_subport_enabled;
1261 memcpy(s->qsize, params->qsize, sizeof(params->qsize));
1262 s->n_pipe_profiles = params->n_pipe_profiles;
1263 s->n_max_pipe_profiles = params->n_max_pipe_profiles;
1265 #ifdef RTE_SCHED_CMAN
1266 if (params->cman_params != NULL) {
1267 s->cman_enabled = true;
1268 status = rte_sched_cman_config(port, s, params, n_subports);
1270 RTE_LOG(NOTICE, SCHED,
1271 "%s: CMAN configuration fails\n", __func__);
1275 s->cman_enabled = false;
1279 /* Scheduling loop detection */
1280 s->pipe_loop = RTE_SCHED_PIPE_INVALID;
1281 s->pipe_exhaustion = 0;
1284 s->busy_grinders = 0;
1286 /* Queue base calculation */
1287 rte_sched_subport_config_qsize(s);
1289 /* Large data structures */
1290 s->pipe = (struct rte_sched_pipe *)
1291 (s->memory + rte_sched_subport_get_array_base(params,
1292 e_RTE_SCHED_SUBPORT_ARRAY_PIPE));
1293 s->queue = (struct rte_sched_queue *)
1294 (s->memory + rte_sched_subport_get_array_base(params,
1295 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE));
1296 s->queue_extra = (struct rte_sched_queue_extra *)
1297 (s->memory + rte_sched_subport_get_array_base(params,
1298 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA));
1299 s->pipe_profiles = (struct rte_sched_pipe_profile *)
1300 (s->memory + rte_sched_subport_get_array_base(params,
1301 e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES));
1302 s->bmp_array = s->memory + rte_sched_subport_get_array_base(
1303 params, e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY);
1304 s->queue_array = (struct rte_mbuf **)
1305 (s->memory + rte_sched_subport_get_array_base(params,
1306 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY));
1308 /* Pipe profile table */
1309 rte_sched_subport_config_pipe_profile_table(s, params,
1313 n_subport_pipe_queues = rte_sched_subport_pipe_queues(s);
1314 bmp_mem_size = rte_bitmap_get_memory_footprint(
1315 n_subport_pipe_queues);
1316 s->bmp = rte_bitmap_init(n_subport_pipe_queues, s->bmp_array,
1318 if (s->bmp == NULL) {
1320 "%s: Subport bitmap init error\n", __func__);
1325 for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
1326 s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
1328 #ifdef RTE_SCHED_SUBPORT_TC_OV
1329 /* TC oversubscription */
1330 s->tc_ov_wm_min = port->mtu;
1331 s->tc_ov_period_id = 0;
1339 /* Update subport parameters from the subport profile table */
1340 profile = port->subport_profiles + subport_profile_id;
1342 s = port->subports[subport_id];
1344 s->tb_credits = profile->tb_size / 2;
1346 s->tc_time = port->time + profile->tc_period;
1348 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
1351 profile->tc_credits_per_period[i];
1353 profile->tc_credits_per_period[i] = 0;
1355 #ifdef RTE_SCHED_SUBPORT_TC_OV
1356 s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period,
1357 s->pipe_tc_be_rate_max);
1358 s->tc_ov_wm = s->tc_ov_wm_max;
1360 s->profile = subport_profile_id;
1364 rte_sched_port_log_subport_profile(port, subport_profile_id);
1369 rte_sched_free_memory(port, n_subports);
1375 rte_sched_pipe_config(struct rte_sched_port *port,
1376 uint32_t subport_id,
1378 int32_t pipe_profile)
1380 struct rte_sched_subport *s;
1381 struct rte_sched_subport_profile *sp;
1382 struct rte_sched_pipe *p;
1383 struct rte_sched_pipe_profile *params;
1384 uint32_t n_subports = subport_id + 1;
1385 uint32_t deactivate, profile, i;
1388 /* Check user parameters */
1389 profile = (uint32_t) pipe_profile;
1390 deactivate = (pipe_profile < 0);
1394 "%s: Incorrect value for parameter port\n", __func__);
1398 if (subport_id >= port->n_subports_per_port) {
1400 "%s: Incorrect value for parameter subport id\n", __func__);
1405 s = port->subports[subport_id];
1406 if (pipe_id >= s->n_pipes_per_subport_enabled) {
1408 "%s: Incorrect value for parameter pipe id\n", __func__);
1413 if (!deactivate && profile >= s->n_pipe_profiles) {
1415 "%s: Incorrect value for parameter pipe profile\n", __func__);
1420 sp = port->subport_profiles + s->profile;
1421 /* Handle the case when pipe already has a valid configuration */
1422 p = s->pipe + pipe_id;
1424 params = s->pipe_profiles + p->profile;
1426 double subport_tc_be_rate =
1427 (double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1428 / (double) sp->tc_period;
1429 double pipe_tc_be_rate =
1430 (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1431 / (double) params->tc_period;
1432 uint32_t tc_be_ov = s->tc_ov;
1434 /* Unplug pipe from its subport */
1435 s->tc_ov_n -= params->tc_ov_weight;
1436 s->tc_ov_rate -= pipe_tc_be_rate;
1437 s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
1439 if (s->tc_ov != tc_be_ov) {
1440 RTE_LOG(DEBUG, SCHED,
1441 "Subport %u Best-effort TC oversubscription is OFF (%.4lf >= %.4lf)\n",
1442 subport_id, subport_tc_be_rate, s->tc_ov_rate);
1445 /* Reset the pipe */
1446 memset(p, 0, sizeof(struct rte_sched_pipe));
1452 /* Apply the new pipe configuration */
1453 p->profile = profile;
1454 params = s->pipe_profiles + p->profile;
1456 /* Token Bucket (TB) */
1457 p->tb_time = port->time;
1458 p->tb_credits = params->tb_size / 2;
1460 /* Traffic Classes (TCs) */
1461 p->tc_time = port->time + params->tc_period;
1463 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
1465 p->tc_credits[i] = params->tc_credits_per_period[i];
1468 /* Subport best effort tc oversubscription */
1469 double subport_tc_be_rate =
1470 (double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1471 / (double) sp->tc_period;
1472 double pipe_tc_be_rate =
1473 (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1474 / (double) params->tc_period;
1475 uint32_t tc_be_ov = s->tc_ov;
1477 s->tc_ov_n += params->tc_ov_weight;
1478 s->tc_ov_rate += pipe_tc_be_rate;
1479 s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
1481 if (s->tc_ov != tc_be_ov) {
1482 RTE_LOG(DEBUG, SCHED,
1483 "Subport %u Best effort TC oversubscription is ON (%.4lf < %.4lf)\n",
1484 subport_id, subport_tc_be_rate, s->tc_ov_rate);
1486 p->tc_ov_period_id = s->tc_ov_period_id;
1487 p->tc_ov_credits = s->tc_ov_wm;
1493 rte_sched_free_memory(port, n_subports);
1499 rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,
1500 uint32_t subport_id,
1501 struct rte_sched_pipe_params *params,
1502 uint32_t *pipe_profile_id)
1504 struct rte_sched_subport *s;
1505 struct rte_sched_pipe_profile *pp;
1512 "%s: Incorrect value for parameter port\n", __func__);
1516 /* Subport id must not exceed the max limit */
1517 if (subport_id >= port->n_subports_per_port) {
1519 "%s: Incorrect value for subport id\n", __func__);
1523 s = port->subports[subport_id];
1525 /* Number of pipe profiles must not exceed the max limit */
1526 if (s->n_pipe_profiles >= s->n_max_pipe_profiles) {
1528 "%s: Number of pipe profiles exceeds the max limit\n", __func__);
1533 status = pipe_profile_check(params, port->rate, &s->qsize[0]);
1536 "%s: Pipe profile check failed(%d)\n", __func__, status);
1540 pp = &s->pipe_profiles[s->n_pipe_profiles];
1541 rte_sched_pipe_profile_convert(s, params, pp, port->rate);
1543 /* Pipe profile must not already exist */
1544 for (i = 0; i < s->n_pipe_profiles; i++)
1545 if (memcmp(s->pipe_profiles + i, pp, sizeof(*pp)) == 0) {
1547 "%s: Pipe profile exists\n", __func__);
1551 /* Pipe profile commit */
1552 *pipe_profile_id = s->n_pipe_profiles;
1553 s->n_pipe_profiles++;
1555 if (s->pipe_tc_be_rate_max < params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE])
1556 s->pipe_tc_be_rate_max = params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];
1558 rte_sched_port_log_pipe_profile(s, *pipe_profile_id);
1564 rte_sched_port_subport_profile_add(struct rte_sched_port *port,
1565 struct rte_sched_subport_profile_params *params,
1566 uint32_t *subport_profile_id)
1570 struct rte_sched_subport_profile *dst;
1574 RTE_LOG(ERR, SCHED, "%s: "
1575 "Incorrect value for parameter port\n", __func__);
1579 if (params == NULL) {
1580 RTE_LOG(ERR, SCHED, "%s: "
1581 "Incorrect value for parameter profile\n", __func__);
1585 if (subport_profile_id == NULL) {
1586 RTE_LOG(ERR, SCHED, "%s: "
1587 "Incorrect value for parameter subport_profile_id\n",
1592 dst = port->subport_profiles + port->n_subport_profiles;
1594 /* Number of subport profiles must not exceed the max limit */
1595 if (port->n_subport_profiles >= port->n_max_subport_profiles) {
1596 RTE_LOG(ERR, SCHED, "%s: "
1597 "Number of subport profiles exceeds the max limit\n",
1602 status = subport_profile_check(params, port->rate);
1605 "%s: subport profile check failed(%d)\n", __func__, status);
1609 rte_sched_subport_profile_convert(params, dst, port->rate);
1611 /* Subport profile must not already exist */
1612 for (i = 0; i < port->n_subport_profiles; i++)
1613 if (memcmp(port->subport_profiles + i,
1614 dst, sizeof(*dst)) == 0) {
1616 "%s: subport profile exists\n", __func__);
1620 /* Subport profile commit */
1621 *subport_profile_id = port->n_subport_profiles;
1622 port->n_subport_profiles++;
1624 rte_sched_port_log_subport_profile(port, *subport_profile_id);
1629 static inline uint32_t
1630 rte_sched_port_qindex(struct rte_sched_port *port,
1633 uint32_t traffic_class,
1636 return ((subport & (port->n_subports_per_port - 1)) <<
1637 (port->n_pipes_per_subport_log2 + 4)) |
1639 (port->subports[subport]->n_pipes_per_subport_enabled - 1)) << 4) |
1640 ((rte_sched_port_pipe_queue(port, traffic_class) + queue) &
1641 (RTE_SCHED_QUEUES_PER_PIPE - 1));
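/*
 * Resulting queue_id bit layout (sketch):
 *   [ subport |            pipe             | queue-in-pipe ]
 *   high bits   n_pipes_per_subport_log2 bits     4 bits
 * The low 4 bits index one of the 16 queues of the pipe; the same layout is
 * decoded in rte_sched_port_pkt_read_tree_path() below.
 */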
1645 rte_sched_port_pkt_write(struct rte_sched_port *port,
1646 struct rte_mbuf *pkt,
1647 uint32_t subport, uint32_t pipe,
1648 uint32_t traffic_class,
1649 uint32_t queue, enum rte_color color)
1652 rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
1654 rte_mbuf_sched_set(pkt, queue_id, traffic_class, (uint8_t)color);
1658 rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
1659 const struct rte_mbuf *pkt,
1660 uint32_t *subport, uint32_t *pipe,
1661 uint32_t *traffic_class, uint32_t *queue)
1663 uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
1665 *subport = queue_id >> (port->n_pipes_per_subport_log2 + 4);
1666 *pipe = (queue_id >> 4) &
1667 (port->subports[*subport]->n_pipes_per_subport_enabled - 1);
1668 *traffic_class = rte_sched_port_pipe_tc(port, queue_id);
1669 *queue = rte_sched_port_tc_queue(port, queue_id);
1673 rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
1675 return (enum rte_color)rte_mbuf_sched_color_get(pkt);
1679 rte_sched_subport_read_stats(struct rte_sched_port *port,
1680 uint32_t subport_id,
1681 struct rte_sched_subport_stats *stats,
1684 struct rte_sched_subport *s;
1686 /* Check user parameters */
1689 "%s: Incorrect value for parameter port\n", __func__);
1693 if (subport_id >= port->n_subports_per_port) {
1695 "%s: Incorrect value for subport id\n", __func__);
1699 if (stats == NULL) {
1701 "%s: Incorrect value for parameter stats\n", __func__);
1705 if (tc_ov == NULL) {
1707 "%s: Incorrect value for tc_ov\n", __func__);
1711 s = port->subports[subport_id];
1713 /* Copy subport stats and clear */
1714 memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
1715 memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));
1717 /* Subport TC oversubscription status */
1724 rte_sched_queue_read_stats(struct rte_sched_port *port,
1726 struct rte_sched_queue_stats *stats,
1729 struct rte_sched_subport *s;
1730 struct rte_sched_queue *q;
1731 struct rte_sched_queue_extra *qe;
1732 uint32_t subport_id, subport_qmask, subport_qindex;
1734 /* Check user parameters */
1737 "%s: Incorrect value for parameter port\n", __func__);
1741 if (queue_id >= rte_sched_port_queues_per_port(port)) {
1743 "%s: Incorrect value for queue id\n", __func__);
1747 if (stats == NULL) {
1749 "%s: Incorrect value for parameter stats\n", __func__);
1755 "%s: Incorrect value for parameter qlen\n", __func__);
1758 subport_qmask = port->n_pipes_per_subport_log2 + 4; /* shift amount: log2 of queues per subport */
1759 subport_id = (queue_id >> subport_qmask) & (port->n_subports_per_port - 1);
1761 s = port->subports[subport_id];
1762 subport_qindex = ((1 << subport_qmask) - 1) & queue_id;
1763 q = s->queue + subport_qindex;
1764 qe = s->queue_extra + subport_qindex;
1766 /* Copy queue stats and clear */
1767 memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
1768 memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats));
1771 *qlen = q->qw - q->qr;
1776 #ifdef RTE_SCHED_DEBUG
1779 rte_sched_port_queue_is_empty(struct rte_sched_subport *subport,
1782 struct rte_sched_queue *queue = subport->queue + qindex;
1784 return queue->qr == queue->qw;
1787 #endif /* RTE_SCHED_DEBUG */
1789 #ifdef RTE_SCHED_COLLECT_STATS
1792 rte_sched_port_update_subport_stats(struct rte_sched_port *port,
1793 struct rte_sched_subport *subport,
1795 struct rte_mbuf *pkt)
1797 uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
1798 uint32_t pkt_len = pkt->pkt_len;
1800 subport->stats.n_pkts_tc[tc_index] += 1;
1801 subport->stats.n_bytes_tc[tc_index] += pkt_len;
1805 rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
1806 struct rte_sched_subport *subport,
1808 struct rte_mbuf *pkt,
1809 __rte_unused uint32_t n_pkts_cman_dropped)
1811 uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
1812 uint32_t pkt_len = pkt->pkt_len;
1814 subport->stats.n_pkts_tc_dropped[tc_index] += 1;
1815 subport->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
1816 subport->stats.n_pkts_cman_dropped[tc_index] += n_pkts_cman_dropped;
1820 rte_sched_port_update_queue_stats(struct rte_sched_subport *subport,
1822 struct rte_mbuf *pkt)
1824 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1825 uint32_t pkt_len = pkt->pkt_len;
1827 qe->stats.n_pkts += 1;
1828 qe->stats.n_bytes += pkt_len;
1832 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
1834 struct rte_mbuf *pkt,
1835 __rte_unused uint32_t n_pkts_cman_dropped)
1837 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1838 uint32_t pkt_len = pkt->pkt_len;
1840 qe->stats.n_pkts_dropped += 1;
1841 qe->stats.n_bytes_dropped += pkt_len;
1842 #ifdef RTE_SCHED_CMAN
1843 if (subport->cman_enabled)
1844 qe->stats.n_pkts_cman_dropped += n_pkts_cman_dropped;
1848 #endif /* RTE_SCHED_COLLECT_STATS */
1850 #ifdef RTE_SCHED_CMAN
1853 rte_sched_port_cman_drop(struct rte_sched_port *port,
1854 struct rte_sched_subport *subport,
1855 struct rte_mbuf *pkt,
1859 if (!subport->cman_enabled)
1862 struct rte_sched_queue_extra *qe;
1865 tc_index = rte_sched_port_pipe_tc(port, qindex);
1866 qe = subport->queue_extra + qindex;
1869 if (subport->cman == RTE_SCHED_CMAN_RED) {
1870 struct rte_red_config *red_cfg;
1871 struct rte_red *red;
1872 enum rte_color color;
1874 color = rte_sched_port_pkt_read_color(pkt);
1875 red_cfg = &subport->red_config[tc_index][color];
1877 if ((red_cfg->min_th | red_cfg->max_th) == 0)
1882 return rte_red_enqueue(red_cfg, red, qlen, port->time);
1886 struct rte_pie_config *pie_cfg = &subport->pie_config[tc_index];
1887 struct rte_pie *pie = &qe->pie;
1889 return rte_pie_enqueue(pie_cfg, pie, qlen, pkt->pkt_len, port->time_cpu_cycles);
1893 rte_sched_port_red_set_queue_empty_timestamp(struct rte_sched_port *port,
1894 struct rte_sched_subport *subport, uint32_t qindex)
1896 if (subport->cman_enabled) {
1897 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1898 if (subport->cman == RTE_SCHED_CMAN_RED) {
1899 struct rte_red *red = &qe->red;
1901 rte_red_mark_queue_empty(red, port->time);
1907 rte_sched_port_pie_dequeue(struct rte_sched_subport *subport,
1908 uint32_t qindex, uint32_t pkt_len, uint64_t time) {
1909 if (subport->cman_enabled && subport->cman == RTE_SCHED_CMAN_PIE) {
1910 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1911 struct rte_pie *pie = &qe->pie;
1913 /* Update queue length */
1915 pie->qlen_bytes -= pkt_len;
1917 rte_pie_dequeue(pie, pkt_len, time);
1923 static inline int rte_sched_port_cman_drop(struct rte_sched_port *port __rte_unused,
1924 struct rte_sched_subport *subport __rte_unused,
1925 struct rte_mbuf *pkt __rte_unused,
1926 uint32_t qindex __rte_unused,
1927 uint16_t qlen __rte_unused)
1932 #define rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex)
1935 rte_sched_port_pie_dequeue(struct rte_sched_subport *subport __rte_unused,
1936 uint32_t qindex __rte_unused,
1937 uint32_t pkt_len __rte_unused,
1938 uint64_t time __rte_unused) {
1939 /* do-nothing when RTE_SCHED_CMAN not defined */
1942 #endif /* RTE_SCHED_CMAN */
1944 #ifdef RTE_SCHED_DEBUG
1947 debug_check_queue_slab(struct rte_sched_subport *subport, uint32_t bmp_pos,
1954 rte_panic("Empty slab at position %u\n", bmp_pos);
1957 for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
1958 if (mask & bmp_slab) {
1959 if (rte_sched_port_queue_is_empty(subport, bmp_pos + i)) {
1960 printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i);
1967 rte_panic("Empty queues in slab 0x%" PRIx64 "starting at position %u\n",
1971 #endif /* RTE_SCHED_DEBUG */
1973 static inline struct rte_sched_subport *
1974 rte_sched_port_subport(struct rte_sched_port *port,
1975 struct rte_mbuf *pkt)
1977 uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
1978 uint32_t subport_id = queue_id >> (port->n_pipes_per_subport_log2 + 4);
1980 return port->subports[subport_id];
1983 static inline uint32_t
1984 rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_subport *subport,
1985 struct rte_mbuf *pkt, uint32_t subport_qmask)
1987 struct rte_sched_queue *q;
1988 #ifdef RTE_SCHED_COLLECT_STATS
1989 struct rte_sched_queue_extra *qe;
1991 uint32_t qindex = rte_mbuf_sched_queue_get(pkt);
1992 uint32_t subport_queue_id = subport_qmask & qindex;
1994 q = subport->queue + subport_queue_id;
1996 #ifdef RTE_SCHED_COLLECT_STATS
1997 qe = subport->queue_extra + subport_queue_id;
2001 return subport_queue_id;
2005 rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port,
2006 struct rte_sched_subport *subport,
2008 struct rte_mbuf **qbase)
2010 struct rte_sched_queue *q;
2011 struct rte_mbuf **q_qw;
2014 q = subport->queue + qindex;
2015 qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2016 q_qw = qbase + (q->qw & (qsize - 1));
2018 rte_prefetch0(q_qw);
2019 rte_bitmap_prefetch0(subport->bmp, qindex);
2023 rte_sched_port_enqueue_qwa(struct rte_sched_port *port,
2024 struct rte_sched_subport *subport,
2026 struct rte_mbuf **qbase,
2027 struct rte_mbuf *pkt)
2029 struct rte_sched_queue *q;
2033 q = subport->queue + qindex;
2034 qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2035 qlen = q->qw - q->qr;
2037 /* Drop the packet (and update drop stats) when queue is full */
2038 if (unlikely(rte_sched_port_cman_drop(port, subport, pkt, qindex, qlen) ||
2040 rte_pktmbuf_free(pkt);
2041 #ifdef RTE_SCHED_COLLECT_STATS
2042 rte_sched_port_update_subport_stats_on_drop(port, subport,
2043 qindex, pkt, qlen < qsize);
2044 rte_sched_port_update_queue_stats_on_drop(subport, qindex, pkt,
2050 /* Enqueue packet */
2051 qbase[q->qw & (qsize - 1)] = pkt;
2054 /* Activate queue in the subport bitmap */
2055 rte_bitmap_set(subport->bmp, qindex);
2058 #ifdef RTE_SCHED_COLLECT_STATS
2059 rte_sched_port_update_subport_stats(port, subport, qindex, pkt);
2060 rte_sched_port_update_queue_stats(subport, qindex, pkt);
2068 * The enqueue function implements a 4-level pipeline with each stage
2069 * processing two different packets. The purpose of using a pipeline
2070 * is to hide the latency of prefetching the data structures. The
2071 * naming convention is presented in the diagram below:
2073 * p00 _______ p10 _______ p20 _______ p30 _______
2074 * ----->| |----->| |----->| |----->| |----->
2075 * | 0 | | 1 | | 2 | | 3 |
2076 * ----->|_______|----->|_______|----->|_______|----->|_______|----->
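 *
 * pXY denotes the packet on lane Y (0 or 1) at the input of stage X; the
 * pkt{X}{Y}, q{X}{Y} and subport{X}{Y} variables below follow the same
 * convention, with two packets moving through all four stages per iteration.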
2081 rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,
2084 struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21,
2085 *pkt30, *pkt31, *pkt_last;
2086 struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base,
2087 **q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
2088 struct rte_sched_subport *subport00, *subport01, *subport10, *subport11,
2089 *subport20, *subport21, *subport30, *subport31, *subport_last;
2090 uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
2091 uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
2092 uint32_t subport_qmask;
2096 subport_qmask = (1 << (port->n_pipes_per_subport_log2 + 4)) - 1;
2099 * Less than 6 input packets available, which is not enough to
2102 if (unlikely(n_pkts < 6)) {
2103 struct rte_sched_subport *subports[5];
2104 struct rte_mbuf **q_base[5];
2107 /* Prefetch the mbuf structure of each packet */
2108 for (i = 0; i < n_pkts; i++)
2109 rte_prefetch0(pkts[i]);
2111 /* Prefetch the subport structure for each packet */
2112 for (i = 0; i < n_pkts; i++)
2113 subports[i] = rte_sched_port_subport(port, pkts[i]);
2115 /* Prefetch the queue structure for each queue */
2116 for (i = 0; i < n_pkts; i++)
2117 q[i] = rte_sched_port_enqueue_qptrs_prefetch0(subports[i],
2118 pkts[i], subport_qmask);
2120 /* Prefetch the write pointer location of each queue */
2121 for (i = 0; i < n_pkts; i++) {
2122 q_base[i] = rte_sched_subport_pipe_qbase(subports[i], q[i]);
2123 rte_sched_port_enqueue_qwa_prefetch0(port, subports[i],
2127 /* Write each packet to its queue */
2128 for (i = 0; i < n_pkts; i++)
2129 result += rte_sched_port_enqueue_qwa(port, subports[i],
2130 q[i], q_base[i], pkts[i]);
2135 /* Feed the first 3 stages of the pipeline (6 packets needed) */
2138 rte_prefetch0(pkt20);
2139 rte_prefetch0(pkt21);
2143 rte_prefetch0(pkt10);
2144 rte_prefetch0(pkt11);
2146 subport20 = rte_sched_port_subport(port, pkt20);
2147 subport21 = rte_sched_port_subport(port, pkt21);
2148 q20 = rte_sched_port_enqueue_qptrs_prefetch0(subport20,
2149 pkt20, subport_qmask);
2150 q21 = rte_sched_port_enqueue_qptrs_prefetch0(subport21,
2151 pkt21, subport_qmask);
2155 rte_prefetch0(pkt00);
2156 rte_prefetch0(pkt01);
2158 subport10 = rte_sched_port_subport(port, pkt10);
2159 subport11 = rte_sched_port_subport(port, pkt11);
2160 q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
2161 pkt10, subport_qmask);
2162 q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
2163 pkt11, subport_qmask);
2165 q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
2166 q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
2167 rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
2168 rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);
2170 /* Run the pipeline */
2171 for (i = 6; i < (n_pkts & (~1)); i += 2) {
2172 /* Propagate stage inputs */
2183 subport30 = subport20;
2184 subport31 = subport21;
2185 subport20 = subport10;
2186 subport21 = subport11;
2187 q30_base = q20_base;
2188 q31_base = q21_base;
2190 /* Stage 0: Get packets in */
2192 pkt01 = pkts[i + 1];
2193 rte_prefetch0(pkt00);
2194 rte_prefetch0(pkt01);
2196 /* Stage 1: Prefetch subport and queue structure storing queue pointers */
2197 subport10 = rte_sched_port_subport(port, pkt10);
2198 subport11 = rte_sched_port_subport(port, pkt11);
2199 q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
2200 pkt10, subport_qmask);
2201 q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
2202 pkt11, subport_qmask);
2204 /* Stage 2: Prefetch queue write location */
2205 q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
2206 q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
2207 rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
2208 rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);
2210 /* Stage 3: Write packet to queue and activate queue */
2211 r30 = rte_sched_port_enqueue_qwa(port, subport30,
2212 q30, q30_base, pkt30);
2213 r31 = rte_sched_port_enqueue_qwa(port, subport31,
2214 q31, q31_base, pkt31);
2215 result += r30 + r31;
2219 * Drain the pipeline (exactly 6 packets).
2220 * Handle the last packet in the case
2221 * of an odd number of input packets.
2223 pkt_last = pkts[n_pkts - 1];
2224 rte_prefetch0(pkt_last);
2226 subport00 = rte_sched_port_subport(port, pkt00);
2227 subport01 = rte_sched_port_subport(port, pkt01);
2228 q00 = rte_sched_port_enqueue_qptrs_prefetch0(subport00,
2229 pkt00, subport_qmask);
2230 q01 = rte_sched_port_enqueue_qptrs_prefetch0(subport01,
2231 pkt01, subport_qmask);
2233 q10_base = rte_sched_subport_pipe_qbase(subport10, q10);
2234 q11_base = rte_sched_subport_pipe_qbase(subport11, q11);
2235 rte_sched_port_enqueue_qwa_prefetch0(port, subport10, q10, q10_base);
2236 rte_sched_port_enqueue_qwa_prefetch0(port, subport11, q11, q11_base);
2238 r20 = rte_sched_port_enqueue_qwa(port, subport20,
2239 q20, q20_base, pkt20);
2240 r21 = rte_sched_port_enqueue_qwa(port, subport21,
2241 q21, q21_base, pkt21);
2242 result += r20 + r21;
2244 subport_last = rte_sched_port_subport(port, pkt_last);
2245 q_last = rte_sched_port_enqueue_qptrs_prefetch0(subport_last,
2246 pkt_last, subport_qmask);
2248 q00_base = rte_sched_subport_pipe_qbase(subport00, q00);
2249 q01_base = rte_sched_subport_pipe_qbase(subport01, q01);
2250 rte_sched_port_enqueue_qwa_prefetch0(port, subport00, q00, q00_base);
2251 rte_sched_port_enqueue_qwa_prefetch0(port, subport01, q01, q01_base);
2253 r10 = rte_sched_port_enqueue_qwa(port, subport10, q10,
2255 r11 = rte_sched_port_enqueue_qwa(port, subport11, q11,
2257 result += r10 + r11;
2259 q_last_base = rte_sched_subport_pipe_qbase(subport_last, q_last);
2260 rte_sched_port_enqueue_qwa_prefetch0(port, subport_last,
2261 q_last, q_last_base);
2263 r00 = rte_sched_port_enqueue_qwa(port, subport00, q00,
2265 r01 = rte_sched_port_enqueue_qwa(port, subport01, q01,
2267 result += r00 + r01;
2270 r_last = rte_sched_port_enqueue_qwa(port, subport_last,
2271 q_last, q_last_base, pkt_last);
2278 #ifndef RTE_SCHED_SUBPORT_TC_OV
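/*
 * Token bucket refill (illustrative sketch): with tb_period = 100 bytes of
 * port time and tb_credits_per_period = 10, a grinder visit at
 * port->time - tb_time = 350 adds n_periods = 3 refills (30 credits, capped
 * at tb_size) and advances tb_time by 300, keeping the 50-byte remainder for
 * the next visit.
 */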
2281 grinder_credits_update(struct rte_sched_port *port,
2282 struct rte_sched_subport *subport, uint32_t pos)
2284 struct rte_sched_grinder *grinder = subport->grinder + pos;
2285 struct rte_sched_pipe *pipe = grinder->pipe;
2286 struct rte_sched_pipe_profile *params = grinder->pipe_params;
2287 struct rte_sched_subport_profile *sp = grinder->subport_params;
2292 n_periods = (port->time - subport->tb_time) / sp->tb_period;
2293 subport->tb_credits += n_periods * sp->tb_credits_per_period;
2294 subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
2295 subport->tb_time += n_periods * sp->tb_period;
2298 n_periods = (port->time - pipe->tb_time) / params->tb_period;
2299 pipe->tb_credits += n_periods * params->tb_credits_per_period;
2300 pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
2301 pipe->tb_time += n_periods * params->tb_period;
2304 if (unlikely(port->time >= subport->tc_time)) {
2305 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2306 subport->tc_credits[i] = sp->tc_credits_per_period[i];
2308 subport->tc_time = port->time + sp->tc_period;
2312 if (unlikely(port->time >= pipe->tc_time)) {
2313 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2314 pipe->tc_credits[i] = params->tc_credits_per_period[i];
2316 pipe->tc_time = port->time + params->tc_period;
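/*
 * The oversubscription watermark below adapts by roughly 1/128 (>> 7) per
 * tc_period: it decreases when the best-effort TC consumed nearly all the
 * credits the subport could have offered (congestion) and increases
 * otherwise, staying clamped to [tc_ov_wm_min, tc_ov_wm_max].
 */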
2322 static inline uint64_t
2323 grinder_tc_ov_credits_update(struct rte_sched_port *port,
2324 struct rte_sched_subport *subport, uint32_t pos)
2326 struct rte_sched_grinder *grinder = subport->grinder + pos;
2327 struct rte_sched_subport_profile *sp = grinder->subport_params;
2328 uint64_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2329 uint64_t tc_consumption = 0, tc_ov_consumption_max;
2330 uint64_t tc_ov_wm = subport->tc_ov_wm;
2333 if (subport->tc_ov == 0)
2334 return subport->tc_ov_wm_max;
2336 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
2337 tc_ov_consumption[i] = sp->tc_credits_per_period[i]
2338 - subport->tc_credits[i];
2339 tc_consumption += tc_ov_consumption[i];
2340 }
2342 tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] =
2343 sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
2344 subport->tc_credits[RTE_SCHED_TRAFFIC_CLASS_BE];
2346 tc_ov_consumption_max =
2347 sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
2348 tc_consumption;
2350 if (tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] >
2351 (tc_ov_consumption_max - port->mtu)) {
2352 tc_ov_wm -= tc_ov_wm >> 7;
2353 if (tc_ov_wm < subport->tc_ov_wm_min)
2354 tc_ov_wm = subport->tc_ov_wm_min;
2356 return tc_ov_wm;
2357 }
2359 tc_ov_wm += (tc_ov_wm >> 7) + 1;
2360 if (tc_ov_wm > subport->tc_ov_wm_max)
2361 tc_ov_wm = subport->tc_ov_wm_max;
2363 return tc_ov_wm;
2364 }
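/*
 * Watermark dynamics above, restated (illustrative numbers): the BE
 * watermark shrinks by tc_ov_wm/128 per period in which the BE class
 * consumed almost all of the credits left over by the higher-priority
 * classes (within one MTU of the leftover), and grows by tc_ov_wm/128 + 1
 * per uncongested period, clamped to [tc_ov_wm_min, tc_ov_wm_max]. E.g.
 * tc_ov_wm = 12800 drops to 12700 after one congested period and climbs
 * back in roughly one uncongested period; the "+ 1" guarantees forward
 * progress even from tc_ov_wm_min.
 */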
2366 static inline void
2367 grinder_credits_update(struct rte_sched_port *port,
2368 struct rte_sched_subport *subport, uint32_t pos)
2369 {
2370 struct rte_sched_grinder *grinder = subport->grinder + pos;
2371 struct rte_sched_pipe *pipe = grinder->pipe;
2372 struct rte_sched_pipe_profile *params = grinder->pipe_params;
2373 struct rte_sched_subport_profile *sp = grinder->subport_params;
2374 uint64_t n_periods;
2375 uint32_t i;
2377 /* Subport TB */
2378 n_periods = (port->time - subport->tb_time) / sp->tb_period;
2379 subport->tb_credits += n_periods * sp->tb_credits_per_period;
2380 subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
2381 subport->tb_time += n_periods * sp->tb_period;
2383 /* Pipe TB */
2384 n_periods = (port->time - pipe->tb_time) / params->tb_period;
2385 pipe->tb_credits += n_periods * params->tb_credits_per_period;
2386 pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
2387 pipe->tb_time += n_periods * params->tb_period;
2389 /* Subport TCs */
2390 if (unlikely(port->time >= subport->tc_time)) {
2391 subport->tc_ov_wm =
2392 grinder_tc_ov_credits_update(port, subport, pos);
2394 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2395 subport->tc_credits[i] = sp->tc_credits_per_period[i];
2397 subport->tc_time = port->time + sp->tc_period;
2398 subport->tc_ov_period_id++;
2399 }
2401 /* Pipe TCs */
2402 if (unlikely(port->time >= pipe->tc_time)) {
2403 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2404 pipe->tc_credits[i] = params->tc_credits_per_period[i];
2405 pipe->tc_time = port->time + params->tc_period;
2406 }
2408 /* Pipe TCs - Oversubscription */
2409 if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
2410 pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;
2412 pipe->tc_ov_period_id = subport->tc_ov_period_id;
2413 }
2414 }
2416 #endif /* RTE_SCHED_TS_CREDITS_UPDATE, RTE_SCHED_SUBPORT_TC_OV */
2419 #ifndef RTE_SCHED_SUBPORT_TC_OV
2421 static inline int
2422 grinder_credits_check(struct rte_sched_port *port,
2423 struct rte_sched_subport *subport, uint32_t pos)
2424 {
2425 struct rte_sched_grinder *grinder = subport->grinder + pos;
2426 struct rte_sched_pipe *pipe = grinder->pipe;
2427 struct rte_mbuf *pkt = grinder->pkt;
2428 uint32_t tc_index = grinder->tc_index;
2429 uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
2430 uint64_t subport_tb_credits = subport->tb_credits;
2431 uint64_t subport_tc_credits = subport->tc_credits[tc_index];
2432 uint64_t pipe_tb_credits = pipe->tb_credits;
2433 uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
2434 int enough_credits;
2436 /* Check pipe and subport credits */
2437 enough_credits = (pkt_len <= subport_tb_credits) &&
2438 (pkt_len <= subport_tc_credits) &&
2439 (pkt_len <= pipe_tb_credits) &&
2440 (pkt_len <= pipe_tc_credits);
2442 if (!enough_credits)
2443 return 0;
2445 /* Update pipe and subport credits */
2446 subport->tb_credits -= pkt_len;
2447 subport->tc_credits[tc_index] -= pkt_len;
2448 pipe->tb_credits -= pkt_len;
2449 pipe->tc_credits[tc_index] -= pkt_len;
2451 return 1;
2452 }
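/*
 * Note (informational): a packet is sent only if it fits in all four
 * token buckets at once -- subport TB, subport TC, pipe TB, pipe TC --
 * and all four are then charged the same framed length, so the rate a
 * queue actually sees is the minimum of the four shapers.
 */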
2454 #else
2456 static inline int
2457 grinder_credits_check(struct rte_sched_port *port,
2458 struct rte_sched_subport *subport, uint32_t pos)
2459 {
2460 struct rte_sched_grinder *grinder = subport->grinder + pos;
2461 struct rte_sched_pipe *pipe = grinder->pipe;
2462 struct rte_mbuf *pkt = grinder->pkt;
2463 uint32_t tc_index = grinder->tc_index;
2464 uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
2465 uint64_t subport_tb_credits = subport->tb_credits;
2466 uint64_t subport_tc_credits = subport->tc_credits[tc_index];
2467 uint64_t pipe_tb_credits = pipe->tb_credits;
2468 uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
2469 uint64_t pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2470 uint64_t pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {0};
2471 uint64_t pipe_tc_ov_credits;
2472 uint32_t i;
2473 int enough_credits;
2475 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2476 pipe_tc_ov_mask1[i] = ~0LLU;
2478 pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASS_BE] = pipe->tc_ov_credits;
2479 pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASS_BE] = ~0LLU;
2480 pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
2482 /* Check pipe and subport credits */
2483 enough_credits = (pkt_len <= subport_tb_credits) &&
2484 (pkt_len <= subport_tc_credits) &&
2485 (pkt_len <= pipe_tb_credits) &&
2486 (pkt_len <= pipe_tc_credits) &&
2487 (pkt_len <= pipe_tc_ov_credits);
2489 if (!enough_credits)
2490 return 0;
2492 /* Update pipe and subport credits */
2493 subport->tb_credits -= pkt_len;
2494 subport->tc_credits[tc_index] -= pkt_len;
2495 pipe->tb_credits -= pkt_len;
2496 pipe->tc_credits[tc_index] -= pkt_len;
2497 pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;
2499 return 1;
2500 }
2502 #endif /* RTE_SCHED_SUBPORT_TC_OV */
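/*
 * The branchless masking above, restated (illustrative sketch, compiled
 * out): pipe_tc_ov_mask1[tc] is the effective oversubscription limit --
 * unlimited (~0) for the strict-priority TCs, pipe->tc_ov_credits for
 * BE -- while pipe_tc_ov_mask2[tc] decides whether the OV bucket is
 * charged (x & 0 == 0 for strict TCs, x & ~0 == x for BE). The
 * hypothetical helper below computes the same BE-only charge with an
 * explicit branch.
 */
#if 0
static inline uint64_t
tc_ov_charge_sketch(uint32_t tc_index, uint64_t pkt_len)
{
	/* Charge the oversubscription bucket only for best-effort traffic */
	if (tc_index == RTE_SCHED_TRAFFIC_CLASS_BE)
		return pkt_len;

	return 0;
}
#endif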
2505 static inline int
2506 grinder_schedule(struct rte_sched_port *port,
2507 struct rte_sched_subport *subport, uint32_t pos)
2508 {
2509 struct rte_sched_grinder *grinder = subport->grinder + pos;
2510 struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
2511 uint32_t qindex = grinder->qindex[grinder->qpos];
2512 struct rte_mbuf *pkt = grinder->pkt;
2513 uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
2514 uint32_t be_tc_active;
2516 if (!grinder_credits_check(port, subport, pos))
2517 return 0;
2519 /* Advance port time */
2520 port->time += pkt_len;
2522 /* Send packet */
2523 port->pkts_out[port->n_pkts_out++] = pkt;
2524 queue->qr++;
2526 be_tc_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE) ? ~0x0 : 0x0;
2527 grinder->wrr_tokens[grinder->qpos] +=
2528 (pkt_len * grinder->wrr_cost[grinder->qpos]) & be_tc_active;
2530 if (queue->qr == queue->qw) {
2531 rte_bitmap_clear(subport->bmp, qindex);
2532 grinder->qmask &= ~(1 << grinder->qpos);
2533 if (be_tc_active)
2534 grinder->wrr_mask[grinder->qpos] = 0;
2536 rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex);
2537 }
2539 rte_sched_port_pie_dequeue(subport, qindex, pkt_len, port->time_cpu_cycles);
2541 /* Reset pipe loop detection */
2542 subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
2543 grinder->productive = 1;
2545 return 1;
2546 }
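/*
 * Note (informational): be_tc_active is all-ones only for the
 * best-effort TC, so the WRR token charge (pkt_len * wrr_cost) is a
 * no-op for strict-priority TCs without taking a branch. E.g. a
 * 1500-byte framed packet on a BE queue with wrr_cost = 2 adds 3000
 * tokens to that queue.
 */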
2548 #ifdef SCHED_VECTOR_SSE4
2550 static inline int
2551 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2552 {
2553 __m128i index = _mm_set1_epi32(base_pipe);
2554 __m128i pipes = _mm_load_si128((__m128i *)subport->grinder_base_bmp_pos);
2555 __m128i res = _mm_cmpeq_epi32(pipes, index);
2557 pipes = _mm_load_si128((__m128i *)(subport->grinder_base_bmp_pos + 4));
2558 pipes = _mm_cmpeq_epi32(pipes, index);
2559 res = _mm_or_si128(res, pipes);
2561 if (_mm_testz_si128(res, res))
2562 return 0;
2564 return 1;
2565 }
2567 #elif defined(SCHED_VECTOR_NEON)
2569 static inline int
2570 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2571 {
2572 uint32x4_t index, pipes;
2573 uint32_t *pos = (uint32_t *)subport->grinder_base_bmp_pos;
2575 index = vmovq_n_u32(base_pipe);
2576 pipes = vld1q_u32(pos);
2577 if (!vminvq_u32(veorq_u32(pipes, index)))
2578 return 1;
2580 pipes = vld1q_u32(pos + 4);
2581 if (!vminvq_u32(veorq_u32(pipes, index)))
2582 return 1;
2584 return 0;
2585 }
2587 #else
2589 static inline int
2590 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2591 {
2592 uint32_t i;
2594 for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) {
2595 if (subport->grinder_base_bmp_pos[i] == base_pipe)
2596 return 1;
2597 }
2599 return 0;
2600 }
2602 #endif /* RTE_SCHED_OPTIMIZATIONS */
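/*
 * Rationale for the variants above (informational): all three answer
 * "is base_pipe already owned by some grinder?". The SSE4 version
 * compares 8 grinder slots with _mm_cmpeq_epi32 and tests the combined
 * result with _mm_testz_si128; the NEON version relies on
 * veorq_u32(pipes, index) producing an all-zero lane exactly on a match,
 * which vminvq_u32() == 0 detects; the scalar fallback is the plain loop.
 */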
2604 static inline void
2605 grinder_pcache_populate(struct rte_sched_subport *subport,
2606 uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
2607 {
2608 struct rte_sched_grinder *grinder = subport->grinder + pos;
2609 uint16_t w[4];
2611 grinder->pcache_w = 0;
2612 grinder->pcache_r = 0;
2614 w[0] = (uint16_t) bmp_slab;
2615 w[1] = (uint16_t) (bmp_slab >> 16);
2616 w[2] = (uint16_t) (bmp_slab >> 32);
2617 w[3] = (uint16_t) (bmp_slab >> 48);
2619 grinder->pcache_qmask[grinder->pcache_w] = w[0];
2620 grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
2621 grinder->pcache_w += (w[0] != 0);
2623 grinder->pcache_qmask[grinder->pcache_w] = w[1];
2624 grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
2625 grinder->pcache_w += (w[1] != 0);
2627 grinder->pcache_qmask[grinder->pcache_w] = w[2];
2628 grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
2629 grinder->pcache_w += (w[2] != 0);
2631 grinder->pcache_qmask[grinder->pcache_w] = w[3];
2632 grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
2633 grinder->pcache_w += (w[3] != 0);
2634 }
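/*
 * Worked example (hypothetical slab): for bmp_slab = 0x0000000500030000
 * at bmp_pos = 64, the 16-bit words are w[0] = 0, w[1] = 0x0003,
 * w[2] = 0x0005, w[3] = 0; only the two non-zero words are cached, as
 * {qmask 0x0003, qindex 80} and {qmask 0x0005, qindex 96}, leaving
 * pcache_w = 2. Pipes with no active queues never enter the cache.
 */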
2636 static inline void
2637 grinder_tccache_populate(struct rte_sched_subport *subport,
2638 uint32_t pos, uint32_t qindex, uint16_t qmask)
2639 {
2640 struct rte_sched_grinder *grinder = subport->grinder + pos;
2641 uint8_t b, i;
2643 grinder->tccache_w = 0;
2644 grinder->tccache_r = 0;
2646 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
2647 b = (uint8_t) ((qmask >> i) & 0x1);
2648 grinder->tccache_qmask[grinder->tccache_w] = b;
2649 grinder->tccache_qindex[grinder->tccache_w] = qindex + i;
2650 grinder->tccache_w += (b != 0);
2651 }
2653 b = (uint8_t) (qmask >> (RTE_SCHED_TRAFFIC_CLASS_BE));
2654 grinder->tccache_qmask[grinder->tccache_w] = b;
2655 grinder->tccache_qindex[grinder->tccache_w] = qindex +
2656 RTE_SCHED_TRAFFIC_CLASS_BE;
2657 grinder->tccache_w += (b != 0);
2658 }
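/*
 * Worked example (hypothetical qmask, assuming the upstream value
 * RTE_SCHED_TRAFFIC_CLASS_BE = 12): qmask = 0x5001 with qindex = 1024
 * yields one entry {qmask 1, qindex 1024} for TC0 from the loop, nothing
 * for the empty TCs 1..11, and one aggregate entry {qmask 0x5,
 * qindex 1036} covering BE queues 0 and 2, so tccache_w ends up 2.
 */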
2660 static inline int
2661 grinder_next_tc(struct rte_sched_port *port,
2662 struct rte_sched_subport *subport, uint32_t pos)
2663 {
2664 struct rte_sched_grinder *grinder = subport->grinder + pos;
2665 struct rte_mbuf **qbase;
2666 uint32_t qindex;
2667 uint16_t qsize;
2669 if (grinder->tccache_r == grinder->tccache_w)
2670 return 0;
2672 qindex = grinder->tccache_qindex[grinder->tccache_r];
2673 qbase = rte_sched_subport_pipe_qbase(subport, qindex);
2674 qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2676 grinder->tc_index = rte_sched_port_pipe_tc(port, qindex);
2677 grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
2678 grinder->qsize = qsize;
2680 if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
2681 grinder->queue[0] = subport->queue + qindex;
2682 grinder->qbase[0] = qbase;
2683 grinder->qindex[0] = qindex;
2684 grinder->tccache_r++;
2686 return 1;
2687 }
2689 grinder->queue[0] = subport->queue + qindex;
2690 grinder->queue[1] = subport->queue + qindex + 1;
2691 grinder->queue[2] = subport->queue + qindex + 2;
2692 grinder->queue[3] = subport->queue + qindex + 3;
2694 grinder->qbase[0] = qbase;
2695 grinder->qbase[1] = qbase + qsize;
2696 grinder->qbase[2] = qbase + 2 * qsize;
2697 grinder->qbase[3] = qbase + 3 * qsize;
2699 grinder->qindex[0] = qindex;
2700 grinder->qindex[1] = qindex + 1;
2701 grinder->qindex[2] = qindex + 2;
2702 grinder->qindex[3] = qindex + 3;
2704 grinder->tccache_r++;
2706 return 1;
2707 }
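/*
 * Layout note (informational): the four best-effort queues of a pipe are
 * stored back to back in one mbuf pointer array, so queue k starts at
 * qbase + k * qsize; the strict-priority TCs own a single queue each,
 * which is why the branch above fills only slot 0 for them.
 */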
2708 static inline int
2709 grinder_next_pipe(struct rte_sched_port *port,
2710 struct rte_sched_subport *subport, uint32_t pos)
2711 {
2712 struct rte_sched_grinder *grinder = subport->grinder + pos;
2713 uint32_t pipe_qindex;
2714 uint16_t pipe_qmask;
2716 if (grinder->pcache_r < grinder->pcache_w) {
2717 pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
2718 pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
2719 grinder->pcache_r++;
2720 } else {
2721 uint64_t bmp_slab = 0;
2722 uint32_t bmp_pos = 0;
2724 /* Get another non-empty pipe group */
2725 if (unlikely(rte_bitmap_scan(subport->bmp, &bmp_pos, &bmp_slab) <= 0))
2726 return 0;
2728 #ifdef RTE_SCHED_DEBUG
2729 debug_check_queue_slab(subport, bmp_pos, bmp_slab);
2730 #endif
2732 /* Return if pipe group already in one of the other grinders */
2733 subport->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
2734 if (unlikely(grinder_pipe_exists(subport, bmp_pos)))
2735 return 0;
2737 subport->grinder_base_bmp_pos[pos] = bmp_pos;
2739 /* Install new pipe group into grinder's pipe cache */
2740 grinder_pcache_populate(subport, pos, bmp_pos, bmp_slab);
2742 pipe_qmask = grinder->pcache_qmask[0];
2743 pipe_qindex = grinder->pcache_qindex[0];
2744 grinder->pcache_r = 1;
2745 }
2747 /* Install new pipe in the grinder */
2748 grinder->pindex = pipe_qindex >> 4;
2749 grinder->subport = subport;
2750 grinder->pipe = subport->pipe + grinder->pindex;
2751 grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
2752 grinder->productive = 0;
2754 grinder_tccache_populate(subport, pos, pipe_qindex, pipe_qmask);
2755 grinder_next_tc(port, subport, pos);
2757 /* Check for pipe exhaustion */
2758 if (grinder->pindex == subport->pipe_loop) {
2759 subport->pipe_exhaustion = 1;
2760 subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
2761 }
2763 return 1;
2764 }
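/*
 * Note (informational): each pipe owns 16 queues, so a queue index maps
 * to its pipe as pipe_qindex >> 4, and one 64-bit bitmap slab covers
 * exactly four consecutive pipes -- a single rte_bitmap_scan() can
 * therefore feed the pipe cache with up to four pipe entries at once.
 */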
2767 static inline void
2768 grinder_wrr_load(struct rte_sched_subport *subport, uint32_t pos)
2769 {
2770 struct rte_sched_grinder *grinder = subport->grinder + pos;
2771 struct rte_sched_pipe *pipe = grinder->pipe;
2772 struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
2773 uint32_t qmask = grinder->qmask;
2775 grinder->wrr_tokens[0] =
2776 ((uint16_t) pipe->wrr_tokens[0]) << RTE_SCHED_WRR_SHIFT;
2777 grinder->wrr_tokens[1] =
2778 ((uint16_t) pipe->wrr_tokens[1]) << RTE_SCHED_WRR_SHIFT;
2779 grinder->wrr_tokens[2] =
2780 ((uint16_t) pipe->wrr_tokens[2]) << RTE_SCHED_WRR_SHIFT;
2781 grinder->wrr_tokens[3] =
2782 ((uint16_t) pipe->wrr_tokens[3]) << RTE_SCHED_WRR_SHIFT;
2784 grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
2785 grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
2786 grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
2787 grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;
2789 grinder->wrr_cost[0] = pipe_params->wrr_cost[0];
2790 grinder->wrr_cost[1] = pipe_params->wrr_cost[1];
2791 grinder->wrr_cost[2] = pipe_params->wrr_cost[2];
2792 grinder->wrr_cost[3] = pipe_params->wrr_cost[3];
2793 }
2795 static inline void
2796 grinder_wrr_store(struct rte_sched_subport *subport, uint32_t pos)
2797 {
2798 struct rte_sched_grinder *grinder = subport->grinder + pos;
2799 struct rte_sched_pipe *pipe = grinder->pipe;
2801 pipe->wrr_tokens[0] =
2802 (grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >>
2803 RTE_SCHED_WRR_SHIFT;
2804 pipe->wrr_tokens[1] =
2805 (grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >>
2806 RTE_SCHED_WRR_SHIFT;
2807 pipe->wrr_tokens[2] =
2808 (grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >>
2809 RTE_SCHED_WRR_SHIFT;
2810 pipe->wrr_tokens[3] =
2811 (grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >>
2812 RTE_SCHED_WRR_SHIFT;
2813 }
2815 static inline void
2816 grinder_wrr(struct rte_sched_subport *subport, uint32_t pos)
2817 {
2818 struct rte_sched_grinder *grinder = subport->grinder + pos;
2819 uint16_t wrr_tokens_min;
2821 grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
2822 grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
2823 grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
2824 grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];
2826 grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
2827 wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];
2829 grinder->wrr_tokens[0] -= wrr_tokens_min;
2830 grinder->wrr_tokens[1] -= wrr_tokens_min;
2831 grinder->wrr_tokens[2] -= wrr_tokens_min;
2832 grinder->wrr_tokens[3] -= wrr_tokens_min;
2833 }
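/*
 * Worked example (hypothetical tokens): with wrr_tokens = {48, 16, 32,
 * 80} and all four queues active, queue 1 holds the minimum, so qpos = 1
 * and {32, 0, 16, 64} remain after the common minimum is subtracted.
 * Inactive queues are first forced to 0xFFFF by the mask OR, so they can
 * never win the minimum search.
 */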
2836 #define grinder_evict(subport, pos)
2838 static inline void
2839 grinder_prefetch_pipe(struct rte_sched_subport *subport, uint32_t pos)
2840 {
2841 struct rte_sched_grinder *grinder = subport->grinder + pos;
2843 rte_prefetch0(grinder->pipe);
2844 rte_prefetch0(grinder->queue[0]);
2845 }
2847 static inline void
2848 grinder_prefetch_tc_queue_arrays(struct rte_sched_subport *subport, uint32_t pos)
2849 {
2850 struct rte_sched_grinder *grinder = subport->grinder + pos;
2851 uint16_t qsize, qr[RTE_SCHED_MAX_QUEUES_PER_TC];
2853 qsize = grinder->qsize;
2854 grinder->qpos = 0;
2856 if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
2857 qr[0] = grinder->queue[0]->qr & (qsize - 1);
2859 rte_prefetch0(grinder->qbase[0] + qr[0]);
2860 return;
2861 }
2863 qr[0] = grinder->queue[0]->qr & (qsize - 1);
2864 qr[1] = grinder->queue[1]->qr & (qsize - 1);
2865 qr[2] = grinder->queue[2]->qr & (qsize - 1);
2866 qr[3] = grinder->queue[3]->qr & (qsize - 1);
2868 rte_prefetch0(grinder->qbase[0] + qr[0]);
2869 rte_prefetch0(grinder->qbase[1] + qr[1]);
2871 grinder_wrr_load(subport, pos);
2872 grinder_wrr(subport, pos);
2874 rte_prefetch0(grinder->qbase[2] + qr[2]);
2875 rte_prefetch0(grinder->qbase[3] + qr[3]);
2876 }
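/*
 * Note (informational): for the best-effort TC the WRR state load and
 * the first WRR selection are deliberately slotted between the
 * prefetches of qbase[0..1] and qbase[2..3] above, so the memory
 * accesses overlap with useful computation instead of stalling on
 * back-to-back prefetches.
 */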
2878 static inline void
2879 grinder_prefetch_mbuf(struct rte_sched_subport *subport, uint32_t pos)
2880 {
2881 struct rte_sched_grinder *grinder = subport->grinder + pos;
2882 uint32_t qpos = grinder->qpos;
2883 struct rte_mbuf **qbase = grinder->qbase[qpos];
2884 uint16_t qsize = grinder->qsize;
2885 uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);
2887 grinder->pkt = qbase[qr];
2888 rte_prefetch0(grinder->pkt);
2890 if (unlikely((qr & 0x7) == 7)) {
2891 uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);
2893 rte_prefetch0(qbase + qr_next);
2894 }
2895 }
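/*
 * Note (informational): eight 8-byte mbuf pointers fill one 64-byte
 * cache line, so when the current read position is the last slot of a
 * line ((qr & 0x7) == 7) the next line of the queue array is prefetched
 * for the subsequent dequeue.
 */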
2897 static inline uint32_t
2898 grinder_handle(struct rte_sched_port *port,
2899 struct rte_sched_subport *subport, uint32_t pos)
2900 {
2901 struct rte_sched_grinder *grinder = subport->grinder + pos;
2903 switch (grinder->state) {
2904 case e_GRINDER_PREFETCH_PIPE:
2905 {
2906 if (grinder_next_pipe(port, subport, pos)) {
2907 grinder_prefetch_pipe(subport, pos);
2908 subport->busy_grinders++;
2910 grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
2911 return 0;
2912 }
2914 return 0;
2915 }
2917 case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
2918 {
2919 struct rte_sched_pipe *pipe = grinder->pipe;
2921 grinder->pipe_params = subport->pipe_profiles + pipe->profile;
2922 grinder->subport_params = port->subport_profiles +
2923 subport->profile;
2925 grinder_prefetch_tc_queue_arrays(subport, pos);
2926 grinder_credits_update(port, subport, pos);
2928 grinder->state = e_GRINDER_PREFETCH_MBUF;
2929 return 0;
2930 }
2932 case e_GRINDER_PREFETCH_MBUF:
2933 {
2934 grinder_prefetch_mbuf(subport, pos);
2936 grinder->state = e_GRINDER_READ_MBUF;
2937 return 0;
2938 }
2940 case e_GRINDER_READ_MBUF:
2941 {
2942 uint32_t wrr_active, result = 0;
2944 result = grinder_schedule(port, subport, pos);
2946 wrr_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE);
2948 /* Look for next packet within the same TC */
2949 if (result && grinder->qmask) {
2950 if (wrr_active)
2951 grinder_wrr(subport, pos);
2953 grinder_prefetch_mbuf(subport, pos);
2955 return 1;
2956 }
2958 if (wrr_active)
2959 grinder_wrr_store(subport, pos);
2961 /* Look for another active TC within same pipe */
2962 if (grinder_next_tc(port, subport, pos)) {
2963 grinder_prefetch_tc_queue_arrays(subport, pos);
2965 grinder->state = e_GRINDER_PREFETCH_MBUF;
2966 return result;
2967 }
2969 if (grinder->productive == 0 &&
2970 subport->pipe_loop == RTE_SCHED_PIPE_INVALID)
2971 subport->pipe_loop = grinder->pindex;
2973 grinder_evict(subport, pos);
2975 /* Look for another active pipe */
2976 if (grinder_next_pipe(port, subport, pos)) {
2977 grinder_prefetch_pipe(subport, pos);
2979 grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
2980 return result;
2981 }
2983 /* No active pipe found */
2984 subport->busy_grinders--;
2986 grinder->state = e_GRINDER_PREFETCH_PIPE;
2987 return result;
2988 }
2990 default:
2991 rte_panic("Algorithmic error (invalid state)\n");
2992 return 0;
2993 }
2994 }
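/*
 * State machine summary (informational): each grinder cycles through
 * PREFETCH_PIPE -> PREFETCH_TC_QUEUE_ARRAYS -> PREFETCH_MBUF ->
 * READ_MBUF, emitting at most one packet per invocation. The dequeue
 * loop below rotates across RTE_SCHED_PORT_N_GRINDERS such machines, so
 * the prefetches issued by one grinder complete while the others run.
 */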
2996 static inline void
2997 rte_sched_port_time_resync(struct rte_sched_port *port)
2998 {
2999 uint64_t cycles = rte_get_tsc_cycles();
3000 uint64_t cycles_diff;
3001 uint64_t bytes_diff;
3002 uint32_t i;
3004 if (cycles < port->time_cpu_cycles)
3005 port->time_cpu_cycles = 0;
3007 cycles_diff = cycles - port->time_cpu_cycles;
3008 /* Compute elapsed time in bytes */
3009 bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT,
3010 port->inv_cycles_per_byte);
3012 /* Advance port time */
3013 port->time_cpu_cycles +=
3014 (bytes_diff * port->cycles_per_byte) >> RTE_SCHED_TIME_SHIFT;
3015 port->time_cpu_bytes += bytes_diff;
3016 if (port->time < port->time_cpu_bytes)
3017 port->time = port->time_cpu_bytes;
3019 /* Reset pipe loop detection */
3020 for (i = 0; i < port->n_subports_per_port; i++)
3021 port->subports[i]->pipe_loop = RTE_SCHED_PIPE_INVALID;
3022 }
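/*
 * Worked example (hypothetical rates): with a 2 GHz TSC and a 10 Gbps
 * port, cycles_per_byte = 1.6, stored scaled by 2^RTE_SCHED_TIME_SHIFT
 * = 256 as ~410. A gap of 1000 cycles then converts to bytes_diff =
 * (1000 << 8) / 410 = ~624 byte-times, and time_cpu_cycles is advanced
 * by the exact cycle equivalent of those bytes so that the division
 * remainder carries into the next resync instead of accumulating drift.
 */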
3024 static inline int
3025 rte_sched_port_exceptions(struct rte_sched_subport *subport, int second_pass)
3026 {
3027 int exceptions;
3029 /* Check if any exception flag is set */
3030 exceptions = (second_pass && subport->busy_grinders == 0) ||
3031 (subport->pipe_exhaustion == 1);
3033 /* Clear exception flags */
3034 subport->pipe_exhaustion = 0;
3036 return exceptions;
3037 }
3039 int
3040 rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
3041 {
3042 struct rte_sched_subport *subport;
3043 uint32_t subport_id = port->subport_id;
3044 uint32_t i, n_subports = 0, count;
3046 port->pkts_out = pkts;
3047 port->n_pkts_out = 0;
3049 rte_sched_port_time_resync(port);
3051 /* Take each queue in the grinder one step further */
3052 for (i = 0, count = 0; ; i++) {
3053 subport = port->subports[subport_id];
3055 count += grinder_handle(port, subport,
3056 i & (RTE_SCHED_PORT_N_GRINDERS - 1));
3058 if (count == n_pkts) {
3059 subport_id++;
3061 if (subport_id == port->n_subports_per_port)
3062 subport_id = 0;
3064 port->subport_id = subport_id;
3065 break;
3066 }
3068 if (rte_sched_port_exceptions(subport, i >= RTE_SCHED_PORT_N_GRINDERS)) {
3069 i = 0;
3070 subport_id++;
3071 n_subports++;
3072 }
3074 if (subport_id == port->n_subports_per_port)
3075 subport_id = 0;
3077 if (n_subports == port->n_subports_per_port) {
3078 port->subport_id = subport_id;
3079 break;
3080 }
3081 }
3083 return count;
3084 }
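/*
 * Usage sketch (illustrative, compiled out; assumes <rte_ethdev.h>,
 * which this file does not include): a typical TX core pairs the
 * dequeue above with rte_eth_tx_burst(). Port and queue ids below are
 * placeholders.
 */
#if 0
static void
tx_loop_sketch(struct rte_sched_port *sched, uint16_t ethdev_port_id)
{
	struct rte_mbuf *pkts[64];

	for ( ; ; ) {
		uint32_t n = rte_sched_port_dequeue(sched, pkts, 64);
		uint16_t sent = rte_eth_tx_burst(ethdev_port_id, 0, pkts,
			(uint16_t)n);

		/* Free whatever the NIC did not accept */
		while (sent < n)
			rte_pktmbuf_free(pkts[sent++]);
	}
}
#endif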