/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf.h>
#include <rte_bitmap.h>
#include <rte_reciprocal.h>

#include "rte_sched.h"
#include "rte_sched_common.h"
#include "rte_approx.h"

#ifdef __INTEL_COMPILER
#pragma warning(disable:2259) /* conversion may lose significant bits */
#endif

#ifdef RTE_SCHED_VECTOR
#include <rte_vect.h>

#ifdef RTE_ARCH_X86
#define SCHED_VECTOR_SSE4
#elif defined(RTE_MACHINE_CPUFLAG_NEON)
#define SCHED_VECTOR_NEON
#endif

#endif

#define RTE_SCHED_TB_RATE_CONFIG_ERR          (1e-7)
#define RTE_SCHED_WRR_SHIFT                   3
#define RTE_SCHED_MAX_QUEUES_PER_TC           RTE_SCHED_BE_QUEUES_PER_PIPE
#define RTE_SCHED_GRINDER_PCACHE_SIZE         (64 / RTE_SCHED_QUEUES_PER_PIPE)
#define RTE_SCHED_PIPE_INVALID                UINT32_MAX
#define RTE_SCHED_BMP_POS_INVALID             UINT32_MAX

/* Scaling for cycles_per_byte calculation
 * Chosen so that minimum rate is 480 bit/sec
 */
#define RTE_SCHED_TIME_SHIFT                  8
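/* Editorial note (not part of the original sources), an illustrative reading
 * of the constant above: cycles_per_byte is later computed as
 * (tsc_hz << RTE_SCHED_TIME_SHIFT) / rate and stored in a uint32_t. With a
 * 1 GHz TSC, (1e9 << 8) / rate fits in 32 bits only when rate >= 60 bytes/sec,
 * i.e. 480 bit/sec, which matches the minimum rate quoted above.
 */
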
struct rte_sched_pipe_profile {
	/* Token bucket (TB) */
	uint32_t tb_period;
	uint32_t tb_credits_per_period;
	uint32_t tb_size;

	/* Pipe traffic classes */
	uint32_t tc_period;
	uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint8_t tc_ov_weight;

	/* Pipe best-effort traffic class queues */
	uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
};

struct rte_sched_pipe {
	/* Token bucket (TB) */
	uint64_t tb_time; /* time of last update */
	uint32_t tb_credits;

	/* Pipe profile and flags */
	uint32_t profile;

	/* Traffic classes (TCs) */
	uint64_t tc_time; /* time of next update */
	uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

	/* Weighted Round Robin (WRR) */
	uint8_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];

	/* TC oversubscription */
	uint32_t tc_ov_credits;
	uint8_t tc_ov_period_id;
} __rte_cache_aligned;

struct rte_sched_queue {
	uint16_t qw;
	uint16_t qr;
};

struct rte_sched_queue_extra {
	struct rte_sched_queue_stats stats;
#ifdef RTE_SCHED_RED
	struct rte_red red;
#endif
};

enum grinder_state {
	e_GRINDER_PREFETCH_PIPE = 0,
	e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
	e_GRINDER_PREFETCH_MBUF,
	e_GRINDER_READ_MBUF
};

struct rte_sched_grinder {
	/* Pipe cache */
	uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
	uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
	uint32_t pcache_w;
	uint32_t pcache_r;

	/* Current pipe */
	enum grinder_state state;
	uint32_t productive;
	uint32_t pindex;
	struct rte_sched_subport *subport;
	struct rte_sched_pipe *pipe;
	struct rte_sched_pipe_profile *pipe_params;

	/* TC cache */
	uint8_t tccache_qmask[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tccache_qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tccache_w;
	uint32_t tccache_r;

	/* Current TC */
	uint32_t tc_index;
	struct rte_sched_queue *queue[RTE_SCHED_MAX_QUEUES_PER_TC];
	struct rte_mbuf **qbase[RTE_SCHED_MAX_QUEUES_PER_TC];
	uint32_t qindex[RTE_SCHED_MAX_QUEUES_PER_TC];
	uint16_t qsize;
	uint32_t qmask;
	uint32_t qpos;
	struct rte_mbuf *pkt;

	/* WRR */
	uint16_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
	uint16_t wrr_mask[RTE_SCHED_BE_QUEUES_PER_PIPE];
	uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
};

struct rte_sched_subport {
	/* Token bucket (TB) */
	uint64_t tb_time; /* time of last update */
	uint32_t tb_period;
	uint32_t tb_credits_per_period;
	uint32_t tb_size;
	uint32_t tb_credits;

	/* Traffic classes (TCs) */
	uint64_t tc_time; /* time of next update */
	uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tc_period;

	/* TC oversubscription */
	uint32_t tc_ov_wm;
	uint32_t tc_ov_wm_min;
	uint32_t tc_ov_wm_max;
	uint8_t tc_ov_period_id;
	uint8_t tc_ov;
	uint32_t tc_ov_n;
	double tc_ov_rate;

	/* Statistics */
	struct rte_sched_subport_stats stats;

	/* Subport pipes */
	uint32_t n_pipes_per_subport_enabled;
	uint32_t n_pipe_profiles;
	uint32_t n_max_pipe_profiles;

	/* Pipe best-effort TC rate */
	uint32_t pipe_tc_be_rate_max;

	/* Pipe queues size */
	uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

#ifdef RTE_SCHED_RED
	struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
#endif

	/* Scheduling loop detection */
	uint32_t pipe_loop;
	uint32_t pipe_exhaustion;

	/* Bitmap */
	struct rte_bitmap *bmp;
	uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;

	/* Grinders */
	struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
	uint32_t busy_grinders;

	/* Queue base calculation */
	uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
	uint32_t qsize_sum;

	struct rte_sched_pipe *pipe;
	struct rte_sched_queue *queue;
	struct rte_sched_queue_extra *queue_extra;
	struct rte_sched_pipe_profile *pipe_profiles;
	uint8_t *bmp_array;
	struct rte_mbuf **queue_array;
	uint8_t memory[0] __rte_cache_aligned;
} __rte_cache_aligned;

struct rte_sched_port {
	/* User parameters */
	uint32_t n_subports_per_port;
	uint32_t n_pipes_per_subport;
	uint32_t n_pipes_per_subport_log2;
	uint16_t pipe_queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint8_t pipe_tc[RTE_SCHED_QUEUES_PER_PIPE];
	uint8_t tc_queue[RTE_SCHED_QUEUES_PER_PIPE];
	uint32_t rate;
	uint32_t mtu;
	uint32_t frame_overhead;
	int socket;
	uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t n_pipe_profiles;
	uint32_t n_max_pipe_profiles;
	uint32_t pipe_tc_be_rate_max;
#ifdef RTE_SCHED_RED
	struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
#endif

	/* Timing */
	uint64_t time_cpu_cycles;     /* Current CPU time measured in CPU cycles */
	uint64_t time_cpu_bytes;      /* Current CPU time measured in bytes */
	uint64_t time;                /* Current NIC TX time measured in bytes */
	struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */

	/* Scheduling loop detection */
	uint32_t pipe_loop;
	uint32_t pipe_exhaustion;

	/* Bitmap */
	struct rte_bitmap *bmp;
	uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;

	/* Grinders */
	struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
	uint32_t busy_grinders;
	struct rte_mbuf **pkts_out;
	uint32_t n_pkts_out;
	uint32_t subport_id;

	/* Queue base calculation */
	uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
	uint32_t qsize_sum;

	/* Large data structures */
	struct rte_sched_subport *subports[0];
	struct rte_sched_subport *subport;
	struct rte_sched_pipe *pipe;
	struct rte_sched_queue *queue;
	struct rte_sched_queue_extra *queue_extra;
	struct rte_sched_pipe_profile *pipe_profiles;
	uint8_t *bmp_array;
	struct rte_mbuf **queue_array;
	uint8_t memory[0] __rte_cache_aligned;
} __rte_cache_aligned;

enum rte_sched_port_array {
	e_RTE_SCHED_PORT_ARRAY_SUBPORT = 0,
	e_RTE_SCHED_PORT_ARRAY_PIPE,
	e_RTE_SCHED_PORT_ARRAY_QUEUE,
	e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA,
	e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES,
	e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY,
	e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY,
	e_RTE_SCHED_PORT_ARRAY_TOTAL,
};

enum rte_sched_subport_array {
	e_RTE_SCHED_SUBPORT_ARRAY_PIPE = 0,
	e_RTE_SCHED_SUBPORT_ARRAY_QUEUE,
	e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA,
	e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES,
	e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY,
	e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY,
	e_RTE_SCHED_SUBPORT_ARRAY_TOTAL,
};

static inline uint32_t
rte_sched_subport_pipe_queues(struct rte_sched_subport *subport)
{
	return RTE_SCHED_QUEUES_PER_PIPE * subport->n_pipes_per_subport_enabled;
}

static inline struct rte_mbuf **
rte_sched_subport_pipe_qbase(struct rte_sched_subport *subport, uint32_t qindex)
{
	uint32_t pindex = qindex >> 4;
	uint32_t qpos = qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1);

	return (subport->queue_array + pindex *
		subport->qsize_sum + subport->qsize_add[qpos]);
}
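/* Worked example (editorial note, assuming RTE_SCHED_QUEUES_PER_PIPE == 16,
 * hence the ">> 4" above): qindex = 37 gives pindex = 2 (third pipe) and
 * qpos = 5 (sixth queue of that pipe), so the queue storage starts at
 * queue_array + 2 * qsize_sum + qsize_add[5].
 */
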
static inline uint16_t
rte_sched_subport_pipe_qsize(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t qindex)
{
	uint32_t tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

	return subport->qsize[tc];
}

static inline uint32_t
rte_sched_port_queues_per_port(struct rte_sched_port *port)
{
	return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport * port->n_subports_per_port;
}

static inline uint16_t
rte_sched_port_pipe_queue(struct rte_sched_port *port, uint32_t traffic_class)
{
	uint16_t pipe_queue = port->pipe_queue[traffic_class];

	return pipe_queue;
}

static inline uint8_t
rte_sched_port_pipe_tc(struct rte_sched_port *port, uint32_t qindex)
{
	uint8_t pipe_tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

	return pipe_tc;
}

static inline uint8_t
rte_sched_port_tc_queue(struct rte_sched_port *port, uint32_t qindex)
{
	uint8_t tc_queue = port->tc_queue[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

	return tc_queue;
}

static int
pipe_profile_check(struct rte_sched_pipe_params *params,
	uint32_t rate, uint16_t *qsize)
{
	uint32_t i;

	/* Pipe parameters */
	if (params == NULL) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for parameter params\n", __func__);
		return -EINVAL;
	}

	/* TB rate: non-zero, not greater than port rate */
	if (params->tb_rate == 0 ||
		params->tb_rate > rate) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for tb rate\n", __func__);
		return -EINVAL;
	}

	/* TB size: non-zero */
	if (params->tb_size == 0) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for tb size\n", __func__);
		return -EINVAL;
	}

	/* TC rate: non-zero if qsize non-zero, less than pipe rate */
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		if ((qsize[i] == 0 && params->tc_rate[i] != 0) ||
			(qsize[i] != 0 && (params->tc_rate[i] == 0 ||
			params->tc_rate[i] > params->tb_rate))) {
			RTE_LOG(ERR, SCHED,
				"%s: Incorrect value for qsize or tc_rate\n", __func__);
			return -EINVAL;
		}
	}

	if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
		qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for be traffic class rate\n", __func__);
		return -EINVAL;
	}

	/* TC period: non-zero */
	if (params->tc_period == 0) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for tc period\n", __func__);
		return -EINVAL;
	}

	/* Best effort tc oversubscription weight: non-zero */
	if (params->tc_ov_weight == 0) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for tc ov weight\n", __func__);
		return -EINVAL;
	}

	/* Queue WRR weights: non-zero */
	for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
		if (params->wrr_weights[i] == 0) {
			RTE_LOG(ERR, SCHED,
				"%s: Incorrect value for wrr weight\n", __func__);
			return -EINVAL;
		}
	}

	return 0;
}

static int
rte_sched_port_check_params(struct rte_sched_port_params *params)
{
	if (params == NULL) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for parameter params\n", __func__);
		return -EINVAL;
	}

	/* socket */
	if (params->socket < 0) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for socket id\n", __func__);
		return -EINVAL;
	}

	/* rate */
	if (params->rate == 0) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for rate\n", __func__);
		return -EINVAL;
	}

	/* mtu */
	if (params->mtu == 0) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for mtu\n", __func__);
		return -EINVAL;
	}

	/* n_subports_per_port: non-zero, limited to 16 bits, power of 2 */
	if (params->n_subports_per_port == 0 ||
		params->n_subports_per_port > 1u << 16 ||
		!rte_is_power_of_2(params->n_subports_per_port)) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for number of subports\n", __func__);
		return -EINVAL;
	}

	/* n_pipes_per_subport: non-zero, power of 2 */
	if (params->n_pipes_per_subport == 0 ||
		!rte_is_power_of_2(params->n_pipes_per_subport)) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for maximum pipes number\n", __func__);
		return -EINVAL;
	}

	return 0;
}

static uint32_t
rte_sched_subport_get_array_base(struct rte_sched_subport_params *params,
	enum rte_sched_subport_array array)
{
	uint32_t n_pipes_per_subport = params->n_pipes_per_subport_enabled;
	uint32_t n_subport_pipe_queues =
		RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport;

	uint32_t size_pipe = n_pipes_per_subport * sizeof(struct rte_sched_pipe);
	uint32_t size_queue =
		n_subport_pipe_queues * sizeof(struct rte_sched_queue);
	uint32_t size_queue_extra
		= n_subport_pipe_queues * sizeof(struct rte_sched_queue_extra);
	uint32_t size_pipe_profiles = params->n_max_pipe_profiles *
		sizeof(struct rte_sched_pipe_profile);
	uint32_t size_bmp_array =
		rte_bitmap_get_memory_footprint(n_subport_pipe_queues);
	uint32_t size_per_pipe_queue_array, size_queue_array;

	uint32_t base, i;

	size_per_pipe_queue_array = 0;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		if (i < RTE_SCHED_TRAFFIC_CLASS_BE)
			size_per_pipe_queue_array +=
				params->qsize[i] * sizeof(struct rte_mbuf *);
		else
			size_per_pipe_queue_array += RTE_SCHED_MAX_QUEUES_PER_TC *
				params->qsize[i] * sizeof(struct rte_mbuf *);
	}
	size_queue_array = n_pipes_per_subport * size_per_pipe_queue_array;

	base = 0;

	if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_pipe);

	if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_queue);

	if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);

	if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);

	if (array == e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);

	if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);

	return base;
}

static void
rte_sched_subport_config_qsize(struct rte_sched_subport *subport)
{
	uint32_t i;

	subport->qsize_add[0] = 0;

	/* Strict priority traffic class */
	for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1];

	/* Best-effort traffic class */
	subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] =
		subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE] +
		subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
	subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] =
		subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] +
		subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
	subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] =
		subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] +
		subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];

	subport->qsize_sum = subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] +
		subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
}
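/* Worked example (editorial note): with 13 traffic classes (best-effort index
 * 12) and 16 queues per pipe, all strict-priority qsize = 64 and best-effort
 * qsize = 128: qsize_add[0..12] = {0, 64, ..., 704, 768} (running sum over the
 * strict-priority queues), qsize_add[13..15] = {896, 1024, 1152} for the
 * remaining three best-effort queues, and qsize_sum = 1152 + 128 = 1280 mbuf
 * pointers per pipe.
 */
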
static void
rte_sched_port_log_pipe_profile(struct rte_sched_subport *subport, uint32_t i)
{
	struct rte_sched_pipe_profile *p = subport->pipe_profiles + i;

	RTE_LOG(DEBUG, SCHED, "Low level config for pipe profile %u:\n"
		"	Token bucket: period = %u, credits per period = %u, size = %u\n"
		"	Traffic classes: period = %u,\n"
		"	credits per period = [%u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u]\n"
		"	Best-effort traffic class oversubscription: weight = %hhu\n"
		"	WRR cost: [%hhu, %hhu, %hhu, %hhu]\n",
		i,

		/* Token bucket */
		p->tb_period,
		p->tb_credits_per_period,
		p->tb_size,

		/* Traffic classes */
		p->tc_period,
		p->tc_credits_per_period[0],
		p->tc_credits_per_period[1],
		p->tc_credits_per_period[2],
		p->tc_credits_per_period[3],
		p->tc_credits_per_period[4],
		p->tc_credits_per_period[5],
		p->tc_credits_per_period[6],
		p->tc_credits_per_period[7],
		p->tc_credits_per_period[8],
		p->tc_credits_per_period[9],
		p->tc_credits_per_period[10],
		p->tc_credits_per_period[11],
		p->tc_credits_per_period[12],

		/* Best-effort traffic class oversubscription */
		p->tc_ov_weight,

		/* WRR */
		p->wrr_cost[0], p->wrr_cost[1], p->wrr_cost[2], p->wrr_cost[3]);
}

static inline uint64_t
rte_sched_time_ms_to_bytes(uint32_t time_ms, uint32_t rate)
{
	uint64_t time = time_ms;

	time = (time * rate) / 1000;

	return time;
}
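/* Example (editorial note): tc_period = 10 ms at rate = 1,250,000,000
 * bytes/sec (10 Gbps) yields (10 * 1250000000) / 1000 = 12,500,000 bytes per
 * enforcement period.
 */
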
static void
rte_sched_pipe_profile_convert(struct rte_sched_subport *subport,
	struct rte_sched_pipe_params *src,
	struct rte_sched_pipe_profile *dst,
	uint32_t rate)
{
	uint32_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
	uint32_t lcd1, lcd2, lcd;
	uint32_t i;

	/* Token Bucket */
	if (src->tb_rate == rate) {
		dst->tb_credits_per_period = 1;
		dst->tb_period = 1;
	} else {
		double tb_rate = (double) src->tb_rate
				/ (double) rate;
		double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

		rte_approx(tb_rate, d,
			&dst->tb_credits_per_period, &dst->tb_period);
	}

	dst->tb_size = src->tb_size;

	/* Traffic Classes */
	dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period,
						rate);

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		if (subport->qsize[i])
			dst->tc_credits_per_period[i]
				= rte_sched_time_ms_to_bytes(src->tc_period,
					src->tc_rate[i]);

	dst->tc_ov_weight = src->tc_ov_weight;

	/* WRR queues */
	wrr_cost[0] = src->wrr_weights[0];
	wrr_cost[1] = src->wrr_weights[1];
	wrr_cost[2] = src->wrr_weights[2];
	wrr_cost[3] = src->wrr_weights[3];

	lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
	lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
	lcd = rte_get_lcd(lcd1, lcd2);

	wrr_cost[0] = lcd / wrr_cost[0];
	wrr_cost[1] = lcd / wrr_cost[1];
	wrr_cost[2] = lcd / wrr_cost[2];
	wrr_cost[3] = lcd / wrr_cost[3];

	dst->wrr_cost[0] = (uint8_t) wrr_cost[0];
	dst->wrr_cost[1] = (uint8_t) wrr_cost[1];
	dst->wrr_cost[2] = (uint8_t) wrr_cost[2];
	dst->wrr_cost[3] = (uint8_t) wrr_cost[3];
}
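/* Worked example (editorial note): wrr_weights = {1, 2, 4, 8} gives
 * lcd1 = lcd(1, 2) = 2, lcd2 = lcd(4, 8) = 8, lcd = lcd(2, 8) = 8, hence
 * wrr_cost = {8, 4, 2, 1}. A queue accumulates cost * pkt_len tokens per
 * packet served, so the lowest-cost (highest-weight) queue can send the most
 * bytes before it is rotated out.
 */
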
static void
rte_sched_subport_config_pipe_profile_table(struct rte_sched_subport *subport,
	struct rte_sched_subport_params *params, uint32_t rate)
{
	uint32_t i;

	for (i = 0; i < subport->n_pipe_profiles; i++) {
		struct rte_sched_pipe_params *src = params->pipe_profiles + i;
		struct rte_sched_pipe_profile *dst = subport->pipe_profiles + i;

		rte_sched_pipe_profile_convert(subport, src, dst, rate);
		rte_sched_port_log_pipe_profile(subport, i);
	}

	subport->pipe_tc_be_rate_max = 0;
	for (i = 0; i < subport->n_pipe_profiles; i++) {
		struct rte_sched_pipe_params *src = params->pipe_profiles + i;
		uint32_t pipe_tc_be_rate = src->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];

		if (subport->pipe_tc_be_rate_max < pipe_tc_be_rate)
			subport->pipe_tc_be_rate_max = pipe_tc_be_rate;
	}
}

static int
rte_sched_subport_check_params(struct rte_sched_subport_params *params,
	uint32_t n_max_pipes_per_subport,
	uint32_t rate)
{
	uint32_t i;

	/* Check user parameters */
	if (params == NULL) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for parameter params\n", __func__);
		return -EINVAL;
	}

	if (params->tb_rate == 0 || params->tb_rate > rate) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for tb rate\n", __func__);
		return -EINVAL;
	}

	if (params->tb_size == 0) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for tb size\n", __func__);
		return -EINVAL;
	}

	/* qsize: if non-zero, power of 2,
	 * no bigger than 32K (due to 16-bit read/write pointers)
	 */
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		uint16_t qsize = params->qsize[i];

		if (qsize != 0 && !rte_is_power_of_2(qsize)) {
			RTE_LOG(ERR, SCHED,
				"%s: Incorrect value for qsize\n", __func__);
			return -EINVAL;
		}
	}

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		uint32_t tc_rate = params->tc_rate[i];
		uint16_t qsize = params->qsize[i];

		if ((qsize == 0 && tc_rate != 0) ||
			(qsize != 0 && tc_rate == 0) ||
			(tc_rate > params->tb_rate)) {
			RTE_LOG(ERR, SCHED,
				"%s: Incorrect value for tc rate\n", __func__);
			return -EINVAL;
		}
	}

	if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
		params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect qsize or tc rate(best effort)\n", __func__);
		return -EINVAL;
	}

	if (params->tc_period == 0) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for tc period\n", __func__);
		return -EINVAL;
	}

	/* n_pipes_per_subport: non-zero, power of 2 */
	if (params->n_pipes_per_subport_enabled == 0 ||
		params->n_pipes_per_subport_enabled > n_max_pipes_per_subport ||
		!rte_is_power_of_2(params->n_pipes_per_subport_enabled)) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for pipes number\n", __func__);
		return -EINVAL;
	}

	/* pipe_profiles and n_pipe_profiles */
	if (params->pipe_profiles == NULL ||
		params->n_pipe_profiles == 0 ||
		params->n_max_pipe_profiles == 0 ||
		params->n_pipe_profiles > params->n_max_pipe_profiles) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for pipe profiles\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < params->n_pipe_profiles; i++) {
		struct rte_sched_pipe_params *p = params->pipe_profiles + i;
		int status;

		status = pipe_profile_check(p, rate, &params->qsize[0]);
		if (status != 0) {
			RTE_LOG(ERR, SCHED,
				"%s: Pipe profile check failed(%d)\n", __func__, status);
			return -EINVAL;
		}
	}

	return 0;
}

uint32_t
rte_sched_port_get_memory_footprint(struct rte_sched_port_params *port_params,
	struct rte_sched_subport_params **subport_params)
{
	uint32_t size0 = 0, size1 = 0, i;
	int status;

	status = rte_sched_port_check_params(port_params);
	if (status != 0) {
		RTE_LOG(ERR, SCHED,
			"%s: Port scheduler port params check failed (%d)\n",
			__func__, status);

		return 0;
	}

	for (i = 0; i < port_params->n_subports_per_port; i++) {
		struct rte_sched_subport_params *sp = subport_params[i];

		status = rte_sched_subport_check_params(sp,
				port_params->n_pipes_per_subport,
				port_params->rate);
		if (status != 0) {
			RTE_LOG(ERR, SCHED,
				"%s: Port scheduler subport params check failed (%d)\n",
				__func__, status);

			return 0;
		}
	}

	size0 = sizeof(struct rte_sched_port);

	for (i = 0; i < port_params->n_subports_per_port; i++) {
		struct rte_sched_subport_params *sp = subport_params[i];

		size1 += rte_sched_subport_get_array_base(sp,
					e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
	}

	return size0 + size1;
}

struct rte_sched_port *
rte_sched_port_config(struct rte_sched_port_params *params)
{
	struct rte_sched_port *port = NULL;
	uint32_t size0, size1;
	uint32_t cycles_per_byte;
	uint32_t i, j;
	int status;

	status = rte_sched_port_check_params(params);
	if (status != 0) {
		RTE_LOG(ERR, SCHED,
			"%s: Port scheduler params check failed (%d)\n",
			__func__, status);
		return NULL;
	}

	size0 = sizeof(struct rte_sched_port);
	size1 = params->n_subports_per_port * sizeof(struct rte_sched_subport *);

	/* Allocate memory to store the data structures */
	port = rte_zmalloc_socket("qos_params", size0 + size1, RTE_CACHE_LINE_SIZE,
		params->socket);
	if (port == NULL) {
		RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);

		return NULL;
	}

	/* User parameters */
	port->n_subports_per_port = params->n_subports_per_port;
	port->n_pipes_per_subport = params->n_pipes_per_subport;
	port->n_pipes_per_subport_log2 =
			__builtin_ctz(params->n_pipes_per_subport);
	port->socket = params->socket;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		port->pipe_queue[i] = i;

	for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
		port->pipe_tc[i] = j;

		if (j < RTE_SCHED_TRAFFIC_CLASS_BE)
			j++;
	}

	for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
		port->tc_queue[i] = j;

		if (i >= RTE_SCHED_TRAFFIC_CLASS_BE)
			j++;
	}
	port->rate = params->rate;
	port->mtu = params->mtu + params->frame_overhead;
	port->frame_overhead = params->frame_overhead;

	/* Timing */
	port->time_cpu_cycles = rte_get_tsc_cycles();
	port->time_cpu_bytes = 0;
	port->time = 0;

	cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT)
		/ params->rate;
	port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte);
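	/* Example (editorial note): with a 2 GHz TSC and rate = 1.25e9
	 * bytes/sec (10 Gbps), cycles_per_byte = (2e9 << 8) / 1.25e9 = 409
	 * (integer division). Keeping the reciprocal lets the dequeue path
	 * convert elapsed cycles to bytes with a multiply instead of a divide.
	 */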

	/* Grinders */
	port->pkts_out = NULL;
	port->n_pkts_out = 0;
	port->subport_id = 0;

	return port;
}

static void
rte_sched_subport_free(struct rte_sched_port *port,
	struct rte_sched_subport *subport)
{
	uint32_t n_subport_pipe_queues;
	uint32_t qindex;

	if (subport == NULL)
		return;

	n_subport_pipe_queues = rte_sched_subport_pipe_queues(subport);

	/* Free enqueued mbufs */
	for (qindex = 0; qindex < n_subport_pipe_queues; qindex++) {
		struct rte_mbuf **mbufs =
			rte_sched_subport_pipe_qbase(subport, qindex);
		uint16_t qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
		if (qsize != 0) {
			struct rte_sched_queue *queue = subport->queue + qindex;
			uint16_t qr = queue->qr & (qsize - 1);
			uint16_t qw = queue->qw & (qsize - 1);

			for (; qr != qw; qr = (qr + 1) & (qsize - 1))
				rte_pktmbuf_free(mbufs[qr]);
		}
	}

	rte_bitmap_free(subport->bmp);
}

void
rte_sched_port_free(struct rte_sched_port *port)
{
	uint32_t i;

	/* Check user parameters */
	if (port == NULL)
		return;

	for (i = 0; i < port->n_subports_per_port; i++)
		rte_sched_subport_free(port, port->subports[i]);

	rte_free(port);
}

static void
rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
{
	struct rte_sched_subport *s = port->subports[i];

	RTE_LOG(DEBUG, SCHED, "Low level config for subport %u:\n"
		"	Token bucket: period = %u, credits per period = %u, size = %u\n"
		"	Traffic classes: period = %u\n"
		"	credits per period = [%u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u]\n"
		"	Best effort traffic class oversubscription: wm min = %u, wm max = %u\n",
		i,

		/* Token bucket */
		s->tb_period,
		s->tb_credits_per_period,
		s->tb_size,

		/* Traffic classes */
		s->tc_period,
		s->tc_credits_per_period[0],
		s->tc_credits_per_period[1],
		s->tc_credits_per_period[2],
		s->tc_credits_per_period[3],
		s->tc_credits_per_period[4],
		s->tc_credits_per_period[5],
		s->tc_credits_per_period[6],
		s->tc_credits_per_period[7],
		s->tc_credits_per_period[8],
		s->tc_credits_per_period[9],
		s->tc_credits_per_period[10],
		s->tc_credits_per_period[11],
		s->tc_credits_per_period[12],

		/* Best effort traffic class oversubscription */
		s->tc_ov_wm_min,
		s->tc_ov_wm_max);
}

static void
rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)
{
	uint32_t i;

	for (i = 0; i < n_subports; i++) {
		struct rte_sched_subport *subport = port->subports[i];

		rte_sched_subport_free(port, subport);
	}

	rte_free(port);
}

int
rte_sched_subport_config(struct rte_sched_port *port,
	uint32_t subport_id,
	struct rte_sched_subport_params *params)
{
	struct rte_sched_subport *s = NULL;
	uint32_t n_subports = subport_id;
	uint32_t n_subport_pipe_queues, i;
	uint32_t size0, size1, bmp_mem_size;
	int status;

	/* Check user parameters */
	if (port == NULL) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for parameter port\n", __func__);
		return -EINVAL;
	}

	if (subport_id >= port->n_subports_per_port) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for subport id\n", __func__);

		rte_sched_free_memory(port, n_subports);
		return -EINVAL;
	}

	status = rte_sched_subport_check_params(params,
		port->n_pipes_per_subport,
		port->rate);
	if (status != 0) {
		RTE_LOG(NOTICE, SCHED,
			"%s: Port scheduler params check failed (%d)\n",
			__func__, status);

		rte_sched_free_memory(port, n_subports);
		return -EINVAL;
	}

	/* Determine the amount of memory to allocate */
	size0 = sizeof(struct rte_sched_subport);
	size1 = rte_sched_subport_get_array_base(params,
				e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);

	/* Allocate memory to store the data structures */
	s = rte_zmalloc_socket("subport_params", size0 + size1,
		RTE_CACHE_LINE_SIZE, port->socket);
	if (s == NULL) {
		RTE_LOG(ERR, SCHED,
			"%s: Memory allocation fails\n", __func__);

		rte_sched_free_memory(port, n_subports);
		return -ENOMEM;
	}

	n_subports++;

	/* Port */
	port->subports[subport_id] = s;

	/* Token Bucket (TB) */
	if (params->tb_rate == port->rate) {
		s->tb_credits_per_period = 1;
		s->tb_period = 1;
	} else {
		double tb_rate = ((double) params->tb_rate) / ((double) port->rate);
		double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

		rte_approx(tb_rate, d, &s->tb_credits_per_period, &s->tb_period);
	}

	s->tb_size = params->tb_size;
	s->tb_time = port->time;
	s->tb_credits = s->tb_size / 2;

	/* Traffic Classes (TCs) */
	s->tc_period = rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		if (params->qsize[i])
			s->tc_credits_per_period[i]
				= rte_sched_time_ms_to_bytes(params->tc_period,
					params->tc_rate[i]);
	}
	s->tc_time = port->time + s->tc_period;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		if (params->qsize[i])
			s->tc_credits[i] = s->tc_credits_per_period[i];

	/* compile time checks */
	RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
	RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS &
		(RTE_SCHED_PORT_N_GRINDERS - 1));

	/* User parameters */
	s->n_pipes_per_subport_enabled = params->n_pipes_per_subport_enabled;
	memcpy(s->qsize, params->qsize, sizeof(params->qsize));
	s->n_pipe_profiles = params->n_pipe_profiles;
	s->n_max_pipe_profiles = params->n_max_pipe_profiles;

#ifdef RTE_SCHED_RED
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		uint32_t j;

		for (j = 0; j < RTE_COLORS; j++) {
			/* if min/max are both zero, then RED is disabled */
			if ((params->red_params[i][j].min_th |
			     params->red_params[i][j].max_th) == 0) {
				continue;
			}

			if (rte_red_config_init(&s->red_config[i][j],
				params->red_params[i][j].wq_log2,
				params->red_params[i][j].min_th,
				params->red_params[i][j].max_th,
				params->red_params[i][j].maxp_inv) != 0) {
				rte_sched_free_memory(port, n_subports);

				RTE_LOG(NOTICE, SCHED,
					"%s: RED configuration init fails\n", __func__);
				return -EINVAL;
			}
		}
	}
#endif

	/* Scheduling loop detection */
	s->pipe_loop = RTE_SCHED_PIPE_INVALID;
	s->pipe_exhaustion = 0;

	/* Grinders */
	s->busy_grinders = 0;

	/* Queue base calculation */
	rte_sched_subport_config_qsize(s);

	/* Large data structures */
	s->pipe = (struct rte_sched_pipe *)
		(s->memory + rte_sched_subport_get_array_base(params,
		e_RTE_SCHED_SUBPORT_ARRAY_PIPE));
	s->queue = (struct rte_sched_queue *)
		(s->memory + rte_sched_subport_get_array_base(params,
		e_RTE_SCHED_SUBPORT_ARRAY_QUEUE));
	s->queue_extra = (struct rte_sched_queue_extra *)
		(s->memory + rte_sched_subport_get_array_base(params,
		e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA));
	s->pipe_profiles = (struct rte_sched_pipe_profile *)
		(s->memory + rte_sched_subport_get_array_base(params,
		e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES));
	s->bmp_array = s->memory + rte_sched_subport_get_array_base(params,
		e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY);
	s->queue_array = (struct rte_mbuf **)
		(s->memory + rte_sched_subport_get_array_base(params,
		e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY));

	/* Pipe profile table */
	rte_sched_subport_config_pipe_profile_table(s, params, port->rate);

	/* Bitmap */
	n_subport_pipe_queues = rte_sched_subport_pipe_queues(s);
	bmp_mem_size = rte_bitmap_get_memory_footprint(n_subport_pipe_queues);
	s->bmp = rte_bitmap_init(n_subport_pipe_queues, s->bmp_array,
				bmp_mem_size);
	if (s->bmp == NULL) {
		RTE_LOG(ERR, SCHED,
			"%s: Subport bitmap init error\n", __func__);

		rte_sched_free_memory(port, n_subports);
		return -EINVAL;
	}

	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
		s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;

#ifdef RTE_SCHED_SUBPORT_TC_OV
	/* TC oversubscription */
	s->tc_ov_wm_min = port->mtu;
	s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(params->tc_period,
						     s->pipe_tc_be_rate_max);
	s->tc_ov_wm = s->tc_ov_wm_max;
	s->tc_ov_period_id = 0;
	s->tc_ov = 0;
	s->tc_ov_n = 0;
	s->tc_ov_rate = 0;
#endif

	rte_sched_port_log_subport_config(port, subport_id);

	return 0;
}

int
rte_sched_pipe_config(struct rte_sched_port *port,
	uint32_t subport_id,
	uint32_t pipe_id,
	int32_t pipe_profile)
{
	struct rte_sched_subport *s;
	struct rte_sched_pipe *p;
	struct rte_sched_pipe_profile *params;
	uint32_t n_subports = subport_id + 1;
	uint32_t deactivate, profile, i;

	/* Check user parameters */
	profile = (uint32_t) pipe_profile;
	deactivate = (pipe_profile < 0);

	if (port == NULL) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for parameter port\n", __func__);
		return -EINVAL;
	}

	if (subport_id >= port->n_subports_per_port) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for parameter subport id\n", __func__);

		rte_sched_free_memory(port, n_subports);
		return -EINVAL;
	}

	s = port->subports[subport_id];
	if (pipe_id >= s->n_pipes_per_subport_enabled) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for parameter pipe id\n", __func__);

		rte_sched_free_memory(port, n_subports);
		return -EINVAL;
	}

	if (!deactivate && profile >= s->n_pipe_profiles) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for parameter pipe profile\n", __func__);

		rte_sched_free_memory(port, n_subports);
		return -EINVAL;
	}

	/* Handle the case when pipe already has a valid configuration */
	p = s->pipe + pipe_id;
	if (p->tb_time) {
		params = s->pipe_profiles + p->profile;

		double subport_tc_be_rate =
			(double) s->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
			/ (double) s->tc_period;
		double pipe_tc_be_rate =
			(double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
			/ (double) params->tc_period;
		uint32_t tc_be_ov = s->tc_ov;

		/* Unplug pipe from its subport */
		s->tc_ov_n -= params->tc_ov_weight;
		s->tc_ov_rate -= pipe_tc_be_rate;
		s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;

		if (s->tc_ov != tc_be_ov) {
			RTE_LOG(DEBUG, SCHED,
				"Subport %u Best-effort TC oversubscription is OFF (%.4lf >= %.4lf)\n",
				subport_id, subport_tc_be_rate, s->tc_ov_rate);
		}

		/* Reset the pipe */
		memset(p, 0, sizeof(struct rte_sched_pipe));
	}

	if (deactivate)
		return 0;

	/* Apply the new pipe configuration */
	p->profile = profile;
	params = s->pipe_profiles + p->profile;

	/* Token Bucket (TB) */
	p->tb_time = port->time;
	p->tb_credits = params->tb_size / 2;

	/* Traffic Classes (TCs) */
	p->tc_time = port->time + params->tc_period;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		if (s->qsize[i])
			p->tc_credits[i] = params->tc_credits_per_period[i];

	{
		/* Subport best effort tc oversubscription */
		double subport_tc_be_rate =
			(double) s->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
			/ (double) s->tc_period;
		double pipe_tc_be_rate =
			(double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
			/ (double) params->tc_period;
		uint32_t tc_be_ov = s->tc_ov;

		s->tc_ov_n += params->tc_ov_weight;
		s->tc_ov_rate += pipe_tc_be_rate;
		s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;

		if (s->tc_ov != tc_be_ov) {
			RTE_LOG(DEBUG, SCHED,
				"Subport %u Best effort TC oversubscription is ON (%.4lf < %.4lf)\n",
				subport_id, subport_tc_be_rate, s->tc_ov_rate);
		}
		p->tc_ov_period_id = s->tc_ov_period_id;
		p->tc_ov_credits = s->tc_ov_wm;
	}

	return 0;
}

int
rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,
	uint32_t subport_id,
	struct rte_sched_pipe_params *params,
	uint32_t *pipe_profile_id)
{
	struct rte_sched_subport *s;
	struct rte_sched_pipe_profile *pp;
	uint32_t i;
	int status;

	/* Port */
	if (port == NULL) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for parameter port\n", __func__);
		return -EINVAL;
	}

	/* Subport id must not exceed the max limit */
	if (subport_id > port->n_subports_per_port) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for subport id\n", __func__);
		return -EINVAL;
	}

	s = port->subports[subport_id];

	/* Pipe profiles must not exceed the max limit */
	if (s->n_pipe_profiles >= s->n_max_pipe_profiles) {
		RTE_LOG(ERR, SCHED,
			"%s: Number of pipe profiles exceeds the max limit\n", __func__);
		return -EINVAL;
	}

	/* Pipe params */
	status = pipe_profile_check(params, port->rate, &s->qsize[0]);
	if (status != 0) {
		RTE_LOG(ERR, SCHED,
			"%s: Pipe profile check failed(%d)\n", __func__, status);
		return -EINVAL;
	}

	pp = &s->pipe_profiles[s->n_pipe_profiles];
	rte_sched_pipe_profile_convert(s, params, pp, port->rate);

	/* Pipe profile must not already exist */
	for (i = 0; i < s->n_pipe_profiles; i++)
		if (memcmp(s->pipe_profiles + i, pp, sizeof(*pp)) == 0) {
			RTE_LOG(ERR, SCHED,
				"%s: Pipe profile exists\n", __func__);
			return -EINVAL;
		}

	/* Pipe profile commit */
	*pipe_profile_id = s->n_pipe_profiles;
	s->n_pipe_profiles++;

	if (s->pipe_tc_be_rate_max < params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE])
		s->pipe_tc_be_rate_max = params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];

	rte_sched_port_log_pipe_profile(s, *pipe_profile_id);

	return 0;
}

static inline uint32_t
rte_sched_port_qindex(struct rte_sched_port *port,
	uint32_t subport,
	uint32_t pipe,
	uint32_t traffic_class,
	uint32_t queue)
{
	return ((subport & (port->n_subports_per_port - 1)) <<
		(port->n_pipes_per_subport_log2 + 4)) |
		((pipe &
		(port->subports[subport]->n_pipes_per_subport_enabled - 1)) << 4) |
		((rte_sched_port_pipe_queue(port, traffic_class) + queue) &
		(RTE_SCHED_QUEUES_PER_PIPE - 1));
}
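/* Bit layout of the returned queue index (editorial note):
 * [subport | pipe | queue-within-pipe (4 bits)]. For example, with
 * n_pipes_per_subport_log2 = 10: subport 2, pipe 5, best-effort traffic class
 * (pipe_queue[12] = 12), queue 1 gives (2 << 14) | (5 << 4) | 13 = 32861.
 */
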
void
rte_sched_port_pkt_write(struct rte_sched_port *port,
			 struct rte_mbuf *pkt,
			 uint32_t subport, uint32_t pipe,
			 uint32_t traffic_class,
			 uint32_t queue, enum rte_color color)
{
	uint32_t queue_id =
		rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);

	rte_mbuf_sched_set(pkt, queue_id, traffic_class, (uint8_t)color);
}

void
rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
				  const struct rte_mbuf *pkt,
				  uint32_t *subport, uint32_t *pipe,
				  uint32_t *traffic_class, uint32_t *queue)
{
	uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);

	*subport = queue_id >> (port->n_pipes_per_subport_log2 + 4);
	*pipe = (queue_id >> 4) &
		(port->subports[*subport]->n_pipes_per_subport_enabled - 1);
	*traffic_class = rte_sched_port_pipe_tc(port, queue_id);
	*queue = rte_sched_port_tc_queue(port, queue_id);
}

enum rte_color
rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
{
	return (enum rte_color)rte_mbuf_sched_color_get(pkt);
}

int
rte_sched_subport_read_stats(struct rte_sched_port *port,
			     uint32_t subport_id,
			     struct rte_sched_subport_stats *stats,
			     uint32_t *tc_ov)
{
	struct rte_sched_subport *s;

	/* Check user parameters */
	if (port == NULL) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for parameter port\n", __func__);
		return -EINVAL;
	}

	if (subport_id >= port->n_subports_per_port) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for subport id\n", __func__);
		return -EINVAL;
	}

	if (stats == NULL) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for parameter stats\n", __func__);
		return -EINVAL;
	}

	if (tc_ov == NULL) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for tc_ov\n", __func__);
		return -EINVAL;
	}

	s = port->subports[subport_id];

	/* Copy subport stats and clear */
	memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
	memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));

	/* Subport TC oversubscription status */
	*tc_ov = s->tc_ov;

	return 0;
}

int
rte_sched_queue_read_stats(struct rte_sched_port *port,
	uint32_t queue_id,
	struct rte_sched_queue_stats *stats,
	uint16_t *qlen)
{
	struct rte_sched_queue *q;
	struct rte_sched_queue_extra *qe;

	/* Check user parameters */
	if (port == NULL) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for parameter port\n", __func__);
		return -EINVAL;
	}

	if (queue_id >= rte_sched_port_queues_per_port(port)) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for queue id\n", __func__);
		return -EINVAL;
	}

	if (stats == NULL) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for parameter stats\n", __func__);
		return -EINVAL;
	}

	if (qlen == NULL) {
		RTE_LOG(ERR, SCHED,
			"%s: Incorrect value for parameter qlen\n", __func__);
		return -EINVAL;
	}
	q = port->queue + queue_id;
	qe = port->queue_extra + queue_id;

	/* Copy queue stats and clear */
	memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
	memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats));

	/* Queue length */
	*qlen = q->qw - q->qr;

	return 0;
}

#ifdef RTE_SCHED_DEBUG

static int
rte_sched_port_queue_is_empty(struct rte_sched_subport *subport,
	uint32_t qindex)
{
	struct rte_sched_queue *queue = subport->queue + qindex;

	return queue->qr == queue->qw;
}

#endif /* RTE_SCHED_DEBUG */

#ifdef RTE_SCHED_COLLECT_STATS

static inline void
rte_sched_port_update_subport_stats(struct rte_sched_port *port,
	struct rte_sched_subport *subport,
	uint32_t qindex,
	struct rte_mbuf *pkt)
{
	uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
	uint32_t pkt_len = pkt->pkt_len;

	subport->stats.n_pkts_tc[tc_index] += 1;
	subport->stats.n_bytes_tc[tc_index] += pkt_len;
}

#ifdef RTE_SCHED_RED
static inline void
rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
	struct rte_sched_subport *subport,
	uint32_t qindex,
	struct rte_mbuf *pkt,
	uint32_t red)
#else
static inline void
rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
	struct rte_sched_subport *subport,
	uint32_t qindex,
	struct rte_mbuf *pkt,
	__rte_unused uint32_t red)
#endif
{
	uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
	uint32_t pkt_len = pkt->pkt_len;

	subport->stats.n_pkts_tc_dropped[tc_index] += 1;
	subport->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
#ifdef RTE_SCHED_RED
	subport->stats.n_pkts_red_dropped[tc_index] += red;
#endif
}

static inline void
rte_sched_port_update_queue_stats(struct rte_sched_subport *subport,
	uint32_t qindex,
	struct rte_mbuf *pkt)
{
	struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
	uint32_t pkt_len = pkt->pkt_len;

	qe->stats.n_pkts += 1;
	qe->stats.n_bytes += pkt_len;
}

#ifdef RTE_SCHED_RED
static inline void
rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
	uint32_t qindex,
	struct rte_mbuf *pkt,
	uint32_t red)
#else
static inline void
rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
	uint32_t qindex,
	struct rte_mbuf *pkt,
	__rte_unused uint32_t red)
#endif
{
	struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
	uint32_t pkt_len = pkt->pkt_len;

	qe->stats.n_pkts_dropped += 1;
	qe->stats.n_bytes_dropped += pkt_len;
#ifdef RTE_SCHED_RED
	qe->stats.n_pkts_red_dropped += red;
#endif
}

#endif /* RTE_SCHED_COLLECT_STATS */

#ifdef RTE_SCHED_RED

static inline int
rte_sched_port_red_drop(struct rte_sched_port *port,
	struct rte_sched_subport *subport,
	struct rte_mbuf *pkt,
	uint32_t qindex,
	uint16_t qlen)
{
	struct rte_sched_queue_extra *qe;
	struct rte_red_config *red_cfg;
	struct rte_red *red;
	uint32_t tc_index;
	enum rte_color color;

	tc_index = rte_sched_port_pipe_tc(port, qindex);
	color = rte_sched_port_pkt_read_color(pkt);
	red_cfg = &subport->red_config[tc_index][color];

	if ((red_cfg->min_th | red_cfg->max_th) == 0)
		return 0;

	qe = subport->queue_extra + qindex;
	red = &qe->red;

	return rte_red_enqueue(red_cfg, red, qlen, port->time);
}

static inline void
rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t qindex)
{
	struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
	struct rte_red *red = &qe->red;

	rte_red_mark_queue_empty(red, port->time);
}

#else

static inline int rte_sched_port_red_drop(struct rte_sched_port *port __rte_unused,
	struct rte_sched_subport *subport __rte_unused,
	struct rte_mbuf *pkt __rte_unused,
	uint32_t qindex __rte_unused,
	uint16_t qlen __rte_unused)
{
	return 0;
}

#define rte_sched_port_set_queue_empty_timestamp(port, subport, qindex)

#endif /* RTE_SCHED_RED */

#ifdef RTE_SCHED_DEBUG

static inline void
debug_check_queue_slab(struct rte_sched_subport *subport, uint32_t bmp_pos,
		       uint64_t bmp_slab)
{
	uint64_t mask;
	uint32_t i, panic;

	if (bmp_slab == 0)
		rte_panic("Empty slab at position %u\n", bmp_pos);

	panic = 0;
	for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
		if (mask & bmp_slab) {
			if (rte_sched_port_queue_is_empty(subport, bmp_pos + i)) {
				printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i);
				panic = 1;
			}
		}
	}

	if (panic)
		rte_panic("Empty queues in slab 0x%" PRIx64 " starting at position %u\n",
			bmp_slab, bmp_pos);
}

#endif /* RTE_SCHED_DEBUG */

static inline struct rte_sched_subport *
rte_sched_port_subport(struct rte_sched_port *port,
	struct rte_mbuf *pkt)
{
	uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
	uint32_t subport_id = queue_id >> (port->n_pipes_per_subport_log2 + 4);

	return port->subports[subport_id];
}

static inline uint32_t
rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_subport *subport,
	struct rte_mbuf *pkt, uint32_t subport_qmask)
{
	struct rte_sched_queue *q;
#ifdef RTE_SCHED_COLLECT_STATS
	struct rte_sched_queue_extra *qe;
#endif
	uint32_t qindex = rte_mbuf_sched_queue_get(pkt);
	uint32_t subport_queue_id = subport_qmask & qindex;

	q = subport->queue + subport_queue_id;
	rte_prefetch0(q);
#ifdef RTE_SCHED_COLLECT_STATS
	qe = subport->queue_extra + subport_queue_id;
	rte_prefetch0(qe);
#endif

	return subport_queue_id;
}

static inline void
rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port,
	struct rte_sched_subport *subport,
	uint32_t qindex,
	struct rte_mbuf **qbase)
{
	struct rte_sched_queue *q;
	struct rte_mbuf **q_qw;
	uint16_t qsize;

	q = subport->queue + qindex;
	qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
	q_qw = qbase + (q->qw & (qsize - 1));

	rte_prefetch0(q_qw);
	rte_bitmap_prefetch0(subport->bmp, qindex);
}

static inline int
rte_sched_port_enqueue_qwa(struct rte_sched_port *port,
	struct rte_sched_subport *subport,
	uint32_t qindex,
	struct rte_mbuf **qbase,
	struct rte_mbuf *pkt)
{
	struct rte_sched_queue *q;
	uint16_t qsize;
	uint16_t qlen;

	q = subport->queue + qindex;
	qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
	qlen = q->qw - q->qr;

	/* Drop the packet (and update drop stats) when queue is full */
	if (unlikely(rte_sched_port_red_drop(port, subport, pkt, qindex, qlen) ||
		     (qlen >= qsize))) {
		rte_pktmbuf_free(pkt);
#ifdef RTE_SCHED_COLLECT_STATS
		rte_sched_port_update_subport_stats_on_drop(port, subport,
			qindex, pkt, qlen < qsize);
		rte_sched_port_update_queue_stats_on_drop(subport, qindex, pkt,
			qlen < qsize);
#endif
		return 0;
	}

	/* Enqueue packet */
	qbase[q->qw & (qsize - 1)] = pkt;
	q->qw++;

	/* Activate queue in the subport bitmap */
	rte_bitmap_set(subport->bmp, qindex);

	/* Statistics */
#ifdef RTE_SCHED_COLLECT_STATS
	rte_sched_port_update_subport_stats(port, subport, qindex, pkt);
	rte_sched_port_update_queue_stats(subport, qindex, pkt);
#endif

	return 1;
}

/*
 * The enqueue function implements a 4-level pipeline with each stage
 * processing two different packets. The purpose of using a pipeline
 * is to hide the latency of prefetching the data structures. The
 * naming convention is presented in the diagram below:
 *
 *   p00  _______   p10  _______   p20  _______   p30  _______
 * ----->|       |----->|       |----->|       |----->|       |----->
 *       |   0   |      |   1   |      |   2   |      |   3   |
 * ----->|_______|----->|_______|----->|_______|----->|_______|----->
 *   p01            p11            p21            p31
 */
int
rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,
		       uint32_t n_pkts)
{
	struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21,
		*pkt30, *pkt31, *pkt_last;
	struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base,
		**q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
	struct rte_sched_subport *subport00, *subport01, *subport10, *subport11,
		*subport20, *subport21, *subport30, *subport31, *subport_last;
	uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
	uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
	uint32_t subport_qmask;
	uint32_t result, i;

	result = 0;
	subport_qmask = (1 << (port->n_pipes_per_subport_log2 + 4)) - 1;

	/*
	 * Less than 6 input packets available, which is not enough to
	 * feed the pipeline
	 */
	if (unlikely(n_pkts < 6)) {
		struct rte_sched_subport *subports[5];
		struct rte_mbuf **q_base[5];
		uint32_t q[5];

		/* Prefetch the mbuf structure of each packet */
		for (i = 0; i < n_pkts; i++)
			rte_prefetch0(pkts[i]);

		/* Prefetch the subport structure for each packet */
		for (i = 0; i < n_pkts; i++)
			subports[i] = rte_sched_port_subport(port, pkts[i]);

		/* Prefetch the queue structure for each queue */
		for (i = 0; i < n_pkts; i++)
			q[i] = rte_sched_port_enqueue_qptrs_prefetch0(subports[i],
					pkts[i], subport_qmask);

		/* Prefetch the write pointer location of each queue */
		for (i = 0; i < n_pkts; i++) {
			q_base[i] = rte_sched_subport_pipe_qbase(subports[i], q[i]);
			rte_sched_port_enqueue_qwa_prefetch0(port, subports[i],
				q[i], q_base[i]);
		}

		/* Write each packet to its queue */
		for (i = 0; i < n_pkts; i++)
			result += rte_sched_port_enqueue_qwa(port, subports[i],
						q[i], q_base[i], pkts[i]);

		return result;
	}

	/* Feed the first 3 stages of the pipeline (6 packets needed) */
	pkt20 = pkts[0];
	pkt21 = pkts[1];
	rte_prefetch0(pkt20);
	rte_prefetch0(pkt21);

	pkt10 = pkts[2];
	pkt11 = pkts[3];
	rte_prefetch0(pkt10);
	rte_prefetch0(pkt11);

	subport20 = rte_sched_port_subport(port, pkt20);
	subport21 = rte_sched_port_subport(port, pkt21);
	q20 = rte_sched_port_enqueue_qptrs_prefetch0(subport20,
			pkt20, subport_qmask);
	q21 = rte_sched_port_enqueue_qptrs_prefetch0(subport21,
			pkt21, subport_qmask);

	pkt00 = pkts[4];
	pkt01 = pkts[5];
	rte_prefetch0(pkt00);
	rte_prefetch0(pkt01);

	subport10 = rte_sched_port_subport(port, pkt10);
	subport11 = rte_sched_port_subport(port, pkt11);
	q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
			pkt10, subport_qmask);
	q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
			pkt11, subport_qmask);

	q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
	q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
	rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);

	/* Run the pipeline */
	for (i = 6; i < (n_pkts & (~1)); i += 2) {
		/* Propagate stage inputs */
		pkt30 = pkt20;
		pkt31 = pkt21;
		pkt20 = pkt10;
		pkt21 = pkt11;
		pkt10 = pkt00;
		pkt11 = pkt01;
		q30 = q20;
		q31 = q21;
		q20 = q10;
		q21 = q11;
		subport30 = subport20;
		subport31 = subport21;
		subport20 = subport10;
		subport21 = subport11;
		q30_base = q20_base;
		q31_base = q21_base;

		/* Stage 0: Get packets in */
		pkt00 = pkts[i];
		pkt01 = pkts[i + 1];
		rte_prefetch0(pkt00);
		rte_prefetch0(pkt01);

		/* Stage 1: Prefetch subport and queue structure storing queue pointers */
		subport10 = rte_sched_port_subport(port, pkt10);
		subport11 = rte_sched_port_subport(port, pkt11);
		q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
				pkt10, subport_qmask);
		q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
				pkt11, subport_qmask);

		/* Stage 2: Prefetch queue write location */
		q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
		q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
		rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
		rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);

		/* Stage 3: Write packet to queue and activate queue */
		r30 = rte_sched_port_enqueue_qwa(port, subport30,
				q30, q30_base, pkt30);
		r31 = rte_sched_port_enqueue_qwa(port, subport31,
				q31, q31_base, pkt31);
		result += r30 + r31;
	}

	/*
	 * Drain the pipeline (exactly 6 packets).
	 * Handle the last packet in the case
	 * of an odd number of input packets.
	 */
	pkt_last = pkts[n_pkts - 1];
	rte_prefetch0(pkt_last);

	subport00 = rte_sched_port_subport(port, pkt00);
	subport01 = rte_sched_port_subport(port, pkt01);
	q00 = rte_sched_port_enqueue_qptrs_prefetch0(subport00,
			pkt00, subport_qmask);
	q01 = rte_sched_port_enqueue_qptrs_prefetch0(subport01,
			pkt01, subport_qmask);

	q10_base = rte_sched_subport_pipe_qbase(subport10, q10);
	q11_base = rte_sched_subport_pipe_qbase(subport11, q11);
	rte_sched_port_enqueue_qwa_prefetch0(port, subport10, q10, q10_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, subport11, q11, q11_base);

	r20 = rte_sched_port_enqueue_qwa(port, subport20,
			q20, q20_base, pkt20);
	r21 = rte_sched_port_enqueue_qwa(port, subport21,
			q21, q21_base, pkt21);
	result += r20 + r21;

	subport_last = rte_sched_port_subport(port, pkt_last);
	q_last = rte_sched_port_enqueue_qptrs_prefetch0(subport_last,
				pkt_last, subport_qmask);

	q00_base = rte_sched_subport_pipe_qbase(subport00, q00);
	q01_base = rte_sched_subport_pipe_qbase(subport01, q01);
	rte_sched_port_enqueue_qwa_prefetch0(port, subport00, q00, q00_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, subport01, q01, q01_base);

	r10 = rte_sched_port_enqueue_qwa(port, subport10, q10,
			q10_base, pkt10);
	r11 = rte_sched_port_enqueue_qwa(port, subport11, q11,
			q11_base, pkt11);
	result += r10 + r11;

	q_last_base = rte_sched_subport_pipe_qbase(subport_last, q_last);
	rte_sched_port_enqueue_qwa_prefetch0(port, subport_last,
		q_last, q_last_base);

	r00 = rte_sched_port_enqueue_qwa(port, subport00, q00,
			q00_base, pkt00);
	r01 = rte_sched_port_enqueue_qwa(port, subport01, q01,
			q01_base, pkt01);
	result += r00 + r01;

	/* Handle the last packet */
	if (n_pkts & 1) {
		r_last = rte_sched_port_enqueue_qwa(port, subport_last,
					q_last, q_last_base, pkt_last);
		result += r_last;
	}

	return result;
}

#ifndef RTE_SCHED_SUBPORT_TC_OV

static inline void
grinder_credits_update(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *params = grinder->pipe_params;
	uint64_t n_periods;
	uint32_t i;

	/* Subport TB */
	n_periods = (port->time - subport->tb_time) / subport->tb_period;
	subport->tb_credits += n_periods * subport->tb_credits_per_period;
	subport->tb_credits = rte_sched_min_val_2_u32(subport->tb_credits, subport->tb_size);
	subport->tb_time += n_periods * subport->tb_period;

	/* Pipe TB */
	n_periods = (port->time - pipe->tb_time) / params->tb_period;
	pipe->tb_credits += n_periods * params->tb_credits_per_period;
	pipe->tb_credits = rte_sched_min_val_2_u32(pipe->tb_credits, params->tb_size);
	pipe->tb_time += n_periods * params->tb_period;

	/* Subport TCs */
	if (unlikely(port->time >= subport->tc_time)) {
		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
			subport->tc_credits[i] = subport->tc_credits_per_period[i];

		subport->tc_time = port->time + subport->tc_period;
	}

	/* Pipe TCs */
	if (unlikely(port->time >= pipe->tc_time)) {
		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
			pipe->tc_credits[i] = params->tc_credits_per_period[i];

		pipe->tc_time = port->time + params->tc_period;
	}
}

#else

static inline uint32_t
grinder_tc_ov_credits_update(struct rte_sched_port *port,
	struct rte_sched_subport *subport)
{
	uint32_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tc_consumption = 0, tc_ov_consumption_max;
	uint32_t tc_ov_wm = subport->tc_ov_wm;
	uint32_t i;

	if (subport->tc_ov == 0)
		return subport->tc_ov_wm_max;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
		tc_ov_consumption[i] =
			subport->tc_credits_per_period[i] - subport->tc_credits[i];
		tc_consumption += tc_ov_consumption[i];
	}

	tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] =
		subport->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
		subport->tc_credits[RTE_SCHED_TRAFFIC_CLASS_BE];

	tc_ov_consumption_max =
		subport->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
			tc_consumption;

	if (tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] >
		(tc_ov_consumption_max - port->mtu)) {
		tc_ov_wm -= tc_ov_wm >> 7;
		if (tc_ov_wm < subport->tc_ov_wm_min)
			tc_ov_wm = subport->tc_ov_wm_min;

		return tc_ov_wm;
	}

	tc_ov_wm += (tc_ov_wm >> 7) + 1;
	if (tc_ov_wm > subport->tc_ov_wm_max)
		tc_ov_wm = subport->tc_ov_wm_max;

	return tc_ov_wm;
}
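/* Editorial note on the feedback loop above: when best-effort consumption in
 * the last period came within one MTU of the credits left unused by the
 * strict-priority classes, the watermark is lowered by 1/128 of its value
 * (e.g. 12800 -> 12700), clamped at tc_ov_wm_min; otherwise it is raised by
 * 1/128 + 1, clamped at tc_ov_wm_max. This slowly converges the per-period
 * best-effort allowance toward what the subport can actually spare.
 */
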
static inline void
grinder_credits_update(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *params = grinder->pipe_params;
	uint64_t n_periods;
	uint32_t i;

	/* Subport TB */
	n_periods = (port->time - subport->tb_time) / subport->tb_period;
	subport->tb_credits += n_periods * subport->tb_credits_per_period;
	subport->tb_credits = rte_sched_min_val_2_u32(subport->tb_credits, subport->tb_size);
	subport->tb_time += n_periods * subport->tb_period;

	/* Pipe TB */
	n_periods = (port->time - pipe->tb_time) / params->tb_period;
	pipe->tb_credits += n_periods * params->tb_credits_per_period;
	pipe->tb_credits = rte_sched_min_val_2_u32(pipe->tb_credits, params->tb_size);
	pipe->tb_time += n_periods * params->tb_period;

	/* Subport TCs */
	if (unlikely(port->time >= subport->tc_time)) {
		subport->tc_ov_wm = grinder_tc_ov_credits_update(port, subport);

		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
			subport->tc_credits[i] = subport->tc_credits_per_period[i];

		subport->tc_time = port->time + subport->tc_period;
		subport->tc_ov_period_id++;
	}

	/* Pipe TCs */
	if (unlikely(port->time >= pipe->tc_time)) {
		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
			pipe->tc_credits[i] = params->tc_credits_per_period[i];
		pipe->tc_time = port->time + params->tc_period;
	}

	/* Pipe TCs - Oversubscription */
	if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
		pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;

		pipe->tc_ov_period_id = subport->tc_ov_period_id;
	}
}

#endif /* RTE_SCHED_TS_CREDITS_UPDATE, RTE_SCHED_SUBPORT_TC_OV */

#ifndef RTE_SCHED_SUBPORT_TC_OV

static inline int
grinder_credits_check(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t tc_index = grinder->tc_index;
	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
	uint32_t subport_tb_credits = subport->tb_credits;
	uint32_t subport_tc_credits = subport->tc_credits[tc_index];
	uint32_t pipe_tb_credits = pipe->tb_credits;
	uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
	int enough_credits;

	/* Check queue credits */
	enough_credits = (pkt_len <= subport_tb_credits) &&
		(pkt_len <= subport_tc_credits) &&
		(pkt_len <= pipe_tb_credits) &&
		(pkt_len <= pipe_tc_credits);

	if (!enough_credits)
		return 0;

	/* Update port credits */
	subport->tb_credits -= pkt_len;
	subport->tc_credits[tc_index] -= pkt_len;
	pipe->tb_credits -= pkt_len;
	pipe->tc_credits[tc_index] -= pkt_len;

	return 1;
}

#else

static inline int
grinder_credits_check(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t tc_index = grinder->tc_index;
	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
	uint32_t subport_tb_credits = subport->tb_credits;
	uint32_t subport_tc_credits = subport->tc_credits[tc_index];
	uint32_t pipe_tb_credits = pipe->tb_credits;
	uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
	uint32_t pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {0};
	uint32_t pipe_tc_ov_credits, i;
	int enough_credits;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		pipe_tc_ov_mask1[i] = UINT32_MAX;

	pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASS_BE] = pipe->tc_ov_credits;
	pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASS_BE] = UINT32_MAX;
	pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];

	/* Check pipe and subport credits */
	enough_credits = (pkt_len <= subport_tb_credits) &&
		(pkt_len <= subport_tc_credits) &&
		(pkt_len <= pipe_tb_credits) &&
		(pkt_len <= pipe_tc_credits) &&
		(pkt_len <= pipe_tc_ov_credits);

	if (!enough_credits)
		return 0;

	/* Update pipe and subport credits */
	subport->tb_credits -= pkt_len;
	subport->tc_credits[tc_index] -= pkt_len;
	pipe->tb_credits -= pkt_len;
	pipe->tc_credits[tc_index] -= pkt_len;
	pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;

	return 1;
}

#endif /* RTE_SCHED_SUBPORT_TC_OV */
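/* Editorial note on the branchless variant above: pipe_tc_ov_mask1[] holds
 * UINT32_MAX for every strict-priority class and pipe->tc_ov_credits for the
 * best-effort class, so the "pkt_len <= pipe_tc_ov_credits" test is a no-op
 * unless tc_index is best-effort. pipe_tc_ov_mask2[] is UINT32_MAX only for
 * the best-effort class, so the final subtraction removes oversubscription
 * credits only when a best-effort packet is sent.
 */
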
static inline int
grinder_schedule(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
	uint32_t be_tc_active;

	if (!grinder_credits_check(port, subport, pos))
		return 0;

	/* Advance port time */
	port->time += pkt_len;

	/* Send packet */
	port->pkts_out[port->n_pkts_out++] = pkt;
	queue->qr++;

	be_tc_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE) ? ~0x0 : 0x0;
	grinder->wrr_tokens[grinder->qpos] +=
		(pkt_len * grinder->wrr_cost[grinder->qpos]) & be_tc_active;

	if (queue->qr == queue->qw) {
		uint32_t qindex = grinder->qindex[grinder->qpos];

		rte_bitmap_clear(subport->bmp, qindex);
		grinder->qmask &= ~(1 << grinder->qpos);
		if (be_tc_active)
			grinder->wrr_mask[grinder->qpos] = 0;
		rte_sched_port_set_queue_empty_timestamp(port, subport, qindex);
	}

	/* Reset pipe loop detection */
	subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
	grinder->productive = 1;

	return 1;
}

#ifdef SCHED_VECTOR_SSE4

static inline int
grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
{
	__m128i index = _mm_set1_epi32(base_pipe);
	__m128i pipes = _mm_load_si128((__m128i *)subport->grinder_base_bmp_pos);
	__m128i res = _mm_cmpeq_epi32(pipes, index);

	pipes = _mm_load_si128((__m128i *)(subport->grinder_base_bmp_pos + 4));
	pipes = _mm_cmpeq_epi32(pipes, index);
	res = _mm_or_si128(res, pipes);

	if (_mm_testz_si128(res, res))
		return 0;

	return 1;
}

#elif defined(SCHED_VECTOR_NEON)

static inline int
grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
{
	uint32x4_t index, pipes;
	uint32_t *pos = (uint32_t *)subport->grinder_base_bmp_pos;

	index = vmovq_n_u32(base_pipe);
	pipes = vld1q_u32(pos);
	if (!vminvq_u32(veorq_u32(pipes, index)))
		return 1;

	pipes = vld1q_u32(pos + 4);
	if (!vminvq_u32(veorq_u32(pipes, index)))
		return 1;

	return 0;
}

#else

static inline int
grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
{
	uint32_t i;

	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) {
		if (subport->grinder_base_bmp_pos[i] == base_pipe)
			return 1;
	}

	return 0;
}

#endif /* SCHED_VECTOR_SSE4 */
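/*
 * All three variants answer the same question: is base_pipe already owned
 * by one of the grinders? The SSE4 and NEON paths compare the 8-entry
 * grinder_base_bmp_pos array against base_pipe in two 4-lane vector
 * operations instead of a scalar loop; the aligned SSE4 loads assume the
 * array starts on a 16-byte boundary.
 */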
static inline void
grinder_pcache_populate(struct rte_sched_subport *subport,
	uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	uint16_t w[4];

	grinder->pcache_w = 0;
	grinder->pcache_r = 0;

	/* Split the 64-bit bitmap slab into four 16-bit pipe queue masks */
	w[0] = (uint16_t) bmp_slab;
	w[1] = (uint16_t) (bmp_slab >> 16);
	w[2] = (uint16_t) (bmp_slab >> 32);
	w[3] = (uint16_t) (bmp_slab >> 48);

	/* Only advance the write pointer past non-empty masks, so empty
	 * entries are overwritten in place
	 */
	grinder->pcache_qmask[grinder->pcache_w] = w[0];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
	grinder->pcache_w += (w[0] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[1];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
	grinder->pcache_w += (w[1] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[2];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
	grinder->pcache_w += (w[2] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[3];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
	grinder->pcache_w += (w[3] != 0);
}
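/*
 * Example: bmp_slab = 0x0001000000030000 with bmp_pos = 0 yields
 * w[] = {0x0000, 0x0003, 0x0000, 0x0001}, so the pipe cache receives two
 * entries: (qmask 0x0003, qindex 16) and (qmask 0x0001, qindex 48).
 */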
static inline void
grinder_tccache_populate(struct rte_sched_subport *subport,
	uint32_t pos, uint32_t qindex, uint16_t qmask)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	uint8_t b, i;

	grinder->tccache_w = 0;
	grinder->tccache_r = 0;

	/* One cache entry per active strict-priority TC (single queue each) */
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
		b = (uint8_t) ((qmask >> i) & 0x1);
		grinder->tccache_qmask[grinder->tccache_w] = b;
		grinder->tccache_qindex[grinder->tccache_w] = qindex + i;
		grinder->tccache_w += (b != 0);
	}

	/* One entry for the best-effort TC, carrying all of its queue bits */
	b = (uint8_t) (qmask >> (RTE_SCHED_TRAFFIC_CLASS_BE));
	grinder->tccache_qmask[grinder->tccache_w] = b;
	grinder->tccache_qindex[grinder->tccache_w] = qindex +
		RTE_SCHED_TRAFFIC_CLASS_BE;
	grinder->tccache_w += (b != 0);
}
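/*
 * Example, assuming RTE_SCHED_TRAFFIC_CLASS_BE == 12 so that bits 0..11 of
 * qmask are the strict-priority TCs and bits 12..15 the four best-effort
 * queues: qmask = 0xF001 produces two cache entries, (qmask 0x1, qindex)
 * for TC 0 and (qmask 0xF, qindex + 12) for the best-effort TC.
 */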
static inline int
grinder_next_tc(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_mbuf **qbase;
	uint32_t qindex;
	uint16_t qsize;

	if (grinder->tccache_r == grinder->tccache_w)
		return 0;

	qindex = grinder->tccache_qindex[grinder->tccache_r];
	qbase = rte_sched_subport_pipe_qbase(subport, qindex);
	qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);

	grinder->tc_index = rte_sched_port_pipe_tc(port, qindex);
	grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
	grinder->qsize = qsize;

	/* Strict-priority TC: single queue */
	if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
		grinder->queue[0] = subport->queue + qindex;
		grinder->qbase[0] = qbase;
		grinder->qindex[0] = qindex;
		grinder->tccache_r++;

		return 1;
	}

	/* Best-effort TC: four consecutive queues */
	grinder->queue[0] = subport->queue + qindex;
	grinder->queue[1] = subport->queue + qindex + 1;
	grinder->queue[2] = subport->queue + qindex + 2;
	grinder->queue[3] = subport->queue + qindex + 3;

	grinder->qbase[0] = qbase;
	grinder->qbase[1] = qbase + qsize;
	grinder->qbase[2] = qbase + 2 * qsize;
	grinder->qbase[3] = qbase + 3 * qsize;

	grinder->qindex[0] = qindex;
	grinder->qindex[1] = qindex + 1;
	grinder->qindex[2] = qindex + 2;
	grinder->qindex[3] = qindex + 3;

	grinder->tccache_r++;
	return 1;
}
static inline int
grinder_next_pipe(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	uint32_t pipe_qindex;
	uint16_t pipe_qmask;

	if (grinder->pcache_r < grinder->pcache_w) {
		pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
		pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
		grinder->pcache_r++;
	} else {
		uint64_t bmp_slab = 0;
		uint32_t bmp_pos = 0;

		/* Get another non-empty pipe group */
		if (unlikely(rte_bitmap_scan(subport->bmp, &bmp_pos, &bmp_slab) <= 0))
			return 0;

#ifdef RTE_SCHED_DEBUG
		debug_check_queue_slab(subport, bmp_pos, bmp_slab);
#endif

		/* Return if pipe group already in one of the other grinders */
		subport->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
		if (unlikely(grinder_pipe_exists(subport, bmp_pos)))
			return 0;

		subport->grinder_base_bmp_pos[pos] = bmp_pos;

		/* Install new pipe group into grinder's pipe cache */
		grinder_pcache_populate(subport, pos, bmp_pos, bmp_slab);

		pipe_qmask = grinder->pcache_qmask[0];
		pipe_qindex = grinder->pcache_qindex[0];
		grinder->pcache_r = 1;
	}

	/* Install new pipe in the grinder */
	grinder->pindex = pipe_qindex >> 4;
	grinder->subport = subport;
	grinder->pipe = subport->pipe + grinder->pindex;
	grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
	grinder->productive = 0;

	grinder_tccache_populate(subport, pos, pipe_qindex, pipe_qmask);
	grinder_next_tc(port, subport, pos);

	/* Check for pipe exhaustion */
	if (grinder->pindex == subport->pipe_loop) {
		subport->pipe_exhaustion = 1;
		subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
	}

	return 1;
}
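/*
 * Note: the pipe index is recovered as pipe_qindex >> 4 because each pipe
 * owns 16 consecutive queues, i.e. the queue index space is the pipe index
 * space scaled by RTE_SCHED_QUEUES_PER_PIPE.
 */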
static inline void
grinder_wrr_load(struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
	uint32_t qmask = grinder->qmask;

	grinder->wrr_tokens[0] =
		((uint16_t) pipe->wrr_tokens[0]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[1] =
		((uint16_t) pipe->wrr_tokens[1]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[2] =
		((uint16_t) pipe->wrr_tokens[2]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[3] =
		((uint16_t) pipe->wrr_tokens[3]) << RTE_SCHED_WRR_SHIFT;

	grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
	grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
	grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
	grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;

	grinder->wrr_cost[0] = pipe_params->wrr_cost[0];
	grinder->wrr_cost[1] = pipe_params->wrr_cost[1];
	grinder->wrr_cost[2] = pipe_params->wrr_cost[2];
	grinder->wrr_cost[3] = pipe_params->wrr_cost[3];
}
static inline void
grinder_wrr_store(struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;

	pipe->wrr_tokens[0] =
		(grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >>
		RTE_SCHED_WRR_SHIFT;
	pipe->wrr_tokens[1] =
		(grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >>
		RTE_SCHED_WRR_SHIFT;
	pipe->wrr_tokens[2] =
		(grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >>
		RTE_SCHED_WRR_SHIFT;
	pipe->wrr_tokens[3] =
		(grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >>
		RTE_SCHED_WRR_SHIFT;
}
static inline void
grinder_wrr(struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	uint16_t wrr_tokens_min;

	/* Inactive queues get all-ones tokens so they are never selected */
	grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
	grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
	grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
	grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];

	/* Select the queue with the fewest consumed tokens ... */
	grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
	wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];

	/* ... and normalize so that the minimum becomes zero */
	grinder->wrr_tokens[0] -= wrr_tokens_min;
	grinder->wrr_tokens[1] -= wrr_tokens_min;
	grinder->wrr_tokens[2] -= wrr_tokens_min;
	grinder->wrr_tokens[3] -= wrr_tokens_min;
}
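/*
 * Example: with active-queue token counts {0, 6, 2, 4},
 * rte_min_pos_4_u16() selects qpos 0. After that queue is scheduled,
 * grinder_schedule() adds pkt_len * wrr_cost[0] to its token count, so
 * queues with higher cost (i.e. lower configured weight) fall behind
 * faster and are picked less often.
 */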
/* Grinder eviction is currently a no-op */
#define grinder_evict(subport, pos)

static inline void
grinder_prefetch_pipe(struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;

	rte_prefetch0(grinder->pipe);
	rte_prefetch0(grinder->queue[0]);
}

static inline void
grinder_prefetch_tc_queue_arrays(struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	uint16_t qsize, qr[RTE_SCHED_MAX_QUEUES_PER_TC];

	qsize = grinder->qsize;
	grinder->qpos = 0;

	if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
		qr[0] = grinder->queue[0]->qr & (qsize - 1);

		rte_prefetch0(grinder->qbase[0] + qr[0]);
		return;
	}

	qr[0] = grinder->queue[0]->qr & (qsize - 1);
	qr[1] = grinder->queue[1]->qr & (qsize - 1);
	qr[2] = grinder->queue[2]->qr & (qsize - 1);
	qr[3] = grinder->queue[3]->qr & (qsize - 1);

	rte_prefetch0(grinder->qbase[0] + qr[0]);
	rte_prefetch0(grinder->qbase[1] + qr[1]);

	/* Overlap the WRR computation with the outstanding prefetches */
	grinder_wrr_load(subport, pos);
	grinder_wrr(subport, pos);

	rte_prefetch0(grinder->qbase[2] + qr[2]);
	rte_prefetch0(grinder->qbase[3] + qr[3]);
}
static inline void
grinder_prefetch_mbuf(struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	uint32_t qpos = grinder->qpos;
	struct rte_mbuf **qbase = grinder->qbase[qpos];
	uint16_t qsize = grinder->qsize;
	uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);

	grinder->pkt = qbase[qr];
	rte_prefetch0(grinder->pkt);

	/* Prefetch the next cache line of the queue array on 8-slot boundaries */
	if (unlikely((qr & 0x7) == 7)) {
		uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);

		rte_prefetch0(qbase + qr_next);
	}
}
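/*
 * Grinder pipeline: each grinder cycles through the states below so that
 * memory accesses for one pipe are prefetched while another grinder does
 * useful work. The expected flow is e_GRINDER_PREFETCH_PIPE ->
 * e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS -> e_GRINDER_PREFETCH_MBUF ->
 * e_GRINDER_READ_MBUF, with READ_MBUF looping back as long as the current
 * pipe still has active queues.
 */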
static inline uint32_t
grinder_handle(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;

	switch (grinder->state) {
	case e_GRINDER_PREFETCH_PIPE:
	{
		if (grinder_next_pipe(port, subport, pos)) {
			grinder_prefetch_pipe(subport, pos);
			subport->busy_grinders++;

			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
			return 0;
		}

		return 0;
	}

	case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
	{
		struct rte_sched_pipe *pipe = grinder->pipe;

		grinder->pipe_params = subport->pipe_profiles + pipe->profile;
		grinder_prefetch_tc_queue_arrays(subport, pos);
		grinder_credits_update(port, subport, pos);

		grinder->state = e_GRINDER_PREFETCH_MBUF;
		return 0;
	}

	case e_GRINDER_PREFETCH_MBUF:
	{
		grinder_prefetch_mbuf(subport, pos);

		grinder->state = e_GRINDER_READ_MBUF;
		return 0;
	}

	case e_GRINDER_READ_MBUF:
	{
		uint32_t wrr_active, result = 0;

		result = grinder_schedule(port, subport, pos);

		wrr_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE);

		/* Look for next packet within the same TC */
		if (result && grinder->qmask) {
			if (wrr_active)
				grinder_wrr(subport, pos);

			grinder_prefetch_mbuf(subport, pos);

			return 1;
		}

		if (wrr_active)
			grinder_wrr_store(subport, pos);

		/* Look for another active TC within same pipe */
		if (grinder_next_tc(port, subport, pos)) {
			grinder_prefetch_tc_queue_arrays(subport, pos);

			grinder->state = e_GRINDER_PREFETCH_MBUF;
			return result;
		}

		if (grinder->productive == 0 &&
			subport->pipe_loop == RTE_SCHED_PIPE_INVALID)
			subport->pipe_loop = grinder->pindex;

		grinder_evict(subport, pos);

		/* Look for another active pipe */
		if (grinder_next_pipe(port, subport, pos)) {
			grinder_prefetch_pipe(subport, pos);

			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
			return result;
		}

		/* No active pipe found */
		subport->busy_grinders--;

		grinder->state = e_GRINDER_PREFETCH_PIPE;
		return result;
	}

	default:
		rte_panic("Algorithmic error (invalid state)\n");
		return 0;
	}
}
static inline void
rte_sched_port_time_resync(struct rte_sched_port *port)
{
	uint64_t cycles = rte_get_tsc_cycles();
	uint64_t cycles_diff = cycles - port->time_cpu_cycles;
	uint64_t bytes_diff;
	uint32_t i;

	/* Compute elapsed time in bytes */
	bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT,
		port->inv_cycles_per_byte);

	/* Advance port time */
	port->time_cpu_cycles = cycles;
	port->time_cpu_bytes += bytes_diff;
	if (port->time < port->time_cpu_bytes)
		port->time = port->time_cpu_bytes;

	/* Reset pipe loop detection */
	for (i = 0; i < port->n_subports_per_port; i++)
		port->subports[i]->pipe_loop = RTE_SCHED_PIPE_INVALID;
}
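/*
 * The conversion above computes
 *	bytes_diff = (cycles_diff << RTE_SCHED_TIME_SHIFT) / cycles_per_byte
 * using a precomputed reciprocal instead of a division; cycles_per_byte is
 * itself scaled by RTE_SCHED_TIME_SHIFT (8 bits) at port config time, so
 * the shift cancels out. For example, a 2 GHz TSC and a 10 Gbps line rate
 * give 1.6 cycles/byte, stored as 1.6 * 256 = 409 after scaling; 1000
 * elapsed cycles then map to (1000 << 8) / 409 = 625 bytes.
 */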
static inline int
rte_sched_port_exceptions(struct rte_sched_subport *subport, int second_pass)
{
	int exceptions;

	/* Check if any exception flag is set */
	exceptions = (second_pass && subport->busy_grinders == 0) ||
		(subport->pipe_exhaustion == 1);

	/* Clear exception flags */
	subport->pipe_exhaustion = 0;

	return exceptions;
}
int
rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_sched_subport *subport;
	uint32_t subport_id = port->subport_id;
	uint32_t i, n_subports = 0, count;

	port->pkts_out = pkts;
	port->n_pkts_out = 0;

	rte_sched_port_time_resync(port);

	/* Take each queue in the grinder one step further */
	for (i = 0, count = 0; ; i++) {
		subport = port->subports[subport_id];

		count += grinder_handle(port, subport,
				i & (RTE_SCHED_PORT_N_GRINDERS - 1));

		if (count == n_pkts) {
			subport_id++;

			if (subport_id == port->n_subports_per_port)
				subport_id = 0;

			port->subport_id = subport_id;
			break;
		}

		/* Move to the next subport once this one runs dry */
		if (rte_sched_port_exceptions(subport, i >= RTE_SCHED_PORT_N_GRINDERS)) {
			i = 0;
			subport_id++;
			n_subports++;
		}

		if (subport_id == port->n_subports_per_port)
			subport_id = 0;

		if (n_subports == port->n_subports_per_port) {
			port->subport_id = subport_id;
			break;
		}
	}

	return count;
}
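/*
 * Minimal usage sketch (illustrative, not part of this file): a
 * run-to-completion thread typically pairs rte_sched_port_enqueue() with
 * rte_sched_port_dequeue() on the same port, e.g.:
 *
 *	struct rte_mbuf *burst[64];
 *	uint16_t n_rx;
 *	int n_tx;
 *
 *	n_rx = rte_eth_rx_burst(rx_port_id, 0, burst, 64);
 *	rte_sched_port_enqueue(port, burst, n_rx);
 *	n_tx = rte_sched_port_dequeue(port, burst, 64);
 *	if (n_tx > 0)
 *		rte_eth_tx_burst(tx_port_id, 0, burst, (uint16_t)n_tx);
 *
 * The names "port", "rx_port_id" and "tx_port_id" are placeholders, and
 * error handling plus leftover-packet management are omitted; the dequeued
 * burst need not contain the same packets that were just enqueued.
 */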