/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_bitmap.h>
#include <rte_reciprocal.h>

#include "rte_sched.h"
#include "rte_sched_common.h"
#include "rte_approx.h"

#ifdef __INTEL_COMPILER
#pragma warning(disable:2259) /* conversion may lose significant bits */
#endif

#ifdef RTE_SCHED_VECTOR
#include <rte_vect.h>

#ifdef RTE_ARCH_X86
#define SCHED_VECTOR_SSE4
#elif defined(RTE_MACHINE_CPUFLAG_NEON)
#define SCHED_VECTOR_NEON
#endif

#endif

#define RTE_SCHED_TB_RATE_CONFIG_ERR          (1e-7)
#define RTE_SCHED_WRR_SHIFT                   3
#define RTE_SCHED_MAX_QUEUES_PER_TC           RTE_SCHED_BE_QUEUES_PER_PIPE
#define RTE_SCHED_GRINDER_PCACHE_SIZE         (64 / RTE_SCHED_QUEUES_PER_PIPE)
#define RTE_SCHED_PIPE_INVALID                UINT32_MAX
#define RTE_SCHED_BMP_POS_INVALID             UINT32_MAX

/* Scaling for cycles_per_byte calculation
 * Chosen so that minimum rate is 480 bit/sec
 */
#define RTE_SCHED_TIME_SHIFT                  8
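/*
 * Worked example (illustrative, not part of the original source): with a
 * 2 GHz TSC and a 10 GbE port rate of 1.25e9 bytes/sec, the port config
 * below computes cycles_per_byte = (2e9 << 8) / 1.25e9 = 409, i.e. ~1.6
 * CPU cycles per byte held in fixed point with 8 fractional bits. The
 * 8-bit shift is what keeps this quotient non-zero for rates as low as
 * the 480 bit/sec minimum quoted above.
 */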
struct rte_sched_subport {
	/* Token bucket (TB) */
	uint64_t tb_time; /* time of last update */
	uint32_t tb_period;
	uint32_t tb_credits_per_period;
	uint32_t tb_size;
	uint32_t tb_credits;

	/* Traffic classes (TCs) */
	uint64_t tc_time; /* time of next update */
	uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tc_period;

	/* TC oversubscription */
	uint32_t tc_ov_wm;
	uint32_t tc_ov_wm_min;
	uint32_t tc_ov_wm_max;
	uint8_t tc_ov_period_id;
	uint8_t tc_ov;
	uint32_t tc_ov_n;
	double tc_ov_rate;

	/* Statistics */
	struct rte_sched_subport_stats stats;
};

struct rte_sched_pipe_profile {
	/* Token bucket (TB) */
	uint32_t tb_period;
	uint32_t tb_credits_per_period;
	uint32_t tb_size;

	/* Pipe traffic classes */
	uint32_t tc_period;
	uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint8_t tc_ov_weight;

	/* Pipe best-effort traffic class queues */
	uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
};

struct rte_sched_pipe {
	/* Token bucket (TB) */
	uint64_t tb_time; /* time of last update */
	uint32_t tb_credits;

	/* Pipe profile and flags */
	uint32_t profile;

	/* Traffic classes (TCs) */
	uint64_t tc_time; /* time of next update */
	uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

	/* Weighted Round Robin (WRR) */
	uint8_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];

	/* TC oversubscription */
	uint32_t tc_ov_credits;
	uint8_t tc_ov_period_id;
} __rte_cache_aligned;
struct rte_sched_queue {
	uint16_t qr, qw;
};

struct rte_sched_queue_extra {
	struct rte_sched_queue_stats stats;
#ifdef RTE_SCHED_RED
	struct rte_red red;
#endif
};

enum grinder_state {
	e_GRINDER_PREFETCH_PIPE = 0,
	e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
	e_GRINDER_PREFETCH_MBUF,
	e_GRINDER_READ_MBUF
};

struct rte_sched_grinder {
	/* Pipe cache */
	uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
	uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
	uint32_t pcache_w;
	uint32_t pcache_r;

	/* Current pipe */
	enum grinder_state state;
	uint32_t productive;
	uint32_t pindex;
	struct rte_sched_subport *subport;
	struct rte_sched_pipe *pipe;
	struct rte_sched_pipe_profile *pipe_params;

	/* TC cache */
	uint8_t tccache_qmask[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tccache_qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tccache_w;
	uint32_t tccache_r;

	/* Current TC */
	uint32_t tc_index;
	struct rte_sched_queue *queue[RTE_SCHED_MAX_QUEUES_PER_TC];
	struct rte_mbuf **qbase[RTE_SCHED_MAX_QUEUES_PER_TC];
	uint32_t qindex[RTE_SCHED_MAX_QUEUES_PER_TC];
	uint16_t qsize;
	uint32_t qmask;
	uint32_t qpos;
	struct rte_mbuf *pkt;

	/* WRR */
	uint16_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
	uint16_t wrr_mask[RTE_SCHED_BE_QUEUES_PER_PIPE];
	uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
};
struct rte_sched_port {
	/* User parameters */
	uint32_t n_subports_per_port;
	uint32_t n_pipes_per_subport;
	uint32_t n_pipes_per_subport_log2;
	uint16_t pipe_queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint8_t pipe_tc[RTE_SCHED_QUEUES_PER_PIPE];
	uint8_t tc_queue[RTE_SCHED_QUEUES_PER_PIPE];
	uint32_t rate;
	uint32_t mtu;
	uint32_t frame_overhead;
	uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t n_pipe_profiles;
	uint32_t n_max_pipe_profiles;
	uint32_t pipe_tc3_rate_max;
#ifdef RTE_SCHED_RED
	struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
#endif

	/* Timing */
	uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cycles */
	uint64_t time_cpu_bytes;  /* Current CPU time measured in bytes */
	uint64_t time;            /* Current NIC TX time measured in bytes */
	struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */

	/* Scheduling loop detection */
	uint32_t pipe_loop;
	uint32_t pipe_exhaustion;

	/* Bitmap */
	struct rte_bitmap *bmp;
	uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;

	/* Grinders */
	struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
	uint32_t busy_grinders;
	struct rte_mbuf **pkts_out;
	uint32_t n_pkts_out;

	/* Queue base calculation */
	uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
	uint32_t qsize_sum;

	/* Large data structures */
	struct rte_sched_subport *subport;
	struct rte_sched_pipe *pipe;
	struct rte_sched_queue *queue;
	struct rte_sched_queue_extra *queue_extra;
	struct rte_sched_pipe_profile *pipe_profiles;
	uint8_t *bmp_array;
	struct rte_mbuf **queue_array;
	uint8_t memory[0] __rte_cache_aligned;
} __rte_cache_aligned;
enum rte_sched_port_array {
	e_RTE_SCHED_PORT_ARRAY_SUBPORT = 0,
	e_RTE_SCHED_PORT_ARRAY_PIPE,
	e_RTE_SCHED_PORT_ARRAY_QUEUE,
	e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA,
	e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES,
	e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY,
	e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY,
	e_RTE_SCHED_PORT_ARRAY_TOTAL,
};
#ifdef RTE_SCHED_COLLECT_STATS

static inline uint32_t
rte_sched_port_queues_per_subport(struct rte_sched_port *port)
{
	return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport;
}

#endif

static inline uint32_t
rte_sched_port_queues_per_port(struct rte_sched_port *port)
{
	return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport * port->n_subports_per_port;
}

static inline struct rte_mbuf **
rte_sched_port_qbase(struct rte_sched_port *port, uint32_t qindex)
{
	uint32_t pindex = qindex >> 4;
	uint32_t qpos = qindex & 0xF;

	return (port->queue_array + pindex *
		port->qsize_sum + port->qsize_add[qpos]);
}
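/*
 * Illustrative example (not from the original source): with 16 queues per
 * pipe, qindex = 0x123 decomposes into pindex = 0x12 (pipe 18 across the
 * whole port) and qpos = 0x3 (4th queue of that pipe). The queue storage
 * for pipe 18 starts at queue_array + 18 * qsize_sum, and qsize_add[3]
 * skips over the three preceding queues of the pipe.
 */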
static inline uint16_t
rte_sched_port_qsize(struct rte_sched_port *port, uint32_t qindex)
{
	uint32_t tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

	return port->qsize[tc];
}

static inline uint16_t
rte_sched_port_pipe_queue(struct rte_sched_port *port, uint32_t traffic_class)
{
	uint16_t pipe_queue = port->pipe_queue[traffic_class];

	return pipe_queue;
}

static inline uint8_t
rte_sched_port_pipe_tc(struct rte_sched_port *port, uint32_t qindex)
{
	uint8_t pipe_tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

	return pipe_tc;
}

static inline uint8_t
rte_sched_port_tc_queue(struct rte_sched_port *port, uint32_t qindex)
{
	uint8_t tc_queue = port->tc_queue[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

	return tc_queue;
}
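/*
 * Example mapping (illustrative, assuming the default 13 traffic classes
 * with TCs 0-11 owning one queue each and the best-effort TC owning queues
 * 12-15, as set up in rte_sched_port_config() below): pipe_tc[] maps queue
 * 14 to the BE traffic class, while tc_queue[] maps it to queue 2 within
 * that class.
 */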
static int
pipe_profile_check(struct rte_sched_pipe_params *params,
	uint32_t rate, uint16_t *qsize)
{
	uint32_t i;

	/* Pipe parameters */
	if (params == NULL)
		return -10;

	/* TB rate: non-zero, not greater than port rate */
	if (params->tb_rate == 0 ||
		params->tb_rate > rate)
		return -11;

	/* TB size: non-zero */
	if (params->tb_size == 0)
		return -12;

	/* TC rate: non-zero if qsize non-zero, less than pipe rate */
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		if ((qsize[i] == 0 && params->tc_rate[i] != 0) ||
			(qsize[i] != 0 && (params->tc_rate[i] == 0 ||
			params->tc_rate[i] > params->tb_rate)))
			return -13;
	}
	if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
		qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0)
		return -13;

	/* TC period: non-zero */
	if (params->tc_period == 0)
		return -14;

	/* TC3 oversubscription weight: non-zero */
	if (params->tc_ov_weight == 0)
		return -15;

	/* Queue WRR weights: non-zero */
	for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
		if (params->wrr_weights[i] == 0)
			return -16;
	}

	return 0;
}
static int
rte_sched_port_check_params(struct rte_sched_port_params *params)
{
	uint32_t i;

	if (params == NULL)
		return -1;

	/* socket */
	if (params->socket < 0)
		return -3;

	/* rate */
	if (params->rate == 0)
		return -4;

	/* mtu */
	if (params->mtu == 0)
		return -5;

	/* n_subports_per_port: non-zero, limited to 16 bits, power of 2 */
	if (params->n_subports_per_port == 0 ||
		params->n_subports_per_port > 1u << 16 ||
		!rte_is_power_of_2(params->n_subports_per_port))
		return -6;

	/* n_pipes_per_subport: non-zero, power of 2 */
	if (params->n_pipes_per_subport == 0 ||
		!rte_is_power_of_2(params->n_pipes_per_subport))
		return -7;

	/* qsize: non-zero, power of 2,
	 * no bigger than 32K (due to 16-bit read/write pointers)
	 */
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		uint16_t qsize = params->qsize[i];

		if ((qsize != 0 && !rte_is_power_of_2(qsize)) ||
			((i == RTE_SCHED_TRAFFIC_CLASS_BE) && (qsize == 0)))
			return -8;
	}

	/* pipe_profiles and n_pipe_profiles */
	if (params->pipe_profiles == NULL ||
		params->n_pipe_profiles == 0 ||
		params->n_pipe_profiles > params->n_max_pipe_profiles)
		return -9;

	for (i = 0; i < params->n_pipe_profiles; i++) {
		struct rte_sched_pipe_params *p = params->pipe_profiles + i;
		int status;

		status = pipe_profile_check(p, params->rate, &params->qsize[0]);
		if (status != 0)
			return status;
	}

	return 0;
}
static uint32_t
rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sched_port_array array)
{
	uint32_t n_subports_per_port = params->n_subports_per_port;
	uint32_t n_pipes_per_subport = params->n_pipes_per_subport;
	uint32_t n_pipes_per_port = n_pipes_per_subport * n_subports_per_port;
	uint32_t n_queues_per_port = RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport * n_subports_per_port;

	uint32_t size_subport = n_subports_per_port * sizeof(struct rte_sched_subport);
	uint32_t size_pipe = n_pipes_per_port * sizeof(struct rte_sched_pipe);
	uint32_t size_queue = n_queues_per_port * sizeof(struct rte_sched_queue);
	uint32_t size_queue_extra
		= n_queues_per_port * sizeof(struct rte_sched_queue_extra);
	uint32_t size_pipe_profiles
		= params->n_max_pipe_profiles * sizeof(struct rte_sched_pipe_profile);
	uint32_t size_bmp_array = rte_bitmap_get_memory_footprint(n_queues_per_port);
	uint32_t size_per_pipe_queue_array, size_queue_array;

	uint32_t base, i;

	size_per_pipe_queue_array = 0;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		if (i < RTE_SCHED_TRAFFIC_CLASS_BE)
			size_per_pipe_queue_array +=
				params->qsize[i] * sizeof(struct rte_mbuf *);
		else
			size_per_pipe_queue_array += RTE_SCHED_MAX_QUEUES_PER_TC *
				params->qsize[i] * sizeof(struct rte_mbuf *);
	}
	size_queue_array = n_pipes_per_port * size_per_pipe_queue_array;

	base = 0;

	if (array == e_RTE_SCHED_PORT_ARRAY_SUBPORT)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_subport);

	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_pipe);

	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_queue);

	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);

	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);

	if (array == e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);

	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);

	return base;
}
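/*
 * Layout note (illustrative): the function above returns the byte offset of
 * each array inside the single memory blob allocated for the port, with
 * every array rounded up to a cache-line boundary. Asking for e_..._TOTAL
 * falls through every branch and yields the size of the whole blob, which
 * is how rte_sched_port_get_memory_footprint() below uses it.
 */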
uint32_t
rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
{
	uint32_t size0, size1;
	int status;

	status = rte_sched_port_check_params(params);
	if (status != 0) {
		RTE_LOG(NOTICE, SCHED,
			"Port scheduler params check failed (%d)\n", status);

		return 0;
	}

	size0 = sizeof(struct rte_sched_port);
	size1 = rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL);

	return size0 + size1;
}
static void
rte_sched_port_config_qsize(struct rte_sched_port *port)
{
	uint32_t i;

	port->qsize_add[0] = 0;

	/* Strict priority traffic class */
	for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		port->qsize_add[i] = port->qsize_add[i-1] + port->qsize[i-1];

	/* Best-effort traffic class */
	port->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] =
		port->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE] +
		port->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
	port->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] =
		port->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] +
		port->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
	port->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] =
		port->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] +
		port->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];

	port->qsize_sum = port->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] +
		port->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
}
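/*
 * Worked example (illustrative): if every strict-priority TC queue holds 64
 * mbufs and the best-effort queue size is 256, then qsize_add[] climbs 64
 * at a time for queues 0..11 and 256 at a time for the four BE queues, and
 * qsize_sum = 12 * 64 + 4 * 256 = 1792 mbuf slots per pipe.
 */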
static void
rte_sched_port_log_pipe_profile(struct rte_sched_port *port, uint32_t i)
{
	struct rte_sched_pipe_profile *p = port->pipe_profiles + i;

	RTE_LOG(DEBUG, SCHED, "Low level config for pipe profile %u:\n"
		" Token bucket: period = %u, credits per period = %u, size = %u\n"
		" Traffic classes: period = %u,\n"
		" credits per period = [%u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u]\n"
		" Best-effort traffic class oversubscription: weight = %hhu\n"
		" WRR cost: [%hhu, %hhu, %hhu, %hhu]\n",
		i,

		/* Token bucket */
		p->tb_period,
		p->tb_credits_per_period,
		p->tb_size,

		/* Traffic classes */
		p->tc_period,
		p->tc_credits_per_period[0],
		p->tc_credits_per_period[1],
		p->tc_credits_per_period[2],
		p->tc_credits_per_period[3],
		p->tc_credits_per_period[4],
		p->tc_credits_per_period[5],
		p->tc_credits_per_period[6],
		p->tc_credits_per_period[7],
		p->tc_credits_per_period[8],
		p->tc_credits_per_period[9],
		p->tc_credits_per_period[10],
		p->tc_credits_per_period[11],
		p->tc_credits_per_period[12],

		/* Traffic class 3 oversubscription */
		p->tc_ov_weight,

		/* WRR */
		p->wrr_cost[0], p->wrr_cost[1], p->wrr_cost[2], p->wrr_cost[3]);
}
static inline uint64_t
rte_sched_time_ms_to_bytes(uint32_t time_ms, uint32_t rate)
{
	uint64_t time = time_ms;

	time = (time * rate) / 1000;

	return time;
}
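/*
 * Example (illustrative): tc_period = 10 ms at rate = 1.25e9 bytes/sec gives
 * 10 * 1.25e9 / 1000 = 12,500,000 bytes, i.e. the credit budget that one TC
 * period represents once port time is expressed in bytes.
 */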
static void
rte_sched_pipe_profile_convert(struct rte_sched_port *port,
	struct rte_sched_pipe_params *src,
	struct rte_sched_pipe_profile *dst,
	uint32_t rate)
{
	uint32_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
	uint32_t lcd1, lcd2, lcd;
	uint32_t i;

	/* Token Bucket */
	if (src->tb_rate == rate) {
		dst->tb_credits_per_period = 1;
		dst->tb_period = 1;
	} else {
		double tb_rate = (double) src->tb_rate
				/ (double) rate;
		double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

		rte_approx(tb_rate, d,
			&dst->tb_credits_per_period, &dst->tb_period);
	}

	dst->tb_size = src->tb_size;

	/* Traffic Classes */
	dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period,
		rate);

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		if (port->qsize[i])
			dst->tc_credits_per_period[i]
				= rte_sched_time_ms_to_bytes(src->tc_period,
					src->tc_rate[i]);

	dst->tc_ov_weight = src->tc_ov_weight;

	/* WRR queues */
	wrr_cost[0] = src->wrr_weights[0];
	wrr_cost[1] = src->wrr_weights[1];
	wrr_cost[2] = src->wrr_weights[2];
	wrr_cost[3] = src->wrr_weights[3];

	lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
	lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
	lcd = rte_get_lcd(lcd1, lcd2);

	wrr_cost[0] = lcd / wrr_cost[0];
	wrr_cost[1] = lcd / wrr_cost[1];
	wrr_cost[2] = lcd / wrr_cost[2];
	wrr_cost[3] = lcd / wrr_cost[3];

	dst->wrr_cost[0] = (uint8_t) wrr_cost[0];
	dst->wrr_cost[1] = (uint8_t) wrr_cost[1];
	dst->wrr_cost[2] = (uint8_t) wrr_cost[2];
	dst->wrr_cost[3] = (uint8_t) wrr_cost[3];
}
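/*
 * Worked example (illustrative): WRR weights {1, 2, 4, 8} have lowest
 * common denominator lcd = 8, so the per-queue costs become {8, 4, 2, 1}.
 * A queue with twice the weight accumulates tokens at half the cost per
 * byte, and therefore drains roughly twice the bytes of its neighbour over
 * a WRR cycle.
 */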
static void
rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port,
	struct rte_sched_port_params *params)
{
	uint32_t i;

	for (i = 0; i < port->n_pipe_profiles; i++) {
		struct rte_sched_pipe_params *src = params->pipe_profiles + i;
		struct rte_sched_pipe_profile *dst = port->pipe_profiles + i;

		rte_sched_pipe_profile_convert(port, src, dst, params->rate);
		rte_sched_port_log_pipe_profile(port, i);
	}

	port->pipe_tc3_rate_max = 0;
	for (i = 0; i < port->n_pipe_profiles; i++) {
		struct rte_sched_pipe_params *src = params->pipe_profiles + i;
		uint32_t pipe_tc3_rate = src->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];

		if (port->pipe_tc3_rate_max < pipe_tc3_rate)
			port->pipe_tc3_rate_max = pipe_tc3_rate;
	}
}
struct rte_sched_port *
rte_sched_port_config(struct rte_sched_port_params *params)
{
	struct rte_sched_port *port = NULL;
	uint32_t mem_size, bmp_mem_size, n_queues_per_port, i, j, cycles_per_byte;

	/* Check user parameters. Determine the amount of memory to allocate */
	mem_size = rte_sched_port_get_memory_footprint(params);
	if (mem_size == 0)
		return NULL;

	/* Allocate memory to store the data structures */
	port = rte_zmalloc_socket("qos_params", mem_size, RTE_CACHE_LINE_SIZE,
		params->socket);
	if (port == NULL)
		return NULL;

	/* compile time checks */
	RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
	RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS & (RTE_SCHED_PORT_N_GRINDERS - 1));

	/* User parameters */
	port->n_subports_per_port = params->n_subports_per_port;
	port->n_pipes_per_subport = params->n_pipes_per_subport;
	port->n_pipes_per_subport_log2 =
		__builtin_ctz(params->n_pipes_per_subport);

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		port->pipe_queue[i] = i;

	for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
		port->pipe_tc[i] = j;

		if (j < RTE_SCHED_TRAFFIC_CLASS_BE)
			j++;
	}

	for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
		port->tc_queue[i] = j;

		if (i >= RTE_SCHED_TRAFFIC_CLASS_BE)
			j++;
	}

	port->rate = params->rate;
	port->mtu = params->mtu + params->frame_overhead;
	port->frame_overhead = params->frame_overhead;
	memcpy(port->qsize, params->qsize, sizeof(params->qsize));
	port->n_pipe_profiles = params->n_pipe_profiles;
	port->n_max_pipe_profiles = params->n_max_pipe_profiles;

#ifdef RTE_SCHED_RED
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {

		for (j = 0; j < RTE_COLORS; j++) {
			/* if min/max are both zero, then RED is disabled */
			if ((params->red_params[i][j].min_th |
			     params->red_params[i][j].max_th) == 0) {
				continue;
			}

			if (rte_red_config_init(&port->red_config[i][j],
				params->red_params[i][j].wq_log2,
				params->red_params[i][j].min_th,
				params->red_params[i][j].max_th,
				params->red_params[i][j].maxp_inv) != 0) {
				rte_free(port);
				return NULL;
			}
		}
	}
#endif

	/* Timing */
	port->time_cpu_cycles = rte_get_tsc_cycles();
	port->time_cpu_bytes = 0;
	port->time = 0;

	cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT)
		/ params->rate;
	port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte);

	/* Scheduling loop detection */
	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
	port->pipe_exhaustion = 0;

	/* Grinders */
	port->busy_grinders = 0;
	port->pkts_out = NULL;
	port->n_pkts_out = 0;

	/* Queue base calculation */
	rte_sched_port_config_qsize(port);

	/* Large data structures */
	port->subport = (struct rte_sched_subport *)
		(port->memory + rte_sched_port_get_array_base(params,
			e_RTE_SCHED_PORT_ARRAY_SUBPORT));
	port->pipe = (struct rte_sched_pipe *)
		(port->memory + rte_sched_port_get_array_base(params,
			e_RTE_SCHED_PORT_ARRAY_PIPE));
	port->queue = (struct rte_sched_queue *)
		(port->memory + rte_sched_port_get_array_base(params,
			e_RTE_SCHED_PORT_ARRAY_QUEUE));
	port->queue_extra = (struct rte_sched_queue_extra *)
		(port->memory + rte_sched_port_get_array_base(params,
			e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA));
	port->pipe_profiles = (struct rte_sched_pipe_profile *)
		(port->memory + rte_sched_port_get_array_base(params,
			e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES));
	port->bmp_array = port->memory
		+ rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY);
	port->queue_array = (struct rte_mbuf **)
		(port->memory + rte_sched_port_get_array_base(params,
			e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY));

	/* Pipe profile table */
	rte_sched_port_config_pipe_profile_table(port, params);

	/* Bitmap */
	n_queues_per_port = rte_sched_port_queues_per_port(port);
	bmp_mem_size = rte_bitmap_get_memory_footprint(n_queues_per_port);
	port->bmp = rte_bitmap_init(n_queues_per_port, port->bmp_array,
		bmp_mem_size);
	if (port->bmp == NULL) {
		RTE_LOG(ERR, SCHED, "Bitmap init error\n");
		rte_free(port);
		return NULL;
	}

	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
		port->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;

	return port;
}
void
rte_sched_port_free(struct rte_sched_port *port)
{
	uint32_t qindex;
	uint32_t n_queues_per_port;

	/* Check user parameters */
	if (port == NULL)
		return;

	n_queues_per_port = rte_sched_port_queues_per_port(port);

	/* Free enqueued mbufs */
	for (qindex = 0; qindex < n_queues_per_port; qindex++) {
		struct rte_mbuf **mbufs = rte_sched_port_qbase(port, qindex);
		uint16_t qsize = rte_sched_port_qsize(port, qindex);
		if (qsize != 0) {
			struct rte_sched_queue *queue = port->queue + qindex;
			uint16_t qr = queue->qr & (qsize - 1);
			uint16_t qw = queue->qw & (qsize - 1);

			for (; qr != qw; qr = (qr + 1) & (qsize - 1))
				rte_pktmbuf_free(mbufs[qr]);
		}
	}

	rte_bitmap_free(port->bmp);
	rte_free(port);
}
static void
rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
{
	struct rte_sched_subport *s = port->subport + i;

	RTE_LOG(DEBUG, SCHED, "Low level config for subport %u:\n"
		" Token bucket: period = %u, credits per period = %u, size = %u\n"
		" Traffic classes: period = %u\n"
		" credits per period = [%u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u]\n"
		" Best effort traffic class oversubscription: wm min = %u, wm max = %u\n",
		i,

		/* Token bucket */
		s->tb_period,
		s->tb_credits_per_period,
		s->tb_size,

		/* Traffic classes */
		s->tc_period,
		s->tc_credits_per_period[0],
		s->tc_credits_per_period[1],
		s->tc_credits_per_period[2],
		s->tc_credits_per_period[3],
		s->tc_credits_per_period[4],
		s->tc_credits_per_period[5],
		s->tc_credits_per_period[6],
		s->tc_credits_per_period[7],
		s->tc_credits_per_period[8],
		s->tc_credits_per_period[9],
		s->tc_credits_per_period[10],
		s->tc_credits_per_period[11],
		s->tc_credits_per_period[12],

		/* Traffic class 3 oversubscription */
		s->tc_ov_wm_min,
		s->tc_ov_wm_max);
}
int
rte_sched_subport_config(struct rte_sched_port *port,
	uint32_t subport_id,
	struct rte_sched_subport_params *params)
{
	struct rte_sched_subport *s;
	uint32_t i;

	/* Check user parameters */
	if (port == NULL ||
		subport_id >= port->n_subports_per_port ||
		params == NULL)
		return -1;

	if (params->tb_rate == 0 || params->tb_rate > port->rate)
		return -2;

	if (params->tb_size == 0)
		return -3;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		uint32_t tc_rate = params->tc_rate[i];
		uint16_t qsize = port->qsize[i];

		if ((qsize == 0 && tc_rate != 0) ||
			(qsize != 0 && tc_rate == 0) ||
			(tc_rate > params->tb_rate))
			return -4;
	}

	if (port->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
		params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0)
		return -5;

	if (params->tc_period == 0)
		return -6;

	s = port->subport + subport_id;

	/* Token Bucket (TB) */
	if (params->tb_rate == port->rate) {
		s->tb_credits_per_period = 1;
		s->tb_period = 1;
	} else {
		double tb_rate = ((double) params->tb_rate) / ((double) port->rate);
		double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

		rte_approx(tb_rate, d, &s->tb_credits_per_period, &s->tb_period);
	}

	s->tb_size = params->tb_size;
	s->tb_time = port->time;
	s->tb_credits = s->tb_size / 2;

	/* Traffic Classes (TCs) */
	s->tc_period = rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		if (port->qsize[i])
			s->tc_credits_per_period[i]
				= rte_sched_time_ms_to_bytes(params->tc_period,
					params->tc_rate[i]);
	}
	s->tc_time = port->time + s->tc_period;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		if (port->qsize[i])
			s->tc_credits[i] = s->tc_credits_per_period[i];

	/* TC oversubscription */
	s->tc_ov_wm_min = port->mtu;
	s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(params->tc_period,
		port->pipe_tc3_rate_max);
	s->tc_ov_wm = s->tc_ov_wm_max;
	s->tc_ov_period_id = 0;
	s->tc_ov = 0;
	s->tc_ov_n = 0;
	s->tc_ov_rate = 0;

	rte_sched_port_log_subport_config(port, subport_id);

	return 0;
}
int
rte_sched_pipe_config(struct rte_sched_port *port,
	uint32_t subport_id,
	uint32_t pipe_id,
	int32_t pipe_profile)
{
	struct rte_sched_subport *s;
	struct rte_sched_pipe *p;
	struct rte_sched_pipe_profile *params;
	uint32_t deactivate, profile, i;

	/* Check user parameters */
	profile = (uint32_t) pipe_profile;
	deactivate = (pipe_profile < 0);

	if (port == NULL ||
		subport_id >= port->n_subports_per_port ||
		pipe_id >= port->n_pipes_per_subport ||
		(!deactivate && profile >= port->n_pipe_profiles))
		return -1;

	/* Check that subport configuration is valid */
	s = port->subport + subport_id;
	if (s->tb_period == 0)
		return -2;

	p = port->pipe + (subport_id * port->n_pipes_per_subport + pipe_id);

	/* Handle the case when pipe already has a valid configuration */
	if (p->tb_time) {
		params = port->pipe_profiles + p->profile;

#ifdef RTE_SCHED_SUBPORT_TC_OV
		double subport_tc3_rate =
			(double) s->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
			/ (double) s->tc_period;
		double pipe_tc3_rate =
			(double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
			/ (double) params->tc_period;
		uint32_t tc3_ov = s->tc_ov;

		/* Unplug pipe from its subport */
		s->tc_ov_n -= params->tc_ov_weight;
		s->tc_ov_rate -= pipe_tc3_rate;
		s->tc_ov = s->tc_ov_rate > subport_tc3_rate;

		if (s->tc_ov != tc3_ov) {
			RTE_LOG(DEBUG, SCHED,
				"Subport %u TC3 oversubscription is OFF (%.4lf >= %.4lf)\n",
				subport_id, subport_tc3_rate, s->tc_ov_rate);
		}
#endif

		/* Reset the pipe */
		memset(p, 0, sizeof(struct rte_sched_pipe));
	}

	if (deactivate)
		return 0;

	/* Apply the new pipe configuration */
	p->profile = profile;
	params = port->pipe_profiles + p->profile;

	/* Token Bucket (TB) */
	p->tb_time = port->time;
	p->tb_credits = params->tb_size / 2;

	/* Traffic Classes (TCs) */
	p->tc_time = port->time + params->tc_period;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		if (port->qsize[i])
			p->tc_credits[i] = params->tc_credits_per_period[i];

#ifdef RTE_SCHED_SUBPORT_TC_OV
	{
		/* Subport TC3 oversubscription */
		double subport_tc3_rate =
			(double) s->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
			/ (double) s->tc_period;
		double pipe_tc3_rate =
			(double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
			/ (double) params->tc_period;
		uint32_t tc3_ov = s->tc_ov;

		s->tc_ov_n += params->tc_ov_weight;
		s->tc_ov_rate += pipe_tc3_rate;
		s->tc_ov = s->tc_ov_rate > subport_tc3_rate;

		if (s->tc_ov != tc3_ov) {
			RTE_LOG(DEBUG, SCHED,
				"Subport %u TC3 oversubscription is ON (%.4lf < %.4lf)\n",
				subport_id, subport_tc3_rate, s->tc_ov_rate);
		}
		p->tc_ov_period_id = s->tc_ov_period_id;
		p->tc_ov_credits = s->tc_ov_wm;
	}
#endif

	return 0;
}
int
rte_sched_port_pipe_profile_add(struct rte_sched_port *port,
	struct rte_sched_pipe_params *params,
	uint32_t *pipe_profile_id)
{
	struct rte_sched_pipe_profile *pp;
	uint32_t i;
	int status;

	/* Port */
	if (port == NULL)
		return -1;

	/* Pipe profiles must not exceed the max limit */
	if (port->n_pipe_profiles >= port->n_max_pipe_profiles)
		return -2;

	/* Pipe params */
	status = pipe_profile_check(params, port->rate, &port->qsize[0]);
	if (status != 0)
		return status;

	pp = &port->pipe_profiles[port->n_pipe_profiles];
	rte_sched_pipe_profile_convert(port, params, pp, port->rate);

	/* Pipe profile must not already exist */
	for (i = 0; i < port->n_pipe_profiles; i++)
		if (memcmp(port->pipe_profiles + i, pp, sizeof(*pp)) == 0)
			return -3;

	/* Pipe profile commit */
	*pipe_profile_id = port->n_pipe_profiles;
	port->n_pipe_profiles++;

	if (port->pipe_tc3_rate_max < params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE])
		port->pipe_tc3_rate_max = params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];

	rte_sched_port_log_pipe_profile(port, *pipe_profile_id);

	return 0;
}
static inline uint32_t
rte_sched_port_qindex(struct rte_sched_port *port,
	uint32_t subport,
	uint32_t pipe,
	uint32_t traffic_class,
	uint32_t queue)
{
	return ((subport & (port->n_subports_per_port - 1)) <<
			(port->n_pipes_per_subport_log2 + 4)) |
			((pipe & (port->n_pipes_per_subport - 1)) << 4) |
			((rte_sched_port_pipe_queue(port, traffic_class) + queue) &
			(RTE_SCHED_QUEUES_PER_PIPE - 1));
}
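/*
 * Worked example (illustrative): with 64 pipes per subport
 * (n_pipes_per_subport_log2 = 6), subport 2, pipe 5, the best-effort TC and
 * queue 1 encode as (2 << 10) | (5 << 4) | (12 + 1) = 0x85D. The low 4 bits
 * select the queue within the pipe, the next 6 bits the pipe, and the
 * remaining bits the subport; rte_sched_port_pkt_read_tree_path() below
 * simply reverses these shifts.
 */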
void
rte_sched_port_pkt_write(struct rte_sched_port *port,
			 struct rte_mbuf *pkt,
			 uint32_t subport, uint32_t pipe,
			 uint32_t traffic_class,
			 uint32_t queue, enum rte_color color)
{
	uint32_t queue_id =
		rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);

	rte_mbuf_sched_set(pkt, queue_id, traffic_class, (uint8_t)color);
}

void
rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
				  const struct rte_mbuf *pkt,
				  uint32_t *subport, uint32_t *pipe,
				  uint32_t *traffic_class, uint32_t *queue)
{
	uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);

	*subport = queue_id >> (port->n_pipes_per_subport_log2 + 4);
	*pipe = (queue_id >> 4) & (port->n_pipes_per_subport - 1);
	*traffic_class = rte_sched_port_pipe_tc(port, queue_id);
	*queue = rte_sched_port_tc_queue(port, queue_id);
}

enum rte_color
rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
{
	return (enum rte_color)rte_mbuf_sched_color_get(pkt);
}
int
rte_sched_subport_read_stats(struct rte_sched_port *port,
			     uint32_t subport_id,
			     struct rte_sched_subport_stats *stats,
			     uint32_t *tc_ov)
{
	struct rte_sched_subport *s;

	/* Check user parameters */
	if (port == NULL || subport_id >= port->n_subports_per_port ||
	    stats == NULL || tc_ov == NULL)
		return -1;

	s = port->subport + subport_id;

	/* Copy subport stats and clear */
	memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
	memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));

	/* Subport TC oversubscription status */
	*tc_ov = s->tc_ov;

	return 0;
}

int
rte_sched_queue_read_stats(struct rte_sched_port *port,
	uint32_t queue_id,
	struct rte_sched_queue_stats *stats,
	uint16_t *qlen)
{
	struct rte_sched_queue *q;
	struct rte_sched_queue_extra *qe;

	/* Check user parameters */
	if ((port == NULL) ||
	    (queue_id >= rte_sched_port_queues_per_port(port)) ||
		(stats == NULL) ||
		(qlen == NULL))
		return -1;

	q = port->queue + queue_id;
	qe = port->queue_extra + queue_id;

	/* Copy queue stats and clear */
	memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
	memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats));

	/* Queue length */
	*qlen = q->qw - q->qr;

	return 0;
}
#ifdef RTE_SCHED_DEBUG

static inline int
rte_sched_port_queue_is_empty(struct rte_sched_port *port, uint32_t qindex)
{
	struct rte_sched_queue *queue = port->queue + qindex;

	return queue->qr == queue->qw;
}

#endif /* RTE_SCHED_DEBUG */
#ifdef RTE_SCHED_COLLECT_STATS

static inline void
rte_sched_port_update_subport_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
	struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
	uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
	uint32_t pkt_len = pkt->pkt_len;

	s->stats.n_pkts_tc[tc_index] += 1;
	s->stats.n_bytes_tc[tc_index] += pkt_len;
}

#ifdef RTE_SCHED_RED
static inline void
rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
						uint32_t qindex,
						struct rte_mbuf *pkt, uint32_t red)
#else
static inline void
rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
						uint32_t qindex,
						struct rte_mbuf *pkt, __rte_unused uint32_t red)
#endif
{
	struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
	uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
	uint32_t pkt_len = pkt->pkt_len;

	s->stats.n_pkts_tc_dropped[tc_index] += 1;
	s->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
#ifdef RTE_SCHED_RED
	s->stats.n_pkts_red_dropped[tc_index] += red;
#endif
}

static inline void
rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
	uint32_t pkt_len = pkt->pkt_len;

	qe->stats.n_pkts += 1;
	qe->stats.n_bytes += pkt_len;
}

#ifdef RTE_SCHED_RED
static inline void
rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,
						uint32_t qindex,
						struct rte_mbuf *pkt, uint32_t red)
#else
static inline void
rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,
						uint32_t qindex,
						struct rte_mbuf *pkt, __rte_unused uint32_t red)
#endif
{
	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
	uint32_t pkt_len = pkt->pkt_len;

	qe->stats.n_pkts_dropped += 1;
	qe->stats.n_bytes_dropped += pkt_len;
#ifdef RTE_SCHED_RED
	qe->stats.n_pkts_red_dropped += red;
#endif
}

#endif /* RTE_SCHED_COLLECT_STATS */
#ifdef RTE_SCHED_RED

static inline int
rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint32_t qindex, uint16_t qlen)
{
	struct rte_sched_queue_extra *qe;
	struct rte_red_config *red_cfg;
	struct rte_red *red;
	uint32_t tc_index;
	enum rte_color color;

	tc_index = rte_sched_port_pipe_tc(port, qindex);
	color = rte_sched_port_pkt_read_color(pkt);
	red_cfg = &port->red_config[tc_index][color];

	if ((red_cfg->min_th | red_cfg->max_th) == 0)
		return 0;

	qe = port->queue_extra + qindex;
	red = &qe->red;

	return rte_red_enqueue(red_cfg, red, qlen, port->time);
}

static inline void
rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port, uint32_t qindex)
{
	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
	struct rte_red *red = &qe->red;

	rte_red_mark_queue_empty(red, port->time);
}

#else

#define rte_sched_port_red_drop(port, pkt, qindex, qlen) 0

#define rte_sched_port_set_queue_empty_timestamp(port, qindex)

#endif /* RTE_SCHED_RED */
#ifdef RTE_SCHED_DEBUG

static inline void
debug_check_queue_slab(struct rte_sched_port *port, uint32_t bmp_pos,
		       uint64_t bmp_slab)
{
	uint64_t mask;
	uint32_t i, panic;

	if (bmp_slab == 0)
		rte_panic("Empty slab at position %u\n", bmp_pos);

	panic = 0;
	for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
		if (mask & bmp_slab) {
			if (rte_sched_port_queue_is_empty(port, bmp_pos + i)) {
				printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i);
				panic = 1;
			}
		}
	}

	if (panic)
		rte_panic("Empty queues in slab 0x%" PRIx64 " starting at position %u\n",
			bmp_slab, bmp_pos);
}

#endif /* RTE_SCHED_DEBUG */
static inline uint32_t
rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_port *port,
				       struct rte_mbuf *pkt)
{
	struct rte_sched_queue *q;
#ifdef RTE_SCHED_COLLECT_STATS
	struct rte_sched_queue_extra *qe;
#endif
	uint32_t qindex = rte_mbuf_sched_queue_get(pkt);

	q = port->queue + qindex;
	rte_prefetch0(q);
#ifdef RTE_SCHED_COLLECT_STATS
	qe = port->queue_extra + qindex;
	rte_prefetch0(qe);
#endif

	return qindex;
}

static inline void
rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port,
				     uint32_t qindex, struct rte_mbuf **qbase)
{
	struct rte_sched_queue *q;
	struct rte_mbuf **q_qw;
	uint16_t qsize;

	q = port->queue + qindex;
	qsize = rte_sched_port_qsize(port, qindex);
	q_qw = qbase + (q->qw & (qsize - 1));

	rte_prefetch0(q_qw);
	rte_bitmap_prefetch0(port->bmp, qindex);
}

static inline int
rte_sched_port_enqueue_qwa(struct rte_sched_port *port, uint32_t qindex,
			   struct rte_mbuf **qbase, struct rte_mbuf *pkt)
{
	struct rte_sched_queue *q;
	uint16_t qsize;
	uint16_t qlen;

	q = port->queue + qindex;
	qsize = rte_sched_port_qsize(port, qindex);
	qlen = q->qw - q->qr;

	/* Drop the packet (and update drop stats) when queue is full */
	if (unlikely(rte_sched_port_red_drop(port, pkt, qindex, qlen) ||
		     (qlen >= qsize))) {
		rte_pktmbuf_free(pkt);
#ifdef RTE_SCHED_COLLECT_STATS
		rte_sched_port_update_subport_stats_on_drop(port, qindex, pkt,
							    qlen < qsize);
		rte_sched_port_update_queue_stats_on_drop(port, qindex, pkt,
							  qlen < qsize);
#endif
		return 0;
	}

	/* Enqueue packet */
	qbase[q->qw & (qsize - 1)] = pkt;
	q->qw++;

	/* Activate queue in the port bitmap */
	rte_bitmap_set(port->bmp, qindex);

	/* Statistics */
#ifdef RTE_SCHED_COLLECT_STATS
	rte_sched_port_update_subport_stats(port, qindex, pkt);
	rte_sched_port_update_queue_stats(port, qindex, pkt);
#endif

	return 1;
}
/*
 * The enqueue function implements a 4-level pipeline with each stage
 * processing two different packets. The purpose of using a pipeline
 * is to hide the latency of prefetching the data structures. The
 * naming convention is presented in the diagram below:
 *
 *   p00  _______   p10  _______   p20  _______   p30  _______
 * ----->|       |----->|       |----->|       |----->|       |----->
 *       |   0   |      |   1   |      |   2   |      |   3   |
 * ----->|_______|----->|_______|----->|_______|----->|_______|----->
 *   p01            p11            p21            p31
 */
int
rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,
		       uint32_t n_pkts)
{
	struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21,
		*pkt30, *pkt31, *pkt_last;
	struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base,
		**q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
	uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
	uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
	uint32_t result, i;

	result = 0;

	/*
	 * Less than 6 input packets available, which is not enough to
	 * feed the pipeline
	 */
	if (unlikely(n_pkts < 6)) {
		struct rte_mbuf **q_base[5];
		uint32_t q[5];

		/* Prefetch the mbuf structure of each packet */
		for (i = 0; i < n_pkts; i++)
			rte_prefetch0(pkts[i]);

		/* Prefetch the queue structure for each queue */
		for (i = 0; i < n_pkts; i++)
			q[i] = rte_sched_port_enqueue_qptrs_prefetch0(port,
								      pkts[i]);

		/* Prefetch the write pointer location of each queue */
		for (i = 0; i < n_pkts; i++) {
			q_base[i] = rte_sched_port_qbase(port, q[i]);
			rte_sched_port_enqueue_qwa_prefetch0(port, q[i],
							     q_base[i]);
		}

		/* Write each packet to its queue */
		for (i = 0; i < n_pkts; i++)
			result += rte_sched_port_enqueue_qwa(port, q[i],
							     q_base[i], pkts[i]);

		return result;
	}

	/* Feed the first 3 stages of the pipeline (6 packets needed) */
	pkt20 = pkts[0];
	pkt21 = pkts[1];
	rte_prefetch0(pkt20);
	rte_prefetch0(pkt21);

	pkt10 = pkts[2];
	pkt11 = pkts[3];
	rte_prefetch0(pkt10);
	rte_prefetch0(pkt11);

	q20 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt20);
	q21 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt21);

	pkt00 = pkts[4];
	pkt01 = pkts[5];
	rte_prefetch0(pkt00);
	rte_prefetch0(pkt01);

	q10 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt10);
	q11 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt11);

	q20_base = rte_sched_port_qbase(port, q20);
	q21_base = rte_sched_port_qbase(port, q21);
	rte_sched_port_enqueue_qwa_prefetch0(port, q20, q20_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, q21, q21_base);

	/* Run the pipeline */
	for (i = 6; i < (n_pkts & (~1)); i += 2) {
		/* Propagate stage inputs */
		pkt30 = pkt20;
		pkt31 = pkt21;
		pkt20 = pkt10;
		pkt21 = pkt11;
		pkt10 = pkt00;
		pkt11 = pkt01;
		q30 = q20;
		q31 = q21;
		q20 = q10;
		q21 = q11;
		q30_base = q20_base;
		q31_base = q21_base;

		/* Stage 0: Get packets in */
		pkt00 = pkts[i];
		pkt01 = pkts[i + 1];
		rte_prefetch0(pkt00);
		rte_prefetch0(pkt01);

		/* Stage 1: Prefetch queue structure storing queue pointers */
		q10 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt10);
		q11 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt11);

		/* Stage 2: Prefetch queue write location */
		q20_base = rte_sched_port_qbase(port, q20);
		q21_base = rte_sched_port_qbase(port, q21);
		rte_sched_port_enqueue_qwa_prefetch0(port, q20, q20_base);
		rte_sched_port_enqueue_qwa_prefetch0(port, q21, q21_base);

		/* Stage 3: Write packet to queue and activate queue */
		r30 = rte_sched_port_enqueue_qwa(port, q30, q30_base, pkt30);
		r31 = rte_sched_port_enqueue_qwa(port, q31, q31_base, pkt31);
		result += r30 + r31;
	}

	/*
	 * Drain the pipeline (exactly 6 packets).
	 * Handle the last packet in the case
	 * of an odd number of input packets.
	 */
	pkt_last = pkts[n_pkts - 1];
	rte_prefetch0(pkt_last);

	q00 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt00);
	q01 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt01);

	q10_base = rte_sched_port_qbase(port, q10);
	q11_base = rte_sched_port_qbase(port, q11);
	rte_sched_port_enqueue_qwa_prefetch0(port, q10, q10_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, q11, q11_base);

	r20 = rte_sched_port_enqueue_qwa(port, q20, q20_base, pkt20);
	r21 = rte_sched_port_enqueue_qwa(port, q21, q21_base, pkt21);
	result += r20 + r21;

	q_last = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt_last);

	q00_base = rte_sched_port_qbase(port, q00);
	q01_base = rte_sched_port_qbase(port, q01);
	rte_sched_port_enqueue_qwa_prefetch0(port, q00, q00_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, q01, q01_base);

	r10 = rte_sched_port_enqueue_qwa(port, q10, q10_base, pkt10);
	r11 = rte_sched_port_enqueue_qwa(port, q11, q11_base, pkt11);
	result += r10 + r11;

	q_last_base = rte_sched_port_qbase(port, q_last);
	rte_sched_port_enqueue_qwa_prefetch0(port, q_last, q_last_base);

	r00 = rte_sched_port_enqueue_qwa(port, q00, q00_base, pkt00);
	r01 = rte_sched_port_enqueue_qwa(port, q01, q01_base, pkt01);
	result += r00 + r01;

	/* Handle the last packet */
	if (n_pkts & 1) {
		r_last = rte_sched_port_enqueue_qwa(port, q_last, q_last_base, pkt_last);
		result += r_last;
	}

	return result;
}
#ifndef RTE_SCHED_SUBPORT_TC_OV

static inline void
grinder_credits_update(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *params = grinder->pipe_params;
	uint64_t n_periods;
	uint32_t i;

	/* Subport TB */
	n_periods = (port->time - subport->tb_time) / subport->tb_period;
	subport->tb_credits += n_periods * subport->tb_credits_per_period;
	subport->tb_credits = rte_sched_min_val_2_u32(subport->tb_credits, subport->tb_size);
	subport->tb_time += n_periods * subport->tb_period;

	/* Pipe TB */
	n_periods = (port->time - pipe->tb_time) / params->tb_period;
	pipe->tb_credits += n_periods * params->tb_credits_per_period;
	pipe->tb_credits = rte_sched_min_val_2_u32(pipe->tb_credits, params->tb_size);
	pipe->tb_time += n_periods * params->tb_period;

	/* Subport TCs */
	if (unlikely(port->time >= subport->tc_time)) {
		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
			subport->tc_credits[i] = subport->tc_credits_per_period[i];

		subport->tc_time = port->time + subport->tc_period;
	}

	/* Pipe TCs */
	if (unlikely(port->time >= pipe->tc_time)) {
		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
			pipe->tc_credits[i] = params->tc_credits_per_period[i];

		pipe->tc_time = port->time + params->tc_period;
	}
}
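/*
 * Worked example (illustrative): suppose the subport token bucket was last
 * refilled at byte-time 10,000, tb_period = 100 and tb_credits_per_period =
 * 150. At port->time = 10,950 the code above computes n_periods = 9, adds
 * 9 * 150 = 1350 credits (capped at tb_size), and moves tb_time forward by
 * exactly 9 * 100 = 900 byte-times, keeping the 50-byte remainder for the
 * next refill.
 */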
#else

static inline uint32_t
grinder_tc_ov_credits_update(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	uint32_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tc_consumption = 0, tc_ov_consumption_max;
	uint32_t tc_ov_wm = subport->tc_ov_wm;
	uint32_t i;

	if (subport->tc_ov == 0)
		return subport->tc_ov_wm_max;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
		tc_ov_consumption[i] =
			subport->tc_credits_per_period[i] - subport->tc_credits[i];
		tc_consumption += tc_ov_consumption[i];
	}

	tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] =
		subport->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
		subport->tc_credits[RTE_SCHED_TRAFFIC_CLASS_BE];

	tc_ov_consumption_max =
		subport->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
		tc_consumption;

	if (tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] >
		(tc_ov_consumption_max - port->mtu)) {
		tc_ov_wm -= tc_ov_wm >> 7;
		if (tc_ov_wm < subport->tc_ov_wm_min)
			tc_ov_wm = subport->tc_ov_wm_min;

		return tc_ov_wm;
	}

	tc_ov_wm += (tc_ov_wm >> 7) + 1;
	if (tc_ov_wm > subport->tc_ov_wm_max)
		tc_ov_wm = subport->tc_ov_wm_max;

	return tc_ov_wm;
}
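/*
 * Behaviour note (illustrative): the watermark moves by roughly 1/128 of
 * its value per TC period. E.g. with tc_ov_wm = 12,800 bytes, a congested
 * period (BE consumption close to the leftover budget) lowers it by ~100
 * bytes, while an idle period raises it by ~101, so the BE bandwidth
 * granted to oversubscribed pipes adapts gradually instead of oscillating.
 */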
static inline void
grinder_credits_update(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *params = grinder->pipe_params;
	uint64_t n_periods;
	uint32_t i;

	/* Subport TB */
	n_periods = (port->time - subport->tb_time) / subport->tb_period;
	subport->tb_credits += n_periods * subport->tb_credits_per_period;
	subport->tb_credits = rte_sched_min_val_2_u32(subport->tb_credits, subport->tb_size);
	subport->tb_time += n_periods * subport->tb_period;

	/* Pipe TB */
	n_periods = (port->time - pipe->tb_time) / params->tb_period;
	pipe->tb_credits += n_periods * params->tb_credits_per_period;
	pipe->tb_credits = rte_sched_min_val_2_u32(pipe->tb_credits, params->tb_size);
	pipe->tb_time += n_periods * params->tb_period;

	/* Subport TCs */
	if (unlikely(port->time >= subport->tc_time)) {
		subport->tc_ov_wm = grinder_tc_ov_credits_update(port, pos);

		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
			subport->tc_credits[i] = subport->tc_credits_per_period[i];

		subport->tc_time = port->time + subport->tc_period;
		subport->tc_ov_period_id++;
	}

	/* Pipe TCs */
	if (unlikely(port->time >= pipe->tc_time)) {
		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
			pipe->tc_credits[i] = params->tc_credits_per_period[i];
		pipe->tc_time = port->time + params->tc_period;
	}

	/* Pipe TCs - Oversubscription */
	if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
		pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;

		pipe->tc_ov_period_id = subport->tc_ov_period_id;
	}
}

#endif /* RTE_SCHED_TS_CREDITS_UPDATE, RTE_SCHED_SUBPORT_TC_OV */
#ifndef RTE_SCHED_SUBPORT_TC_OV

static inline int
grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t tc_index = grinder->tc_index;
	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
	uint32_t subport_tb_credits = subport->tb_credits;
	uint32_t subport_tc_credits = subport->tc_credits[tc_index];
	uint32_t pipe_tb_credits = pipe->tb_credits;
	uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
	int enough_credits;

	/* Check queue credits */
	enough_credits = (pkt_len <= subport_tb_credits) &&
		(pkt_len <= subport_tc_credits) &&
		(pkt_len <= pipe_tb_credits) &&
		(pkt_len <= pipe_tc_credits);

	if (!enough_credits)
		return 0;

	/* Update port credits */
	subport->tb_credits -= pkt_len;
	subport->tc_credits[tc_index] -= pkt_len;
	pipe->tb_credits -= pkt_len;
	pipe->tc_credits[tc_index] -= pkt_len;

	return 1;
}

#else

static inline int
grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t tc_index = grinder->tc_index;
	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
	uint32_t subport_tb_credits = subport->tb_credits;
	uint32_t subport_tc_credits = subport->tc_credits[tc_index];
	uint32_t pipe_tb_credits = pipe->tb_credits;
	uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
	uint32_t pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {0};
	uint32_t pipe_tc_ov_credits, i;
	int enough_credits;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		pipe_tc_ov_mask1[i] = UINT32_MAX;

	pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASS_BE] = pipe->tc_ov_credits;
	pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASS_BE] = UINT32_MAX;
	pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];

	/* Check pipe and subport credits */
	enough_credits = (pkt_len <= subport_tb_credits) &&
		(pkt_len <= subport_tc_credits) &&
		(pkt_len <= pipe_tb_credits) &&
		(pkt_len <= pipe_tc_credits) &&
		(pkt_len <= pipe_tc_ov_credits);

	if (!enough_credits)
		return 0;

	/* Update pipe and subport credits */
	subport->tb_credits -= pkt_len;
	subport->tc_credits[tc_index] -= pkt_len;
	pipe->tb_credits -= pkt_len;
	pipe->tc_credits[tc_index] -= pkt_len;
	pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;

	return 1;
}

#endif /* RTE_SCHED_SUBPORT_TC_OV */
static inline int
grinder_schedule(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
	uint32_t be_tc_active;

	if (!grinder_credits_check(port, pos))
		return 0;

	/* Advance port time */
	port->time += pkt_len;

	/* Send packet */
	port->pkts_out[port->n_pkts_out++] = pkt;
	queue->qr++;

	be_tc_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE) ? ~0x0 : 0x0;
	grinder->wrr_tokens[grinder->qpos] +=
		(pkt_len * grinder->wrr_cost[grinder->qpos]) & be_tc_active;
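	/*
	 * Branchless update (note, not in the original source): be_tc_active
	 * is all-ones only for the best-effort TC, so for strict-priority TCs
	 * the AND zeroes the token increment. E.g. a 1000-byte packet on a BE
	 * queue with wrr_cost 4 adds 4000 tokens; the same packet on TC 0
	 * adds none.
	 */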
	if (queue->qr == queue->qw) {
		uint32_t qindex = grinder->qindex[grinder->qpos];

		rte_bitmap_clear(port->bmp, qindex);
		grinder->qmask &= ~(1 << grinder->qpos);
		if (be_tc_active)
			grinder->wrr_mask[grinder->qpos] = 0;
		rte_sched_port_set_queue_empty_timestamp(port, qindex);
	}

	/* Reset pipe loop detection */
	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
	grinder->productive = 1;

	return 1;
}
#ifdef SCHED_VECTOR_SSE4

static inline int
grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
{
	__m128i index = _mm_set1_epi32(base_pipe);
	__m128i pipes = _mm_load_si128((__m128i *)port->grinder_base_bmp_pos);
	__m128i res = _mm_cmpeq_epi32(pipes, index);

	pipes = _mm_load_si128((__m128i *)(port->grinder_base_bmp_pos + 4));
	pipes = _mm_cmpeq_epi32(pipes, index);
	res = _mm_or_si128(res, pipes);

	if (_mm_testz_si128(res, res))
		return 0;

	return 1;
}

#elif defined(SCHED_VECTOR_NEON)

static inline int
grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
{
	uint32x4_t index, pipes;
	uint32_t *pos = (uint32_t *)port->grinder_base_bmp_pos;

	index = vmovq_n_u32(base_pipe);
	pipes = vld1q_u32(pos);
	if (!vminvq_u32(veorq_u32(pipes, index)))
		return 1;

	pipes = vld1q_u32(pos + 4);
	if (!vminvq_u32(veorq_u32(pipes, index)))
		return 1;

	return 0;
}

#else

static inline int
grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
{
	uint32_t i;

	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) {
		if (port->grinder_base_bmp_pos[i] == base_pipe)
			return 1;
	}

	return 0;
}

#endif /* SCHED_VECTOR_SSE4, SCHED_VECTOR_NEON */
static inline void
grinder_pcache_populate(struct rte_sched_port *port, uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint16_t w[4];

	grinder->pcache_w = 0;
	grinder->pcache_r = 0;

	w[0] = (uint16_t) bmp_slab;
	w[1] = (uint16_t) (bmp_slab >> 16);
	w[2] = (uint16_t) (bmp_slab >> 32);
	w[3] = (uint16_t) (bmp_slab >> 48);

	grinder->pcache_qmask[grinder->pcache_w] = w[0];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
	grinder->pcache_w += (w[0] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[1];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
	grinder->pcache_w += (w[1] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[2];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
	grinder->pcache_w += (w[2] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[3];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
	grinder->pcache_w += (w[3] != 0);
}
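/*
 * Worked example (illustrative): a 64-queue bitmap slab covers 4 pipes of
 * 16 queues each. For bmp_slab = 0x0000000100008001, the four 16-bit words
 * are {0x8001, 0x0000, 0x0001, 0x0000}, so only two pcache entries are
 * written: qmask 0x8001 at bmp_pos (pipe 0 of the slab) and qmask 0x0001 at
 * bmp_pos + 32 (pipe 2). The branchless "+= (w[i] != 0)" skips empty pipes.
 */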
static inline void
grinder_tccache_populate(struct rte_sched_port *port, uint32_t pos, uint32_t qindex, uint16_t qmask)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint8_t b, i;

	grinder->tccache_w = 0;
	grinder->tccache_r = 0;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
		b = (uint8_t) ((qmask >> i) & 0x1);
		grinder->tccache_qmask[grinder->tccache_w] = b;
		grinder->tccache_qindex[grinder->tccache_w] = qindex + i;
		grinder->tccache_w += (b != 0);
	}

	b = (uint8_t) (qmask >> (RTE_SCHED_TRAFFIC_CLASS_BE));
	grinder->tccache_qmask[grinder->tccache_w] = b;
	grinder->tccache_qindex[grinder->tccache_w] = qindex +
		RTE_SCHED_TRAFFIC_CLASS_BE;
	grinder->tccache_w += (b != 0);
}
static inline int
grinder_next_tc(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_mbuf **qbase;
	uint32_t qindex;
	uint16_t qsize;

	if (grinder->tccache_r == grinder->tccache_w)
		return 0;

	qindex = grinder->tccache_qindex[grinder->tccache_r];
	qbase = rte_sched_port_qbase(port, qindex);
	qsize = rte_sched_port_qsize(port, qindex);

	grinder->tc_index = rte_sched_port_pipe_tc(port, qindex);
	grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
	grinder->qsize = qsize;

	if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
		grinder->queue[0] = port->queue + qindex;
		grinder->qbase[0] = qbase;
		grinder->qindex[0] = qindex;
		grinder->tccache_r++;

		return 1;
	}

	grinder->queue[0] = port->queue + qindex;
	grinder->queue[1] = port->queue + qindex + 1;
	grinder->queue[2] = port->queue + qindex + 2;
	grinder->queue[3] = port->queue + qindex + 3;

	grinder->qbase[0] = qbase;
	grinder->qbase[1] = qbase + qsize;
	grinder->qbase[2] = qbase + 2 * qsize;
	grinder->qbase[3] = qbase + 3 * qsize;

	grinder->qindex[0] = qindex;
	grinder->qindex[1] = qindex + 1;
	grinder->qindex[2] = qindex + 2;
	grinder->qindex[3] = qindex + 3;

	grinder->tccache_r++;
	return 1;
}
static inline int
grinder_next_pipe(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint32_t pipe_qindex;
	uint16_t pipe_qmask;

	if (grinder->pcache_r < grinder->pcache_w) {
		pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
		pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
		grinder->pcache_r++;
	} else {
		uint64_t bmp_slab = 0;
		uint32_t bmp_pos = 0;

		/* Get another non-empty pipe group */
		if (unlikely(rte_bitmap_scan(port->bmp, &bmp_pos, &bmp_slab) <= 0))
			return 0;

#ifdef RTE_SCHED_DEBUG
		debug_check_queue_slab(port, bmp_pos, bmp_slab);
#endif

		/* Return if pipe group already in one of the other grinders */
		port->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
		if (unlikely(grinder_pipe_exists(port, bmp_pos)))
			return 0;

		port->grinder_base_bmp_pos[pos] = bmp_pos;

		/* Install new pipe group into grinder's pipe cache */
		grinder_pcache_populate(port, pos, bmp_pos, bmp_slab);

		pipe_qmask = grinder->pcache_qmask[0];
		pipe_qindex = grinder->pcache_qindex[0];
		grinder->pcache_r = 1;
	}

	/* Install new pipe in the grinder */
	grinder->pindex = pipe_qindex >> 4;
	grinder->subport = port->subport + (grinder->pindex / port->n_pipes_per_subport);
	grinder->pipe = port->pipe + grinder->pindex;
	grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
	grinder->productive = 0;

	grinder_tccache_populate(port, pos, pipe_qindex, pipe_qmask);
	grinder_next_tc(port, pos);

	/* Check for pipe exhaustion */
	if (grinder->pindex == port->pipe_loop) {
		port->pipe_exhaustion = 1;
		port->pipe_loop = RTE_SCHED_PIPE_INVALID;
	}

	return 1;
}
static inline void
grinder_wrr_load(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
	uint32_t qmask = grinder->qmask;

	grinder->wrr_tokens[0] =
		((uint16_t) pipe->wrr_tokens[0]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[1] =
		((uint16_t) pipe->wrr_tokens[1]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[2] =
		((uint16_t) pipe->wrr_tokens[2]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[3] =
		((uint16_t) pipe->wrr_tokens[3]) << RTE_SCHED_WRR_SHIFT;

	grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
	grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
	grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
	grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;

	grinder->wrr_cost[0] = pipe_params->wrr_cost[0];
	grinder->wrr_cost[1] = pipe_params->wrr_cost[1];
	grinder->wrr_cost[2] = pipe_params->wrr_cost[2];
	grinder->wrr_cost[3] = pipe_params->wrr_cost[3];
}

static inline void
grinder_wrr_store(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;

	pipe->wrr_tokens[0] =
		(grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >>
		RTE_SCHED_WRR_SHIFT;
	pipe->wrr_tokens[1] =
		(grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >>
		RTE_SCHED_WRR_SHIFT;
	pipe->wrr_tokens[2] =
		(grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >>
		RTE_SCHED_WRR_SHIFT;
	pipe->wrr_tokens[3] =
		(grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >>
		RTE_SCHED_WRR_SHIFT;
}
static inline void
grinder_wrr(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint16_t wrr_tokens_min;

	grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
	grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
	grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
	grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];

	grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
	wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];

	grinder->wrr_tokens[0] -= wrr_tokens_min;
	grinder->wrr_tokens[1] -= wrr_tokens_min;
	grinder->wrr_tokens[2] -= wrr_tokens_min;
	grinder->wrr_tokens[3] -= wrr_tokens_min;
}
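/*
 * Worked example (illustrative): tokens {120, 40, 300, 0xFFFF} after the
 * mask step (queue 3 inactive, forced to all-ones) make rte_min_pos_4_u16()
 * pick qpos = 1. Subtracting the minimum, 40, renormalizes the active
 * queues to {80, 0, 260, ...}, so the queue that has consumed the least
 * weighted bytes is always served next.
 */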
#define grinder_evict(port, pos)

static inline void
grinder_prefetch_pipe(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;

	rte_prefetch0(grinder->pipe);
	rte_prefetch0(grinder->queue[0]);
}

static inline void
grinder_prefetch_tc_queue_arrays(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint16_t qsize, qr[RTE_SCHED_MAX_QUEUES_PER_TC];

	qsize = grinder->qsize;
	grinder->qpos = 0;

	if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
		qr[0] = grinder->queue[0]->qr & (qsize - 1);

		rte_prefetch0(grinder->qbase[0] + qr[0]);
		return;
	}

	qr[0] = grinder->queue[0]->qr & (qsize - 1);
	qr[1] = grinder->queue[1]->qr & (qsize - 1);
	qr[2] = grinder->queue[2]->qr & (qsize - 1);
	qr[3] = grinder->queue[3]->qr & (qsize - 1);

	rte_prefetch0(grinder->qbase[0] + qr[0]);
	rte_prefetch0(grinder->qbase[1] + qr[1]);

	grinder_wrr_load(port, pos);
	grinder_wrr(port, pos);

	rte_prefetch0(grinder->qbase[2] + qr[2]);
	rte_prefetch0(grinder->qbase[3] + qr[3]);
}
static inline void
grinder_prefetch_mbuf(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint32_t qpos = grinder->qpos;
	struct rte_mbuf **qbase = grinder->qbase[qpos];
	uint16_t qsize = grinder->qsize;
	uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);

	grinder->pkt = qbase[qr];
	rte_prefetch0(grinder->pkt);

	if (unlikely((qr & 0x7) == 7)) {
		uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);

		rte_prefetch0(qbase + qr_next);
	}
}
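/*
 * Note (illustrative): eight 8-byte mbuf pointers fill one 64-byte cache
 * line, so when the read position sits in the last slot of a line
 * (qr % 8 == 7) the next line of the queue array is prefetched ahead of the
 * upcoming dequeues.
 */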
static inline uint32_t
grinder_handle(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;

	switch (grinder->state) {
	case e_GRINDER_PREFETCH_PIPE:
	{
		if (grinder_next_pipe(port, pos)) {
			grinder_prefetch_pipe(port, pos);
			port->busy_grinders++;

			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
			return 0;
		}

		return 0;
	}

	case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
	{
		struct rte_sched_pipe *pipe = grinder->pipe;

		grinder->pipe_params = port->pipe_profiles + pipe->profile;
		grinder_prefetch_tc_queue_arrays(port, pos);
		grinder_credits_update(port, pos);

		grinder->state = e_GRINDER_PREFETCH_MBUF;
		return 0;
	}

	case e_GRINDER_PREFETCH_MBUF:
	{
		grinder_prefetch_mbuf(port, pos);

		grinder->state = e_GRINDER_READ_MBUF;
		return 0;
	}

	case e_GRINDER_READ_MBUF:
	{
		uint32_t wrr_active, result = 0;

		result = grinder_schedule(port, pos);

		wrr_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE);

		/* Look for next packet within the same TC */
		if (result && grinder->qmask) {
			if (wrr_active)
				grinder_wrr(port, pos);

			grinder_prefetch_mbuf(port, pos);

			return 1;
		}

		if (wrr_active)
			grinder_wrr_store(port, pos);

		/* Look for another active TC within same pipe */
		if (grinder_next_tc(port, pos)) {
			grinder_prefetch_tc_queue_arrays(port, pos);

			grinder->state = e_GRINDER_PREFETCH_MBUF;
			return result;
		}

		if (grinder->productive == 0 &&
		    port->pipe_loop == RTE_SCHED_PIPE_INVALID)
			port->pipe_loop = grinder->pindex;

		grinder_evict(port, pos);

		/* Look for another active pipe */
		if (grinder_next_pipe(port, pos)) {
			grinder_prefetch_pipe(port, pos);

			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
			return result;
		}

		/* No active pipe found */
		port->busy_grinders--;

		grinder->state = e_GRINDER_PREFETCH_PIPE;
		return result;
	}

	default:
		rte_panic("Algorithmic error (invalid state)\n");
		break;
	}

	return 0;
}
static inline void
rte_sched_port_time_resync(struct rte_sched_port *port)
{
	uint64_t cycles = rte_get_tsc_cycles();
	uint64_t cycles_diff = cycles - port->time_cpu_cycles;
	uint64_t bytes_diff;

	/* Compute elapsed time in bytes */
	bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT,
					   port->inv_cycles_per_byte);

	/* Advance port time */
	port->time_cpu_cycles = cycles;
	port->time_cpu_bytes += bytes_diff;
	if (port->time < port->time_cpu_bytes)
		port->time = port->time_cpu_bytes;

	/* Reset pipe loop detection */
	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
}
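/*
 * Worked example (illustrative, using the numbers from the scaling note at
 * the top of the file): with cycles_per_byte = 409 held in fixed point with
 * 8 fractional bits, an elapsed TSC delta of 100,000 cycles converts to
 * (100000 << 8) / 409 ~= 62,591 bytes of NIC TX time. Note that port->time
 * only ever moves forward, never backwards.
 */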
static inline int
rte_sched_port_exceptions(struct rte_sched_port *port, int second_pass)
{
	int exceptions;

	/* Check if any exception flag is set */
	exceptions = (second_pass && port->busy_grinders == 0) ||
		(port->pipe_exhaustion == 1);

	/* Clear exception flags */
	port->pipe_exhaustion = 0;

	return exceptions;
}
int
rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	uint32_t i, count;

	port->pkts_out = pkts;
	port->n_pkts_out = 0;

	rte_sched_port_time_resync(port);

	/* Take each queue in the grinder one step further */
	for (i = 0, count = 0; ; i++) {
		count += grinder_handle(port, i & (RTE_SCHED_PORT_N_GRINDERS - 1));
		if ((count == n_pkts) ||
		    rte_sched_port_exceptions(port, i >= RTE_SCHED_PORT_N_GRINDERS)) {
			break;
		}
	}

	return count;
}