/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf.h>

#include "rte_sched.h"
#include "rte_bitmap.h"
#include "rte_sched_common.h"
#include "rte_approx.h"
#ifdef __INTEL_COMPILER
#pragma warning(disable:2259) /* conversion may lose significant bits */
#endif

#ifndef RTE_SCHED_OPTIMIZATIONS
#define RTE_SCHED_OPTIMIZATIONS                   0
#endif

#if RTE_SCHED_OPTIMIZATIONS
#include <immintrin.h>
#endif
#define RTE_SCHED_TB_RATE_CONFIG_ERR          (1e-7)
#define RTE_SCHED_WRR_SHIFT                   3
#define RTE_SCHED_GRINDER_PCACHE_SIZE         (64 / RTE_SCHED_QUEUES_PER_PIPE)
#define RTE_SCHED_PIPE_INVALID                UINT32_MAX
#define RTE_SCHED_BMP_POS_INVALID             UINT32_MAX
struct rte_sched_subport {
	/* Token bucket (TB) */
	uint64_t tb_time; /* time of last update */
	uint32_t tb_period;
	uint32_t tb_credits_per_period;
	uint32_t tb_size;
	uint32_t tb_credits;

	/* Traffic classes (TCs) */
	uint64_t tc_time; /* time of next update */
	uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tc_period;

	/* TC oversubscription */
	uint32_t tc_ov_wm;
	uint32_t tc_ov_wm_min;
	uint32_t tc_ov_wm_max;
	uint8_t tc_ov_period_id;
	uint8_t tc_ov;
	uint32_t tc_ov_n;
	double tc_ov_rate;

	/* Statistics */
	struct rte_sched_subport_stats stats;
};
struct rte_sched_pipe_profile {
	/* Token bucket (TB) */
	uint32_t tb_period;
	uint32_t tb_credits_per_period;
	uint32_t tb_size;

	/* Pipe traffic classes */
	uint32_t tc_period;
	uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint8_t tc_ov_weight;

	/* Pipe queues */
	uint8_t wrr_cost[RTE_SCHED_QUEUES_PER_PIPE];
};
struct rte_sched_pipe {
	/* Token bucket (TB) */
	uint64_t tb_time; /* time of last update */
	uint32_t tb_credits;

	/* Pipe profile and flags */
	uint32_t profile;

	/* Traffic classes (TCs) */
	uint64_t tc_time; /* time of next update */
	uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

	/* Weighted Round Robin (WRR) */
	uint8_t wrr_tokens[RTE_SCHED_QUEUES_PER_PIPE];

	/* TC oversubscription */
	uint32_t tc_ov_credits;
	uint8_t tc_ov_period_id;
} __rte_cache_aligned;
struct rte_sched_queue {
	uint16_t qw;
	uint16_t qr;
};

struct rte_sched_queue_extra {
	struct rte_sched_queue_stats stats;
#ifdef RTE_SCHED_RED
	struct rte_red red;
#endif
};

enum grinder_state {
	e_GRINDER_PREFETCH_PIPE = 0,
	e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
	e_GRINDER_PREFETCH_MBUF,
	e_GRINDER_READ_MBUF
};
/*
 * Path through the scheduler hierarchy used by the scheduler enqueue
 * operation to identify the destination queue for the current
 * packet. Stored in the field pkt.hash.sched of struct rte_mbuf of
 * each packet, typically written by the classification stage and read
 * by scheduler enqueue.
 */
struct rte_sched_port_hierarchy {
	uint32_t queue:2;                /**< Queue ID (0 .. 3) */
	uint32_t traffic_class:2;        /**< Traffic class ID (0 .. 3) */
	uint32_t pipe:20;                /**< Pipe ID */
	uint32_t subport:6;              /**< Subport ID */
	uint32_t color:2;                /**< Color */
};
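
/*
 * Illustrative sketch (not part of the original sources): the whole path
 * packs into the 32-bit pkt->hash.sched field. The accessors defined
 * further below should be preferred over direct bitfield access. Values
 * here are hypothetical.
 */
#if 0
	struct rte_mbuf *pkt;   /* assumed to come from the classifier */

	rte_sched_port_pkt_write(pkt, /* subport = */ 1, /* pipe = */ 5,
		/* traffic_class = */ 2, /* queue = */ 3, e_RTE_METER_GREEN);
#endif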
struct rte_sched_grinder {
	/* Pipe cache */
	uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
	uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
	uint32_t pcache_w;
	uint32_t pcache_r;

	/* Current pipe */
	enum grinder_state state;
	uint32_t productive;
	uint32_t pindex;
	struct rte_sched_subport *subport;
	struct rte_sched_pipe *pipe;
	struct rte_sched_pipe_profile *pipe_params;

	/* TC cache */
	uint8_t tccache_qmask[4];
	uint32_t tccache_qindex[4];
	uint32_t tccache_w;
	uint32_t tccache_r;

	/* Current TC */
	uint32_t tc_index;
	struct rte_sched_queue *queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	struct rte_mbuf **qbase[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint16_t qsize;
	uint32_t qmask;
	uint32_t qpos;
	struct rte_mbuf *pkt;

	/* WRR */
	uint16_t wrr_tokens[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
	uint16_t wrr_mask[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
	uint8_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
};
struct rte_sched_port {
	/* User parameters */
	uint32_t n_subports_per_port;
	uint32_t n_pipes_per_subport;
	uint32_t rate;
	uint32_t mtu;
	uint32_t frame_overhead;
	uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t n_pipe_profiles;
	uint32_t pipe_tc3_rate_max;
#ifdef RTE_SCHED_RED
	struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][e_RTE_METER_COLORS];
#endif

	/* Timing */
	uint64_t time_cpu_cycles;     /* Current CPU time measured in CPU cycles */
	uint64_t time_cpu_bytes;      /* Current CPU time measured in bytes */
	uint64_t time;                /* Current NIC TX time measured in bytes */
	double cycles_per_byte;       /* CPU cycles per byte */

	/* Scheduling loop detection */
	uint32_t pipe_loop;
	uint32_t pipe_exhaustion;

	/* Bitmap */
	struct rte_bitmap *bmp;
	uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;

	/* Grinders */
	struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
	uint32_t busy_grinders;
	struct rte_mbuf **pkts_out;
	uint32_t n_pkts_out;

	/* Queue base calculation */
	uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
	uint32_t qsize_sum;

	/* Large data structures */
	struct rte_sched_subport *subport;
	struct rte_sched_pipe *pipe;
	struct rte_sched_queue *queue;
	struct rte_sched_queue_extra *queue_extra;
	struct rte_sched_pipe_profile *pipe_profiles;
	uint8_t *bmp_array;
	struct rte_mbuf **queue_array;
	uint8_t memory[0] __rte_cache_aligned;
} __rte_cache_aligned;
enum rte_sched_port_array {
	e_RTE_SCHED_PORT_ARRAY_SUBPORT = 0,
	e_RTE_SCHED_PORT_ARRAY_PIPE,
	e_RTE_SCHED_PORT_ARRAY_QUEUE,
	e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA,
	e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES,
	e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY,
	e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY,
	e_RTE_SCHED_PORT_ARRAY_TOTAL,
};
#ifdef RTE_SCHED_COLLECT_STATS

static inline uint32_t
rte_sched_port_queues_per_subport(struct rte_sched_port *port)
{
	return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport;
}

#endif

static inline uint32_t
rte_sched_port_queues_per_port(struct rte_sched_port *port)
{
	return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport * port->n_subports_per_port;
}
static int
rte_sched_port_check_params(struct rte_sched_port_params *params)
{
	uint32_t i, j;

	if (params == NULL) {
		return -1;
	}

	/* socket */
	if ((params->socket < 0) || (params->socket >= RTE_MAX_NUMA_NODES)) {
		return -2;
	}

	/* rate: non-zero */
	if (params->rate == 0) {
		return -3;
	}

	/* mtu: non-zero */
	if (params->mtu == 0) {
		return -4;
	}

	/* n_subports_per_port: non-zero, power of 2 */
	if ((params->n_subports_per_port == 0) || (!rte_is_power_of_2(params->n_subports_per_port))) {
		return -5;
	}

	/* n_pipes_per_subport: non-zero, power of 2 */
	if ((params->n_pipes_per_subport == 0) || (!rte_is_power_of_2(params->n_pipes_per_subport))) {
		return -6;
	}

	/* qsize: non-zero, power of 2,
	 * no bigger than 32K (due to 16-bit read/write pointers) */
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
		uint16_t qsize = params->qsize[i];

		if ((qsize == 0) || (!rte_is_power_of_2(qsize))) {
			return -7;
		}
	}

	/* pipe_profiles and n_pipe_profiles */
	if ((params->pipe_profiles == NULL) ||
	    (params->n_pipe_profiles == 0) ||
	    (params->n_pipe_profiles > RTE_SCHED_PIPE_PROFILES_PER_PORT)) {
		return -8;
	}

	for (i = 0; i < params->n_pipe_profiles; i ++) {
		struct rte_sched_pipe_params *p = params->pipe_profiles + i;

		/* TB rate: non-zero, not greater than port rate */
		if ((p->tb_rate == 0) || (p->tb_rate > params->rate)) {
			return -9;
		}

		/* TB size: non-zero */
		if (p->tb_size == 0) {
			return -10;
		}

		/* TC rate: non-zero, not greater than pipe rate */
		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
			if ((p->tc_rate[j] == 0) || (p->tc_rate[j] > p->tb_rate)) {
				return -11;
			}
		}

		/* TC period: non-zero */
		if (p->tc_period == 0) {
			return -12;
		}

#ifdef RTE_SCHED_SUBPORT_TC_OV
		/* TC3 oversubscription weight: non-zero */
		if (p->tc_ov_weight == 0) {
			return -13;
		}
#endif

		/* Queue WRR weights: non-zero */
		for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j ++) {
			if (p->wrr_weights[j] == 0) {
				return -14;
			}
		}
	}

	return 0;
}
static uint32_t
rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sched_port_array array)
{
	uint32_t n_subports_per_port = params->n_subports_per_port;
	uint32_t n_pipes_per_subport = params->n_pipes_per_subport;
	uint32_t n_pipes_per_port = n_pipes_per_subport * n_subports_per_port;
	uint32_t n_queues_per_port = RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport * n_subports_per_port;

	uint32_t size_subport = n_subports_per_port * sizeof(struct rte_sched_subport);
	uint32_t size_pipe = n_pipes_per_port * sizeof(struct rte_sched_pipe);
	uint32_t size_queue = n_queues_per_port * sizeof(struct rte_sched_queue);
	uint32_t size_queue_extra = n_queues_per_port * sizeof(struct rte_sched_queue_extra);
	uint32_t size_pipe_profiles = RTE_SCHED_PIPE_PROFILES_PER_PORT * sizeof(struct rte_sched_pipe_profile);
	uint32_t size_bmp_array = rte_bitmap_get_memory_footprint(n_queues_per_port);
	uint32_t size_per_pipe_queue_array, size_queue_array;

	uint32_t base, i;

	size_per_pipe_queue_array = 0;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
		size_per_pipe_queue_array += RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * params->qsize[i] * sizeof(struct rte_mbuf *);
	}
	size_queue_array = n_pipes_per_port * size_per_pipe_queue_array;

	base = 0;

	if (array == e_RTE_SCHED_PORT_ARRAY_SUBPORT) return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_subport);

	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE) return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_pipe);

	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE) return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_queue);

	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA) return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);

	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES) return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);

	if (array == e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY) return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);

	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY) return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);

	return base;
}
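
/*
 * Layout note (illustrative): all port data lives in one contiguous
 * allocation, each region rounded up to a cache-line boundary:
 *
 *   [rte_sched_port][subport][pipe][queue][queue_extra]
 *   [pipe_profiles][bmp_array][queue_array]
 *
 * so rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL)
 * yields the total size of everything that follows the port struct.
 */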
uint32_t
rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
{
	uint32_t size0, size1;
	int status;

	status = rte_sched_port_check_params(params);
	if (status != 0) {
		RTE_LOG(NOTICE, SCHED,
			"Port scheduler params check failed (%d)\n", status);

		return 0;
	}

	size0 = sizeof(struct rte_sched_port);
	size1 = rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL);

	return (size0 + size1);
}
static void
rte_sched_port_config_qsize(struct rte_sched_port *port)
{
	/* TC 0 */
	port->qsize_add[0] = 0;
	port->qsize_add[1] = port->qsize_add[0] + port->qsize[0];
	port->qsize_add[2] = port->qsize_add[1] + port->qsize[0];
	port->qsize_add[3] = port->qsize_add[2] + port->qsize[0];

	/* TC 1 */
	port->qsize_add[4] = port->qsize_add[3] + port->qsize[0];
	port->qsize_add[5] = port->qsize_add[4] + port->qsize[1];
	port->qsize_add[6] = port->qsize_add[5] + port->qsize[1];
	port->qsize_add[7] = port->qsize_add[6] + port->qsize[1];

	/* TC 2 */
	port->qsize_add[8] = port->qsize_add[7] + port->qsize[1];
	port->qsize_add[9] = port->qsize_add[8] + port->qsize[2];
	port->qsize_add[10] = port->qsize_add[9] + port->qsize[2];
	port->qsize_add[11] = port->qsize_add[10] + port->qsize[2];

	/* TC 3 */
	port->qsize_add[12] = port->qsize_add[11] + port->qsize[2];
	port->qsize_add[13] = port->qsize_add[12] + port->qsize[3];
	port->qsize_add[14] = port->qsize_add[13] + port->qsize[3];
	port->qsize_add[15] = port->qsize_add[14] + port->qsize[3];

	port->qsize_sum = port->qsize_add[15] + port->qsize[3];
}
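
/*
 * Worked example (illustrative): with qsize = {64, 128, 256, 512},
 * qsize_add becomes {0, 64, 128, 192, 256, 384, 512, 640, 768, 1024,
 * 1280, 1536, 1792, 2304, 2816, 3328} and qsize_sum = 3840, i.e. the
 * per-pipe region of queue_array holds 3840 mbuf pointers.
 */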
static void
rte_sched_port_log_pipe_profile(struct rte_sched_port *port, uint32_t i)
{
	struct rte_sched_pipe_profile *p = port->pipe_profiles + i;

	RTE_LOG(DEBUG, SCHED, "Low level config for pipe profile %u:\n"
		"    Token bucket: period = %u, credits per period = %u, size = %u\n"
		"    Traffic classes: period = %u, credits per period = [%u, %u, %u, %u]\n"
		"    Traffic class 3 oversubscription: weight = %hhu\n"
		"    WRR cost: [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu]\n",
		i,

		/* Token bucket */
		p->tb_period,
		p->tb_credits_per_period,
		p->tb_size,

		/* Traffic classes */
		p->tc_period,
		p->tc_credits_per_period[0],
		p->tc_credits_per_period[1],
		p->tc_credits_per_period[2],
		p->tc_credits_per_period[3],

		/* Traffic class 3 oversubscription */
		p->tc_ov_weight,

		/* WRR */
		p->wrr_cost[ 0], p->wrr_cost[ 1], p->wrr_cost[ 2], p->wrr_cost[ 3],
		p->wrr_cost[ 4], p->wrr_cost[ 5], p->wrr_cost[ 6], p->wrr_cost[ 7],
		p->wrr_cost[ 8], p->wrr_cost[ 9], p->wrr_cost[10], p->wrr_cost[11],
		p->wrr_cost[12], p->wrr_cost[13], p->wrr_cost[14], p->wrr_cost[15]);
}
static inline uint64_t
rte_sched_time_ms_to_bytes(uint32_t time_ms, uint32_t rate)
{
	uint64_t time = time_ms;
	time = (time * rate) / 1000;

	return time;
}
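
/*
 * Worked example (illustrative): at a 10 GbE port rate of 1.25e9
 * bytes/s, a tc_period of 10 ms converts to (10 * 1250000000) / 1000
 * = 12,500,000 bytes worth of credits per enforcement period.
 */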
static void
rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port, struct rte_sched_port_params *params)
{
	uint32_t i, j;

	for (i = 0; i < port->n_pipe_profiles; i ++) {
		struct rte_sched_pipe_params *src = params->pipe_profiles + i;
		struct rte_sched_pipe_profile *dst = port->pipe_profiles + i;

		/* Token Bucket */
		if (src->tb_rate == params->rate) {
			dst->tb_credits_per_period = 1;
			dst->tb_period = 1;
		} else {
			double tb_rate = ((double) src->tb_rate) / ((double) params->rate);
			double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

			rte_approx(tb_rate, d, &dst->tb_credits_per_period, &dst->tb_period);
		}
		dst->tb_size = src->tb_size;

		/* Traffic Classes */
		dst->tc_period = (uint32_t) rte_sched_time_ms_to_bytes(src->tc_period, params->rate);
		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
			dst->tc_credits_per_period[j] = (uint32_t) rte_sched_time_ms_to_bytes(src->tc_period, src->tc_rate[j]);
		}
#ifdef RTE_SCHED_SUBPORT_TC_OV
		dst->tc_ov_weight = src->tc_ov_weight;
#endif

		/* WRR */
		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
			uint32_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
			uint32_t lcd, lcd1, lcd2;
			uint32_t qindex;

			qindex = j * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;

			wrr_cost[0] = src->wrr_weights[qindex];
			wrr_cost[1] = src->wrr_weights[qindex + 1];
			wrr_cost[2] = src->wrr_weights[qindex + 2];
			wrr_cost[3] = src->wrr_weights[qindex + 3];

			lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
			lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
			lcd = rte_get_lcd(lcd1, lcd2);

			wrr_cost[0] = lcd / wrr_cost[0];
			wrr_cost[1] = lcd / wrr_cost[1];
			wrr_cost[2] = lcd / wrr_cost[2];
			wrr_cost[3] = lcd / wrr_cost[3];

			dst->wrr_cost[qindex] = (uint8_t) wrr_cost[0];
			dst->wrr_cost[qindex + 1] = (uint8_t) wrr_cost[1];
			dst->wrr_cost[qindex + 2] = (uint8_t) wrr_cost[2];
			dst->wrr_cost[qindex + 3] = (uint8_t) wrr_cost[3];
		}

		rte_sched_port_log_pipe_profile(port, i);
	}

	port->pipe_tc3_rate_max = 0;
	for (i = 0; i < port->n_pipe_profiles; i ++) {
		struct rte_sched_pipe_params *src = params->pipe_profiles + i;
		uint32_t pipe_tc3_rate = src->tc_rate[3];

		if (port->pipe_tc3_rate_max < pipe_tc3_rate) {
			port->pipe_tc3_rate_max = pipe_tc3_rate;
		}
	}
}
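
/*
 * Worked example (illustrative): WRR weights {1, 2, 4, 8} for the four
 * queues of one TC have lowest common denominator lcd = 8, so the costs
 * become {8, 4, 2, 1}. The highest-weight queue has the lowest cost,
 * accumulates tokens most slowly, and is therefore selected most often
 * by grinder_wrr(), which always picks the minimum-token queue.
 */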
struct rte_sched_port *
rte_sched_port_config(struct rte_sched_port_params *params)
{
	struct rte_sched_port *port = NULL;
	uint32_t mem_size, bmp_mem_size, n_queues_per_port, i;

	/* Check user parameters. Determine the amount of memory to allocate */
	mem_size = rte_sched_port_get_memory_footprint(params);
	if (mem_size == 0) {
		return NULL;
	}

	/* Allocate memory to store the data structures */
	port = rte_zmalloc("qos_params", mem_size, RTE_CACHE_LINE_SIZE);
	if (port == NULL) {
		return NULL;
	}

	/* compile time checks */
	RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
	RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS & (RTE_SCHED_PORT_N_GRINDERS - 1));

	/* User parameters */
	port->n_subports_per_port = params->n_subports_per_port;
	port->n_pipes_per_subport = params->n_pipes_per_subport;
	port->rate = params->rate;
	port->mtu = params->mtu + params->frame_overhead;
	port->frame_overhead = params->frame_overhead;
	memcpy(port->qsize, params->qsize, sizeof(params->qsize));
	port->n_pipe_profiles = params->n_pipe_profiles;

#ifdef RTE_SCHED_RED
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		uint32_t j;

		for (j = 0; j < e_RTE_METER_COLORS; j++) {
			/* if min/max are both zero, then RED is disabled */
			if ((params->red_params[i][j].min_th |
			     params->red_params[i][j].max_th) == 0) {
				continue;
			}

			if (rte_red_config_init(&port->red_config[i][j],
				params->red_params[i][j].wq_log2,
				params->red_params[i][j].min_th,
				params->red_params[i][j].max_th,
				params->red_params[i][j].maxp_inv) != 0) {
				rte_free(port);
				return NULL;
			}
		}
	}
#endif

	/* Timing */
	port->time_cpu_cycles = rte_get_tsc_cycles();
	port->time_cpu_bytes = 0;
	port->time = 0;
	port->cycles_per_byte = ((double) rte_get_tsc_hz()) / ((double) params->rate);

	/* Scheduling loop detection */
	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
	port->pipe_exhaustion = 0;

	/* Grinders */
	port->busy_grinders = 0;
	port->pkts_out = NULL;
	port->n_pkts_out = 0;

	/* Queue base calculation */
	rte_sched_port_config_qsize(port);

	/* Large data structures */
	port->subport = (struct rte_sched_subport *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_SUBPORT));
	port->pipe = (struct rte_sched_pipe *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_PIPE));
	port->queue = (struct rte_sched_queue *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE));
	port->queue_extra = (struct rte_sched_queue_extra *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA));
	port->pipe_profiles = (struct rte_sched_pipe_profile *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES));
	port->bmp_array = port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY);
	port->queue_array = (struct rte_mbuf **) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY));

	/* Pipe profile table */
	rte_sched_port_config_pipe_profile_table(port, params);

	/* Bitmap */
	n_queues_per_port = rte_sched_port_queues_per_port(port);
	bmp_mem_size = rte_bitmap_get_memory_footprint(n_queues_per_port);
	port->bmp = rte_bitmap_init(n_queues_per_port, port->bmp_array, bmp_mem_size);
	if (port->bmp == NULL) {
		RTE_LOG(ERR, SCHED, "Bitmap init error\n");
		return NULL;
	}
	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i ++) {
		port->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
	}

	return port;
}
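
/*
 * Usage sketch (illustrative, all values hypothetical): configuring a
 * port with a single pipe profile. `pipe_profiles` is assumed to be
 * defined elsewhere by the application.
 */
#if 0
	struct rte_sched_port_params port_params = {
		.name = "port_0",
		.socket = 0,
		.rate = 1250000000,    /* 10 GbE, in bytes per second */
		.mtu = 1522,
		.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
		.n_subports_per_port = 1,
		.n_pipes_per_subport = 4096,
		.qsize = {64, 64, 64, 64},
		.pipe_profiles = pipe_profiles,
		.n_pipe_profiles = 1,
	};
	struct rte_sched_port *port = rte_sched_port_config(&port_params);
#endif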
void
rte_sched_port_free(struct rte_sched_port *port)
{
	/* Check user parameters */
	if (port == NULL) {
		return;
	}

	rte_bitmap_free(port->bmp);
	rte_free(port);
}
static void
rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
{
	struct rte_sched_subport *s = port->subport + i;

	RTE_LOG(DEBUG, SCHED, "Low level config for subport %u:\n"
		"    Token bucket: period = %u, credits per period = %u, size = %u\n"
		"    Traffic classes: period = %u, credits per period = [%u, %u, %u, %u]\n"
		"    Traffic class 3 oversubscription: wm min = %u, wm max = %u\n",
		i,

		/* Token bucket */
		s->tb_period,
		s->tb_credits_per_period,
		s->tb_size,

		/* Traffic classes */
		s->tc_period,
		s->tc_credits_per_period[0],
		s->tc_credits_per_period[1],
		s->tc_credits_per_period[2],
		s->tc_credits_per_period[3],

		/* Traffic class 3 oversubscription */
		s->tc_ov_wm_min,
		s->tc_ov_wm_max);
}
int
rte_sched_subport_config(struct rte_sched_port *port,
	uint32_t subport_id,
	struct rte_sched_subport_params *params)
{
	struct rte_sched_subport *s;
	uint32_t i;

	/* Check user parameters */
	if ((port == NULL) ||
	    (subport_id >= port->n_subports_per_port) ||
	    (params == NULL)) {
		return -1;
	}

	if ((params->tb_rate == 0) || (params->tb_rate > port->rate)) {
		return -2;
	}

	if (params->tb_size == 0) {
		return -3;
	}

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
		if ((params->tc_rate[i] == 0) || (params->tc_rate[i] > params->tb_rate)) {
			return -4;
		}
	}

	if (params->tc_period == 0) {
		return -5;
	}

	s = port->subport + subport_id;

	/* Token Bucket (TB) */
	if (params->tb_rate == port->rate) {
		s->tb_credits_per_period = 1;
		s->tb_period = 1;
	} else {
		double tb_rate = ((double) params->tb_rate) / ((double) port->rate);
		double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

		rte_approx(tb_rate, d, &s->tb_credits_per_period, &s->tb_period);
	}
	s->tb_size = params->tb_size;
	s->tb_time = port->time;
	s->tb_credits = s->tb_size / 2;

	/* Traffic Classes (TCs) */
	s->tc_period = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
		s->tc_credits_per_period[i] = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, params->tc_rate[i]);
	}
	s->tc_time = port->time + s->tc_period;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
		s->tc_credits[i] = s->tc_credits_per_period[i];
	}

#ifdef RTE_SCHED_SUBPORT_TC_OV
	/* TC oversubscription */
	s->tc_ov_wm_min = port->mtu;
	s->tc_ov_wm_max = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, port->pipe_tc3_rate_max);
	s->tc_ov_wm = s->tc_ov_wm_max;
	s->tc_ov_period_id = 0;
	s->tc_ov = 0;
	s->tc_ov_n = 0;
	s->tc_ov_rate = 0;
#endif

	rte_sched_port_log_subport_config(port, subport_id);

	return 0;
}
int
rte_sched_pipe_config(struct rte_sched_port *port,
	uint32_t subport_id,
	uint32_t pipe_id,
	int32_t pipe_profile)
{
	struct rte_sched_subport *s;
	struct rte_sched_pipe *p;
	struct rte_sched_pipe_profile *params;
	uint32_t deactivate, profile, i;

	/* Check user parameters */
	profile = (uint32_t) pipe_profile;
	deactivate = (pipe_profile < 0);
	if ((port == NULL) ||
	    (subport_id >= port->n_subports_per_port) ||
	    (pipe_id >= port->n_pipes_per_subport) ||
	    ((!deactivate) && (profile >= port->n_pipe_profiles))) {
		return -1;
	}

	/* Check that subport configuration is valid */
	s = port->subport + subport_id;
	if (s->tb_period == 0) {
		return -2;
	}

	p = port->pipe + (subport_id * port->n_pipes_per_subport + pipe_id);

	/* Handle the case when pipe already has a valid configuration */
	if (p->tb_time) {
		params = port->pipe_profiles + p->profile;

#ifdef RTE_SCHED_SUBPORT_TC_OV
		double subport_tc3_rate = ((double) s->tc_credits_per_period[3]) / ((double) s->tc_period);
		double pipe_tc3_rate = ((double) params->tc_credits_per_period[3]) / ((double) params->tc_period);
		uint32_t tc3_ov = s->tc_ov;

		/* Unplug pipe from its subport */
		s->tc_ov_n -= params->tc_ov_weight;
		s->tc_ov_rate -= pipe_tc3_rate;
		s->tc_ov = s->tc_ov_rate > subport_tc3_rate;

		if (s->tc_ov != tc3_ov) {
			RTE_LOG(DEBUG, SCHED,
				"Subport %u TC3 oversubscription is OFF (%.4lf >= %.4lf)\n",
				subport_id, subport_tc3_rate, s->tc_ov_rate);
		}
#endif

		/* Reset the pipe */
		memset(p, 0, sizeof(struct rte_sched_pipe));
	}

	if (deactivate) {
		return 0;
	}

	/* Apply the new pipe configuration */
	p->profile = profile;
	params = port->pipe_profiles + p->profile;

	/* Token Bucket (TB) */
	p->tb_time = port->time;
	p->tb_credits = params->tb_size / 2;

	/* Traffic Classes (TCs) */
	p->tc_time = port->time + params->tc_period;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
		p->tc_credits[i] = params->tc_credits_per_period[i];
	}

#ifdef RTE_SCHED_SUBPORT_TC_OV
	{
		/* Subport TC3 oversubscription */
		double subport_tc3_rate = ((double) s->tc_credits_per_period[3]) / ((double) s->tc_period);
		double pipe_tc3_rate = ((double) params->tc_credits_per_period[3]) / ((double) params->tc_period);
		uint32_t tc3_ov = s->tc_ov;

		s->tc_ov_n += params->tc_ov_weight;
		s->tc_ov_rate += pipe_tc3_rate;
		s->tc_ov = s->tc_ov_rate > subport_tc3_rate;

		if (s->tc_ov != tc3_ov) {
			RTE_LOG(DEBUG, SCHED,
				"Subport %u TC3 oversubscription is ON (%.4lf < %.4lf)\n",
				subport_id, subport_tc3_rate, s->tc_ov_rate);
		}
		p->tc_ov_period_id = s->tc_ov_period_id;
		p->tc_ov_credits = s->tc_ov_wm;
	}
#endif

	return 0;
}
void
rte_sched_port_pkt_write(struct rte_mbuf *pkt,
			 uint32_t subport, uint32_t pipe, uint32_t traffic_class,
			 uint32_t queue, enum rte_meter_color color)
{
	struct rte_sched_port_hierarchy *sched
		= (struct rte_sched_port_hierarchy *) &pkt->hash.sched;

	sched->color = (uint32_t) color;
	sched->subport = subport;
	sched->pipe = pipe;
	sched->traffic_class = traffic_class;
	sched->queue = queue;
}

void
rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
				  uint32_t *subport, uint32_t *pipe,
				  uint32_t *traffic_class, uint32_t *queue)
{
	const struct rte_sched_port_hierarchy *sched
		= (const struct rte_sched_port_hierarchy *) &pkt->hash.sched;

	*subport = sched->subport;
	*pipe = sched->pipe;
	*traffic_class = sched->traffic_class;
	*queue = sched->queue;
}

enum rte_meter_color
rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
{
	const struct rte_sched_port_hierarchy *sched
		= (const struct rte_sched_port_hierarchy *) &pkt->hash.sched;

	return (enum rte_meter_color) sched->color;
}
int
rte_sched_subport_read_stats(struct rte_sched_port *port,
			     uint32_t subport_id,
			     struct rte_sched_subport_stats *stats,
			     uint32_t *tc_ov)
{
	struct rte_sched_subport *s;

	/* Check user parameters */
	if ((port == NULL) ||
	    (subport_id >= port->n_subports_per_port) ||
	    (stats == NULL) ||
	    (tc_ov == NULL)) {
		return -1;
	}
	s = port->subport + subport_id;

	/* Copy subport stats and clear */
	memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
	memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));

	/* Subport TC oversubscription status */
	*tc_ov = s->tc_ov;

	return 0;
}

int
rte_sched_queue_read_stats(struct rte_sched_port *port,
			   uint32_t queue_id,
			   struct rte_sched_queue_stats *stats,
			   uint16_t *qlen)
{
	struct rte_sched_queue *q;
	struct rte_sched_queue_extra *qe;

	/* Check user parameters */
	if ((port == NULL) ||
	    (queue_id >= rte_sched_port_queues_per_port(port)) ||
	    (stats == NULL) ||
	    (qlen == NULL)) {
		return -1;
	}
	q = port->queue + queue_id;
	qe = port->queue_extra + queue_id;

	/* Copy queue stats and clear */
	memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
	memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats));

	/* Queue length */
	*qlen = q->qw - q->qr;

	return 0;
}
static inline uint32_t
rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue)
{
	uint32_t result;

	result = subport * port->n_pipes_per_subport + pipe;
	result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + traffic_class;
	result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;

	return result;
}

static inline struct rte_mbuf **
rte_sched_port_qbase(struct rte_sched_port *port, uint32_t qindex)
{
	uint32_t pindex = qindex >> 4;
	uint32_t qpos = qindex & 0xF;

	return (port->queue_array + pindex * port->qsize_sum + port->qsize_add[qpos]);
}

static inline uint16_t
rte_sched_port_qsize(struct rte_sched_port *port, uint32_t qindex)
{
	uint32_t tc = (qindex >> 2) & 0x3;

	return port->qsize[tc];
}
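
/*
 * Worked example (illustrative): with 1 subport and 4096 pipes, the
 * packet path (subport 0, pipe 5, tc 2, queue 1) maps to qindex
 * ((0 * 4096 + 5) * 4 + 2) * 4 + 1 = 89. Decoding: pipe = 89 >> 4 = 5,
 * tc = (89 >> 2) & 0x3 = 2, queue within the tc = 89 & 0x3 = 1.
 */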
#ifdef RTE_SCHED_DEBUG

static inline int
rte_sched_port_queue_is_empty(struct rte_sched_port *port, uint32_t qindex)
{
	struct rte_sched_queue *queue = port->queue + qindex;

	return (queue->qr == queue->qw);
}

static inline int
rte_sched_port_queue_is_full(struct rte_sched_port *port, uint32_t qindex)
{
	struct rte_sched_queue *queue = port->queue + qindex;
	uint16_t qsize = rte_sched_port_qsize(port, qindex);
	uint16_t qlen = queue->qw - queue->qr;

	return (qlen >= qsize);
}

#endif /* RTE_SCHED_DEBUG */
#ifdef RTE_SCHED_COLLECT_STATS

static inline void
rte_sched_port_update_subport_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
	struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
	uint32_t tc_index = (qindex >> 2) & 0x3;
	uint32_t pkt_len = pkt->pkt_len;

	s->stats.n_pkts_tc[tc_index] += 1;
	s->stats.n_bytes_tc[tc_index] += pkt_len;
}

static inline void
rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
	struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
	uint32_t tc_index = (qindex >> 2) & 0x3;
	uint32_t pkt_len = pkt->pkt_len;

	s->stats.n_pkts_tc_dropped[tc_index] += 1;
	s->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
}

static inline void
rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
	uint32_t pkt_len = pkt->pkt_len;

	qe->stats.n_pkts += 1;
	qe->stats.n_bytes += pkt_len;
}

static inline void
rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
	uint32_t pkt_len = pkt->pkt_len;

	qe->stats.n_pkts_dropped += 1;
	qe->stats.n_bytes_dropped += pkt_len;
}

#endif /* RTE_SCHED_COLLECT_STATS */
#ifdef RTE_SCHED_RED

static inline int
rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint32_t qindex, uint16_t qlen)
{
	struct rte_sched_queue_extra *qe;
	struct rte_red_config *red_cfg;
	struct rte_red *red;
	uint32_t tc_index;
	enum rte_meter_color color;

	tc_index = (qindex >> 2) & 0x3;
	color = rte_sched_port_pkt_read_color(pkt);
	red_cfg = &port->red_config[tc_index][color];

	if ((red_cfg->min_th | red_cfg->max_th) == 0)
		return 0;

	qe = port->queue_extra + qindex;
	red = &qe->red;

	return rte_red_enqueue(red_cfg, red, qlen, port->time);
}

static inline void
rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port, uint32_t qindex)
{
	struct rte_sched_queue_extra *qe;
	struct rte_red *red;

	qe = port->queue_extra + qindex;
	red = &qe->red;

	rte_red_mark_queue_empty(red, port->time);
}

#else

#define rte_sched_port_red_drop(port, pkt, qindex, qlen) 0

#define rte_sched_port_set_queue_empty_timestamp(port, qindex)

#endif /* RTE_SCHED_RED */
#ifdef RTE_SCHED_DEBUG

static inline int
debug_pipe_is_empty(struct rte_sched_port *port, uint32_t pindex)
{
	uint32_t qindex, i;

	qindex = pindex << 4;

	for (i = 0; i < 16; i ++){
		uint32_t queue_empty = rte_sched_port_queue_is_empty(port, qindex + i);
		uint32_t bmp_bit_clear = (rte_bitmap_get(port->bmp, qindex + i) == 0);

		if (queue_empty != bmp_bit_clear){
			rte_panic("Queue status mismatch for queue %u of pipe %u\n", i, pindex);
		}

		if (!queue_empty) {
			return 0;
		}
	}

	return 1;
}

static inline void
debug_check_queue_slab(struct rte_sched_port *port, uint32_t bmp_pos, uint64_t bmp_slab)
{
	uint64_t mask;
	uint32_t i, panic;

	if (bmp_slab == 0) {
		rte_panic("Empty slab at position %u\n", bmp_pos);
	}

	panic = 0;
	for (i = 0, mask = 1; i < 64; i ++, mask <<= 1) {
		if (mask & bmp_slab){
			if (rte_sched_port_queue_is_empty(port, bmp_pos + i)) {
				printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i);
				panic = 1;
			}
		}
	}

	if (panic) {
		rte_panic("Empty queues in slab 0x%" PRIx64 " starting at position %u\n",
			bmp_slab, bmp_pos);
	}
}

#endif /* RTE_SCHED_DEBUG */
static inline uint32_t
rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_port *port, struct rte_mbuf *pkt)
{
	struct rte_sched_queue *q;
#ifdef RTE_SCHED_COLLECT_STATS
	struct rte_sched_queue_extra *qe;
#endif
	uint32_t subport, pipe, traffic_class, queue, qindex;

	rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &traffic_class, &queue);

	qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
	q = port->queue + qindex;
	rte_prefetch0(q);
#ifdef RTE_SCHED_COLLECT_STATS
	qe = port->queue_extra + qindex;
	rte_prefetch0(qe);
#endif

	return qindex;
}
static inline void
rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf **qbase)
{
	struct rte_sched_queue *q;
	struct rte_mbuf **q_qw;
	uint16_t qsize;

	q = port->queue + qindex;
	qsize = rte_sched_port_qsize(port, qindex);
	q_qw = qbase + (q->qw & (qsize - 1));

	rte_prefetch0(q_qw);
	rte_bitmap_prefetch0(port->bmp, qindex);
}
static inline int
rte_sched_port_enqueue_qwa(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf **qbase, struct rte_mbuf *pkt)
{
	struct rte_sched_queue *q;
	uint16_t qsize;
	uint16_t qlen;

	q = port->queue + qindex;
	qsize = rte_sched_port_qsize(port, qindex);
	qlen = q->qw - q->qr;

	/* Drop the packet (and update drop stats) when queue is full */
	if (unlikely(rte_sched_port_red_drop(port, pkt, qindex, qlen) || (qlen >= qsize))) {
#ifdef RTE_SCHED_COLLECT_STATS
		/* Update drop stats before freeing: the counters read pkt->pkt_len */
		rte_sched_port_update_subport_stats_on_drop(port, qindex, pkt);
		rte_sched_port_update_queue_stats_on_drop(port, qindex, pkt);
#endif
		rte_pktmbuf_free(pkt);
		return 0;
	}

	/* Enqueue packet */
	qbase[q->qw & (qsize - 1)] = pkt;
	q->qw ++;

	/* Activate queue in the port bitmap */
	rte_bitmap_set(port->bmp, qindex);

	/* Statistics */
#ifdef RTE_SCHED_COLLECT_STATS
	rte_sched_port_update_subport_stats(port, qindex, pkt);
	rte_sched_port_update_queue_stats(port, qindex, pkt);
#endif

	return 1;
}
/*
 * The enqueue function implements a 4-level pipeline with each stage processing
 * two different packets. The purpose of using a pipeline is to hide the latency
 * of prefetching the data structures. The naming convention is presented in the
 * diagram below (pXY = the packet held by slot Y of stage X):
 *
 *   p00  _______   p10  _______   p20  _______   p30  _______
 * ----->|       |----->|       |----->|       |----->|       |----->
 *       |   0   |      |   1   |      |   2   |      |   3   |
 * ----->|_______|----->|_______|----->|_______|----->|_______|----->
 *   p01            p11            p21            p31
 *
 */
int
rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21, *pkt30, *pkt31, *pkt_last;
	struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base, **q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
	uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
	uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
	uint32_t result, i;

	result = 0;

	/* Less than 6 input packets available, which is not enough to feed the pipeline */
	if (unlikely(n_pkts < 6)) {
		struct rte_mbuf **q_base[5];
		uint32_t q[5];

		/* Prefetch the mbuf structure of each packet */
		for (i = 0; i < n_pkts; i ++) {
			rte_prefetch0(pkts[i]);
		}

		/* Prefetch the queue structure for each queue */
		for (i = 0; i < n_pkts; i ++) {
			q[i] = rte_sched_port_enqueue_qptrs_prefetch0(port, pkts[i]);
		}

		/* Prefetch the write pointer location of each queue */
		for (i = 0; i < n_pkts; i ++) {
			q_base[i] = rte_sched_port_qbase(port, q[i]);
			rte_sched_port_enqueue_qwa_prefetch0(port, q[i], q_base[i]);
		}

		/* Write each packet to its queue */
		for (i = 0; i < n_pkts; i ++) {
			result += rte_sched_port_enqueue_qwa(port, q[i], q_base[i], pkts[i]);
		}

		return result;
	}

	/* Feed the first 3 stages of the pipeline (6 packets needed) */
	pkt20 = pkts[0];
	pkt21 = pkts[1];
	rte_prefetch0(pkt20);
	rte_prefetch0(pkt21);

	pkt10 = pkts[2];
	pkt11 = pkts[3];
	rte_prefetch0(pkt10);
	rte_prefetch0(pkt11);

	q20 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt20);
	q21 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt21);

	pkt00 = pkts[4];
	pkt01 = pkts[5];
	rte_prefetch0(pkt00);
	rte_prefetch0(pkt01);

	q10 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt10);
	q11 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt11);

	q20_base = rte_sched_port_qbase(port, q20);
	q21_base = rte_sched_port_qbase(port, q21);
	rte_sched_port_enqueue_qwa_prefetch0(port, q20, q20_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, q21, q21_base);

	/* Run the pipeline */
	for (i = 6; i < (n_pkts & (~1)); i += 2) {
		/* Propagate stage inputs */
		pkt30 = pkt20;
		pkt31 = pkt21;
		pkt20 = pkt10;
		pkt21 = pkt11;
		pkt10 = pkt00;
		pkt11 = pkt01;
		q30 = q20;
		q31 = q21;
		q20 = q10;
		q21 = q11;
		q30_base = q20_base;
		q31_base = q21_base;

		/* Stage 0: Get packets in */
		pkt00 = pkts[i];
		pkt01 = pkts[i + 1];
		rte_prefetch0(pkt00);
		rte_prefetch0(pkt01);

		/* Stage 1: Prefetch queue structure storing queue pointers */
		q10 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt10);
		q11 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt11);

		/* Stage 2: Prefetch queue write location */
		q20_base = rte_sched_port_qbase(port, q20);
		q21_base = rte_sched_port_qbase(port, q21);
		rte_sched_port_enqueue_qwa_prefetch0(port, q20, q20_base);
		rte_sched_port_enqueue_qwa_prefetch0(port, q21, q21_base);

		/* Stage 3: Write packet to queue and activate queue */
		r30 = rte_sched_port_enqueue_qwa(port, q30, q30_base, pkt30);
		r31 = rte_sched_port_enqueue_qwa(port, q31, q31_base, pkt31);
		result += r30 + r31;
	}

	/* Drain the pipeline (exactly 6 packets). Handle the last packet in the case
	of an odd number of input packets. */
	pkt_last = pkts[n_pkts - 1];
	rte_prefetch0(pkt_last);

	q00 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt00);
	q01 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt01);

	q10_base = rte_sched_port_qbase(port, q10);
	q11_base = rte_sched_port_qbase(port, q11);
	rte_sched_port_enqueue_qwa_prefetch0(port, q10, q10_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, q11, q11_base);

	r20 = rte_sched_port_enqueue_qwa(port, q20, q20_base, pkt20);
	r21 = rte_sched_port_enqueue_qwa(port, q21, q21_base, pkt21);
	result += r20 + r21;

	q_last = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt_last);

	q00_base = rte_sched_port_qbase(port, q00);
	q01_base = rte_sched_port_qbase(port, q01);
	rte_sched_port_enqueue_qwa_prefetch0(port, q00, q00_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, q01, q01_base);

	r10 = rte_sched_port_enqueue_qwa(port, q10, q10_base, pkt10);
	r11 = rte_sched_port_enqueue_qwa(port, q11, q11_base, pkt11);
	result += r10 + r11;

	q_last_base = rte_sched_port_qbase(port, q_last);
	rte_sched_port_enqueue_qwa_prefetch0(port, q_last, q_last_base);

	r00 = rte_sched_port_enqueue_qwa(port, q00, q00_base, pkt00);
	r01 = rte_sched_port_enqueue_qwa(port, q01, q01_base, pkt01);
	result += r00 + r01;

	if (n_pkts & 1) {
		r_last = rte_sched_port_enqueue_qwa(port, q_last, q_last_base, pkt_last);
		result += r_last;
	}

	return result;
}
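
/*
 * Illustrative trace (n_pkts = 8): packets 0-5 pre-fill stages 2 and 1
 * and feed stage 0; the steady-state loop then runs once (for packets
 * 6-7), retiring packets 0-1, after which the drain sequence retires the
 * six packets still in flight. Each packet thus visits all four stages:
 * mbuf prefetch, queue-struct prefetch, write-location prefetch, and
 * write + queue activation.
 */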
#ifndef RTE_SCHED_SUBPORT_TC_OV

static inline void
grinder_credits_update(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *params = grinder->pipe_params;
	uint64_t n_periods;

	/* Subport TB */
	n_periods = (port->time - subport->tb_time) / subport->tb_period;
	subport->tb_credits += n_periods * subport->tb_credits_per_period;
	subport->tb_credits = rte_sched_min_val_2_u32(subport->tb_credits, subport->tb_size);
	subport->tb_time += n_periods * subport->tb_period;

	/* Pipe TB */
	n_periods = (port->time - pipe->tb_time) / params->tb_period;
	pipe->tb_credits += n_periods * params->tb_credits_per_period;
	pipe->tb_credits = rte_sched_min_val_2_u32(pipe->tb_credits, params->tb_size);
	pipe->tb_time += n_periods * params->tb_period;

	/* Subport TCs */
	if (unlikely(port->time >= subport->tc_time)) {
		subport->tc_credits[0] = subport->tc_credits_per_period[0];
		subport->tc_credits[1] = subport->tc_credits_per_period[1];
		subport->tc_credits[2] = subport->tc_credits_per_period[2];
		subport->tc_credits[3] = subport->tc_credits_per_period[3];
		subport->tc_time = port->time + subport->tc_period;
	}

	/* Pipe TCs */
	if (unlikely(port->time >= pipe->tc_time)) {
		pipe->tc_credits[0] = params->tc_credits_per_period[0];
		pipe->tc_credits[1] = params->tc_credits_per_period[1];
		pipe->tc_credits[2] = params->tc_credits_per_period[2];
		pipe->tc_credits[3] = params->tc_credits_per_period[3];
		pipe->tc_time = port->time + params->tc_period;
	}
}
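
/*
 * Worked example (illustrative): with tb_period = 10 and
 * tb_credits_per_period = 1 (the pipe is allotted 1/10th of the port
 * rate), advancing port->time by 1000 bytes yields n_periods = 100, so
 * the bucket gains 100 byte-credits, saturating at tb_size.
 */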
#else

static inline uint32_t
grinder_tc_ov_credits_update(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	uint32_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tc_ov_consumption_max;
	uint32_t tc_ov_wm = subport->tc_ov_wm;

	if (subport->tc_ov == 0) {
		return subport->tc_ov_wm_max;
	}

	tc_ov_consumption[0] = subport->tc_credits_per_period[0] - subport->tc_credits[0];
	tc_ov_consumption[1] = subport->tc_credits_per_period[1] - subport->tc_credits[1];
	tc_ov_consumption[2] = subport->tc_credits_per_period[2] - subport->tc_credits[2];
	tc_ov_consumption[3] = subport->tc_credits_per_period[3] - subport->tc_credits[3];

	tc_ov_consumption_max = subport->tc_credits_per_period[3] -
		(tc_ov_consumption[0] + tc_ov_consumption[1] + tc_ov_consumption[2]);

	if (tc_ov_consumption[3] > (tc_ov_consumption_max - port->mtu)) {
		tc_ov_wm -= tc_ov_wm >> 7;
		if (tc_ov_wm < subport->tc_ov_wm_min) {
			tc_ov_wm = subport->tc_ov_wm_min;
		}
		return tc_ov_wm;
	}

	tc_ov_wm += (tc_ov_wm >> 7) + 1;
	if (tc_ov_wm > subport->tc_ov_wm_max) {
		tc_ov_wm = subport->tc_ov_wm_max;
	}

	return tc_ov_wm;
}
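
/*
 * Illustrative note: the watermark moves by roughly 1/128th (>> 7) of
 * its current value per TC period, shrinking towards tc_ov_wm_min while
 * TC3 (best effort) demand exceeds the leftover subport bandwidth and
 * growing back towards tc_ov_wm_max otherwise, i.e. a slow
 * multiplicative decrease/increase of the per-pipe TC3 allowance.
 */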
static inline void
grinder_credits_update(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *params = grinder->pipe_params;
	uint64_t n_periods;

	/* Subport TB */
	n_periods = (port->time - subport->tb_time) / subport->tb_period;
	subport->tb_credits += n_periods * subport->tb_credits_per_period;
	subport->tb_credits = rte_sched_min_val_2_u32(subport->tb_credits, subport->tb_size);
	subport->tb_time += n_periods * subport->tb_period;

	/* Pipe TB */
	n_periods = (port->time - pipe->tb_time) / params->tb_period;
	pipe->tb_credits += n_periods * params->tb_credits_per_period;
	pipe->tb_credits = rte_sched_min_val_2_u32(pipe->tb_credits, params->tb_size);
	pipe->tb_time += n_periods * params->tb_period;

	/* Subport TCs */
	if (unlikely(port->time >= subport->tc_time)) {
		subport->tc_ov_wm = grinder_tc_ov_credits_update(port, pos);

		subport->tc_credits[0] = subport->tc_credits_per_period[0];
		subport->tc_credits[1] = subport->tc_credits_per_period[1];
		subport->tc_credits[2] = subport->tc_credits_per_period[2];
		subport->tc_credits[3] = subport->tc_credits_per_period[3];

		subport->tc_time = port->time + subport->tc_period;
		subport->tc_ov_period_id ++;
	}

	/* Pipe TCs */
	if (unlikely(port->time >= pipe->tc_time)) {
		pipe->tc_credits[0] = params->tc_credits_per_period[0];
		pipe->tc_credits[1] = params->tc_credits_per_period[1];
		pipe->tc_credits[2] = params->tc_credits_per_period[2];
		pipe->tc_credits[3] = params->tc_credits_per_period[3];
		pipe->tc_time = port->time + params->tc_period;
	}

	/* Pipe TCs - Oversubscription */
	if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
		pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;
		pipe->tc_ov_period_id = subport->tc_ov_period_id;
	}
}

#endif /* RTE_SCHED_SUBPORT_TC_OV */
#ifndef RTE_SCHED_SUBPORT_TC_OV

static inline int
grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t tc_index = grinder->tc_index;
	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
	uint32_t subport_tb_credits = subport->tb_credits;
	uint32_t subport_tc_credits = subport->tc_credits[tc_index];
	uint32_t pipe_tb_credits = pipe->tb_credits;
	uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
	int enough_credits;

	/* Check queue credits */
	enough_credits = (pkt_len <= subport_tb_credits) &&
		(pkt_len <= subport_tc_credits) &&
		(pkt_len <= pipe_tb_credits) &&
		(pkt_len <= pipe_tc_credits);

	if (!enough_credits) {
		return 0;
	}

	/* Update port credits */
	subport->tb_credits -= pkt_len;
	subport->tc_credits[tc_index] -= pkt_len;
	pipe->tb_credits -= pkt_len;
	pipe->tc_credits[tc_index] -= pkt_len;

	return 1;
}

#else

static inline int
grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t tc_index = grinder->tc_index;
	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
	uint32_t subport_tb_credits = subport->tb_credits;
	uint32_t subport_tc_credits = subport->tc_credits[tc_index];
	uint32_t pipe_tb_credits = pipe->tb_credits;
	uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
	uint32_t pipe_tc_ov_mask1[] = {UINT32_MAX, UINT32_MAX, UINT32_MAX, pipe->tc_ov_credits};
	uint32_t pipe_tc_ov_mask2[] = {0, 0, 0, UINT32_MAX};
	uint32_t pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
	int enough_credits;

	/* Check pipe and subport credits */
	enough_credits = (pkt_len <= subport_tb_credits) &&
		(pkt_len <= subport_tc_credits) &&
		(pkt_len <= pipe_tb_credits) &&
		(pkt_len <= pipe_tc_credits) &&
		(pkt_len <= pipe_tc_ov_credits);

	if (!enough_credits) {
		return 0;
	}

	/* Update pipe and subport credits */
	subport->tb_credits -= pkt_len;
	subport->tc_credits[tc_index] -= pkt_len;
	pipe->tb_credits -= pkt_len;
	pipe->tc_credits[tc_index] -= pkt_len;
	pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;

	return 1;
}

#endif /* RTE_SCHED_SUBPORT_TC_OV */
static inline int
grinder_schedule(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;

	if (!grinder_credits_check(port, pos)) {
		return 0;
	}

	/* Advance port time */
	port->time += pkt_len;

	/* Send packet */
	port->pkts_out[port->n_pkts_out ++] = pkt;
	queue->qr ++;
	grinder->wrr_tokens[grinder->qpos] += pkt_len * grinder->wrr_cost[grinder->qpos];
	if (queue->qr == queue->qw) {
		uint32_t qindex = grinder->qindex[grinder->qpos];

		rte_bitmap_clear(port->bmp, qindex);
		grinder->qmask &= ~(1 << grinder->qpos);
		grinder->wrr_mask[grinder->qpos] = 0;
		rte_sched_port_set_queue_empty_timestamp(port, qindex);
	}

	/* Reset pipe loop detection */
	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
	grinder->productive = 1;

	return 1;
}
#if RTE_SCHED_OPTIMIZATIONS

static inline int
grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
{
	__m128i index = _mm_set1_epi32 (base_pipe);
	__m128i pipes = _mm_load_si128((__m128i *)port->grinder_base_bmp_pos);
	__m128i res = _mm_cmpeq_epi32(pipes, index);
	pipes = _mm_load_si128((__m128i *)(port->grinder_base_bmp_pos + 4));
	pipes = _mm_cmpeq_epi32(pipes, index);
	res = _mm_or_si128(res, pipes);

	if (_mm_testz_si128(res, res))
		return 0;

	return 1;
}

#else

static inline int
grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
{
	uint32_t i;

	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i ++) {
		if (port->grinder_base_bmp_pos[i] == base_pipe) {
			return 1;
		}
	}

	return 0;
}

#endif /* RTE_SCHED_OPTIMIZATIONS */
static inline void
grinder_pcache_populate(struct rte_sched_port *port, uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint16_t w[4];

	grinder->pcache_w = 0;
	grinder->pcache_r = 0;

	w[0] = (uint16_t) bmp_slab;
	w[1] = (uint16_t) (bmp_slab >> 16);
	w[2] = (uint16_t) (bmp_slab >> 32);
	w[3] = (uint16_t) (bmp_slab >> 48);

	grinder->pcache_qmask[grinder->pcache_w] = w[0];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
	grinder->pcache_w += (w[0] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[1];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
	grinder->pcache_w += (w[1] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[2];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
	grinder->pcache_w += (w[2] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[3];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
	grinder->pcache_w += (w[3] != 0);
}
static inline void
grinder_tccache_populate(struct rte_sched_port *port, uint32_t pos, uint32_t qindex, uint16_t qmask)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint8_t b[4];

	grinder->tccache_w = 0;
	grinder->tccache_r = 0;

	b[0] = (uint8_t) (qmask & 0xF);
	b[1] = (uint8_t) ((qmask >> 4) & 0xF);
	b[2] = (uint8_t) ((qmask >> 8) & 0xF);
	b[3] = (uint8_t) ((qmask >> 12) & 0xF);

	grinder->tccache_qmask[grinder->tccache_w] = b[0];
	grinder->tccache_qindex[grinder->tccache_w] = qindex;
	grinder->tccache_w += (b[0] != 0);

	grinder->tccache_qmask[grinder->tccache_w] = b[1];
	grinder->tccache_qindex[grinder->tccache_w] = qindex + 4;
	grinder->tccache_w += (b[1] != 0);

	grinder->tccache_qmask[grinder->tccache_w] = b[2];
	grinder->tccache_qindex[grinder->tccache_w] = qindex + 8;
	grinder->tccache_w += (b[2] != 0);

	grinder->tccache_qmask[grinder->tccache_w] = b[3];
	grinder->tccache_qindex[grinder->tccache_w] = qindex + 12;
	grinder->tccache_w += (b[3] != 0);
}
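
/*
 * Worked example (illustrative): a bitmap slab of 0x0000000100030000
 * starting at bmp_pos 0 splits into the 16-bit words {0x0000, 0x0003,
 * 0x0001, 0x0000}; only the non-zero words are cached, so pcache holds
 * {qmask 0x0003, qindex 16} and {qmask 0x0001, qindex 32}, i.e. the
 * pipes whose queues start at indices 16 and 32.
 */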
static inline int
grinder_next_tc(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_mbuf **qbase;
	uint32_t qindex;
	uint16_t qsize;

	if (grinder->tccache_r == grinder->tccache_w) {
		return 0;
	}

	qindex = grinder->tccache_qindex[grinder->tccache_r];
	qbase = rte_sched_port_qbase(port, qindex);
	qsize = rte_sched_port_qsize(port, qindex);

	grinder->tc_index = (qindex >> 2) & 0x3;
	grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
	grinder->qsize = qsize;

	grinder->qindex[0] = qindex;
	grinder->qindex[1] = qindex + 1;
	grinder->qindex[2] = qindex + 2;
	grinder->qindex[3] = qindex + 3;

	grinder->queue[0] = port->queue + qindex;
	grinder->queue[1] = port->queue + qindex + 1;
	grinder->queue[2] = port->queue + qindex + 2;
	grinder->queue[3] = port->queue + qindex + 3;

	grinder->qbase[0] = qbase;
	grinder->qbase[1] = qbase + qsize;
	grinder->qbase[2] = qbase + 2 * qsize;
	grinder->qbase[3] = qbase + 3 * qsize;

	grinder->tccache_r ++;
	return 1;
}
static inline int
grinder_next_pipe(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint32_t pipe_qindex;
	uint16_t pipe_qmask;

	if (grinder->pcache_r < grinder->pcache_w) {
		pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
		pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
		grinder->pcache_r ++;
	} else {
		uint64_t bmp_slab = 0;
		uint32_t bmp_pos = 0;

		/* Get another non-empty pipe group */
		if (unlikely(rte_bitmap_scan(port->bmp, &bmp_pos, &bmp_slab) <= 0)) {
			return 0;
		}

#ifdef RTE_SCHED_DEBUG
		debug_check_queue_slab(port, bmp_pos, bmp_slab);
#endif

		/* Return if pipe group already in one of the other grinders */
		port->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
		if (unlikely(grinder_pipe_exists(port, bmp_pos))) {
			return 0;
		}
		port->grinder_base_bmp_pos[pos] = bmp_pos;

		/* Install new pipe group into grinder's pipe cache */
		grinder_pcache_populate(port, pos, bmp_pos, bmp_slab);

		pipe_qmask = grinder->pcache_qmask[0];
		pipe_qindex = grinder->pcache_qindex[0];
		grinder->pcache_r = 1;
	}

	/* Install new pipe in the grinder */
	grinder->pindex = pipe_qindex >> 4;
	grinder->subport = port->subport + (grinder->pindex / port->n_pipes_per_subport);
	grinder->pipe = port->pipe + grinder->pindex;
	grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
	grinder->productive = 0;

	grinder_tccache_populate(port, pos, pipe_qindex, pipe_qmask);
	grinder_next_tc(port, pos);

	/* Check for pipe exhaustion */
	if (grinder->pindex == port->pipe_loop) {
		port->pipe_exhaustion = 1;
		port->pipe_loop = RTE_SCHED_PIPE_INVALID;
	}

	return 1;
}
static inline void
grinder_wrr_load(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
	uint32_t tc_index = grinder->tc_index;
	uint32_t qmask = grinder->qmask;
	uint32_t qindex;

	qindex = tc_index * 4;

	grinder->wrr_tokens[0] = ((uint16_t) pipe->wrr_tokens[qindex]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[1] = ((uint16_t) pipe->wrr_tokens[qindex + 1]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[2] = ((uint16_t) pipe->wrr_tokens[qindex + 2]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[3] = ((uint16_t) pipe->wrr_tokens[qindex + 3]) << RTE_SCHED_WRR_SHIFT;

	grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
	grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
	grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
	grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;

	grinder->wrr_cost[0] = pipe_params->wrr_cost[qindex];
	grinder->wrr_cost[1] = pipe_params->wrr_cost[qindex + 1];
	grinder->wrr_cost[2] = pipe_params->wrr_cost[qindex + 2];
	grinder->wrr_cost[3] = pipe_params->wrr_cost[qindex + 3];
}
static inline void
grinder_wrr_store(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	uint32_t tc_index = grinder->tc_index;
	uint32_t qindex;

	qindex = tc_index * 4;

	pipe->wrr_tokens[qindex] = (uint8_t) ((grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >> RTE_SCHED_WRR_SHIFT);
	pipe->wrr_tokens[qindex + 1] = (uint8_t) ((grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >> RTE_SCHED_WRR_SHIFT);
	pipe->wrr_tokens[qindex + 2] = (uint8_t) ((grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >> RTE_SCHED_WRR_SHIFT);
	pipe->wrr_tokens[qindex + 3] = (uint8_t) ((grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >> RTE_SCHED_WRR_SHIFT);
}
static inline void
grinder_wrr(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint16_t wrr_tokens_min;

	grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
	grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
	grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
	grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];

	grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
	wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];

	grinder->wrr_tokens[0] -= wrr_tokens_min;
	grinder->wrr_tokens[1] -= wrr_tokens_min;
	grinder->wrr_tokens[2] -= wrr_tokens_min;
	grinder->wrr_tokens[3] -= wrr_tokens_min;
}
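
/*
 * Worked example (illustrative): with queues 0 and 2 active
 * (wrr_mask = {0xFFFF, 0, 0xFFFF, 0}) and wrr_tokens = {48, 10, 16, 5},
 * the OR with ~mask forces the inactive slots to 0xFFFF, giving
 * {48, 0xFFFF, 16, 0xFFFF}; queue 2 is picked (minimum, 16 tokens) and
 * 16 is subtracted from every slot, leaving queue 0 at 32 tokens and
 * queue 2 at 0 for the next selection.
 */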
#define grinder_evict(port, pos)
static inline void
grinder_prefetch_pipe(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;

	rte_prefetch0(grinder->pipe);
	rte_prefetch0(grinder->queue[0]);
}
static inline void
grinder_prefetch_tc_queue_arrays(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint16_t qsize, qr[4];

	qsize = grinder->qsize;
	qr[0] = grinder->queue[0]->qr & (qsize - 1);
	qr[1] = grinder->queue[1]->qr & (qsize - 1);
	qr[2] = grinder->queue[2]->qr & (qsize - 1);
	qr[3] = grinder->queue[3]->qr & (qsize - 1);

	rte_prefetch0(grinder->qbase[0] + qr[0]);
	rte_prefetch0(grinder->qbase[1] + qr[1]);

	grinder_wrr_load(port, pos);
	grinder_wrr(port, pos);

	rte_prefetch0(grinder->qbase[2] + qr[2]);
	rte_prefetch0(grinder->qbase[3] + qr[3]);
}
static inline void
grinder_prefetch_mbuf(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint32_t qpos = grinder->qpos;
	struct rte_mbuf **qbase = grinder->qbase[qpos];
	uint16_t qsize = grinder->qsize;
	uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);

	grinder->pkt = qbase[qr];
	rte_prefetch0(grinder->pkt);

	if (unlikely((qr & 0x7) == 7)) {
		uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);

		rte_prefetch0(qbase + qr_next);
	}
}
static inline uint32_t
grinder_handle(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;

	switch (grinder->state) {
	case e_GRINDER_PREFETCH_PIPE:
	{
		if (grinder_next_pipe(port, pos)) {
			grinder_prefetch_pipe(port, pos);
			port->busy_grinders ++;

			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
			return 0;
		}

		return 0;
	}

	case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
	{
		struct rte_sched_pipe *pipe = grinder->pipe;

		grinder->pipe_params = port->pipe_profiles + pipe->profile;
		grinder_prefetch_tc_queue_arrays(port, pos);
		grinder_credits_update(port, pos);

		grinder->state = e_GRINDER_PREFETCH_MBUF;
		return 0;
	}

	case e_GRINDER_PREFETCH_MBUF:
	{
		grinder_prefetch_mbuf(port, pos);

		grinder->state = e_GRINDER_READ_MBUF;
		return 0;
	}

	case e_GRINDER_READ_MBUF:
	{
		uint32_t result = 0;

		result = grinder_schedule(port, pos);

		/* Look for next packet within the same TC */
		if (result && grinder->qmask) {
			grinder_wrr(port, pos);
			grinder_prefetch_mbuf(port, pos);

			return 1;
		}
		grinder_wrr_store(port, pos);

		/* Look for another active TC within same pipe */
		if (grinder_next_tc(port, pos)) {
			grinder_prefetch_tc_queue_arrays(port, pos);

			grinder->state = e_GRINDER_PREFETCH_MBUF;
			return result;
		}
		if ((grinder->productive == 0) && (port->pipe_loop == RTE_SCHED_PIPE_INVALID)) {
			port->pipe_loop = grinder->pindex;
		}
		grinder_evict(port, pos);

		/* Look for another active pipe */
		if (grinder_next_pipe(port, pos)) {
			grinder_prefetch_pipe(port, pos);

			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
			return result;
		}

		/* No active pipe found */
		port->busy_grinders --;

		grinder->state = e_GRINDER_PREFETCH_PIPE;
		return result;
	}

	default:
		rte_panic("Algorithmic error (invalid state)\n");
		return 0;
	}
}
static inline void
rte_sched_port_time_resync(struct rte_sched_port *port)
{
	uint64_t cycles = rte_get_tsc_cycles();
	uint64_t cycles_diff = cycles - port->time_cpu_cycles;
	double bytes_diff = ((double) cycles_diff) / port->cycles_per_byte;

	/* Advance port time */
	port->time_cpu_cycles = cycles;
	port->time_cpu_bytes += (uint64_t) bytes_diff;
	if (port->time < port->time_cpu_bytes) {
		port->time = port->time_cpu_bytes;
	}

	/* Reset pipe loop detection */
	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
}
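
/*
 * Worked example (illustrative): on a 2.5 GHz CPU driving a 10 GbE
 * port (1.25e9 bytes/s), cycles_per_byte = 2.5e9 / 1.25e9 = 2.0, so
 * 10,000 elapsed TSC cycles advance the reference time by 5,000 bytes.
 * port->time only ever moves forward: it takes the CPU-derived byte
 * time unless the bytes already scheduled out have pushed it further.
 */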
static inline int
rte_sched_port_exceptions(struct rte_sched_port *port, int second_pass)
{
	int exceptions;

	/* Check if any exception flag is set */
	exceptions = (second_pass && port->busy_grinders == 0) ||
		(port->pipe_exhaustion == 1);

	/* Clear exception flags */
	port->pipe_exhaustion = 0;

	return exceptions;
}
int
rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	uint32_t i, count;

	port->pkts_out = pkts;
	port->n_pkts_out = 0;

	rte_sched_port_time_resync(port);

	/* Take each queue in the grinder one step further */
	for (i = 0, count = 0; ; i ++) {
		count += grinder_handle(port, i & (RTE_SCHED_PORT_N_GRINDERS - 1));
		if ((count == n_pkts) ||
		    rte_sched_port_exceptions(port, i >= RTE_SCHED_PORT_N_GRINDERS)) {
			break;
		}
	}

	return count;
}
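
/*
 * Usage sketch (illustrative, values hypothetical): a typical fast-path
 * loop moves bursts through the hierarchy; both calls are non-blocking
 * and the dequeue burst size is independent of the enqueue burst size.
 */
#if 0
	struct rte_mbuf *pkts_in[64], *pkts_out[32];
	uint32_t n_in, n_out;

	/* pkts_in[0 .. n_in-1] already classified via rte_sched_port_pkt_write() */
	rte_sched_port_enqueue(port, pkts_in, n_in);
	n_out = rte_sched_port_dequeue(port, pkts_out, 32);
	/* transmit pkts_out[0 .. n_out-1] */
#endif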