/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf.h>

#include "rte_sched.h"
#include "rte_bitmap.h"
#include "rte_sched_common.h"
#include "rte_approx.h"
#ifdef __INTEL_COMPILER
#pragma warning(disable:2259) /* conversion may lose significant bits */
#endif

#ifndef RTE_SCHED_DEBUG
#define RTE_SCHED_DEBUG 0
#endif

#ifndef RTE_SCHED_OPTIMIZATIONS
#define RTE_SCHED_OPTIMIZATIONS 0
#endif

#if RTE_SCHED_OPTIMIZATIONS
#include <immintrin.h>
#endif

#define RTE_SCHED_ENQUEUE 1

#define RTE_SCHED_TS 1

#if RTE_SCHED_TS == 0 /* Infinite credits. Traffic shaping disabled. */
#define RTE_SCHED_TS_CREDITS_UPDATE 0
#define RTE_SCHED_TS_CREDITS_CHECK 0
#else /* Real credits. Full traffic shaping implemented. */
#define RTE_SCHED_TS_CREDITS_UPDATE 1
#define RTE_SCHED_TS_CREDITS_CHECK 1
#endif

#ifndef RTE_SCHED_TB_RATE_CONFIG_ERR
#define RTE_SCHED_TB_RATE_CONFIG_ERR (1e-7)
#endif

#define RTE_SCHED_WRR 1

#ifndef RTE_SCHED_WRR_SHIFT
#define RTE_SCHED_WRR_SHIFT 3
#endif

#ifndef RTE_SCHED_PORT_N_GRINDERS
#define RTE_SCHED_PORT_N_GRINDERS 8
#endif
#if (RTE_SCHED_PORT_N_GRINDERS == 0) || (RTE_SCHED_PORT_N_GRINDERS & (RTE_SCHED_PORT_N_GRINDERS - 1))
#error Number of grinders must be non-zero and a power of 2
#endif
#if (RTE_SCHED_OPTIMIZATIONS && (RTE_SCHED_PORT_N_GRINDERS != 8))
#error Number of grinders must be 8 when RTE_SCHED_OPTIMIZATIONS is set
#endif

#define RTE_SCHED_GRINDER_PCACHE_SIZE (64 / RTE_SCHED_QUEUES_PER_PIPE)
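
/* One 64-bit bitmap slab covers 64 queues, i.e. 4 pipes of 16 queues each,
 * so each grinder caches at most 64 / RTE_SCHED_QUEUES_PER_PIPE = 4 pipe
 * entries per bitmap scan (see grinder_pcache_populate() below).
 */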

#define RTE_SCHED_PIPE_INVALID UINT32_MAX

#define RTE_SCHED_BMP_POS_INVALID UINT32_MAX
struct rte_sched_subport {
	/* Token bucket (TB) */
	uint64_t tb_time; /* time of last update */
	uint32_t tb_period;
	uint32_t tb_credits_per_period;
	uint32_t tb_size;
	uint32_t tb_credits;

	/* Traffic classes (TCs) */
	uint64_t tc_time; /* time of next update */
	uint32_t tc_period;
	uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

	/* TC oversubscription */
	uint32_t tc_ov_wm;
	uint32_t tc_ov_wm_min;
	uint32_t tc_ov_wm_max;
	uint8_t tc_ov_period_id;
	uint8_t tc_ov;
	uint32_t tc_ov_n;
	double tc_ov_rate;

	/* Statistics */
	struct rte_sched_subport_stats stats;
} __rte_cache_aligned;
struct rte_sched_pipe_profile {
	/* Token bucket (TB) */
	uint32_t tb_period;
	uint32_t tb_credits_per_period;
	uint32_t tb_size;

	/* Pipe traffic classes */
	uint32_t tc_period;
	uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint8_t tc_ov_weight;

	/* Pipe queues */
	uint8_t wrr_cost[RTE_SCHED_QUEUES_PER_PIPE];
};
struct rte_sched_pipe {
	/* Token bucket (TB) */
	uint64_t tb_time; /* time of last update */
	uint32_t tb_credits;

	/* Pipe profile and flags */
	uint32_t profile;

	/* Traffic classes (TCs) */
	uint64_t tc_time; /* time of next update */
	uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

	/* Weighted Round Robin (WRR) */
	uint8_t wrr_tokens[RTE_SCHED_QUEUES_PER_PIPE];

	/* TC oversubscription */
	uint32_t tc_ov_credits;
	uint8_t tc_ov_period_id;
} __rte_cache_aligned;
struct rte_sched_queue {
	uint16_t qw;
	uint16_t qr;
};

struct rte_sched_queue_extra {
	struct rte_sched_queue_stats stats;
#ifdef RTE_SCHED_RED
	struct rte_red red;
#endif
};
enum grinder_state {
	e_GRINDER_PREFETCH_PIPE = 0,
	e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
	e_GRINDER_PREFETCH_MBUF,
	e_GRINDER_READ_MBUF
};
struct rte_sched_grinder {
	/* Pipe cache */
	uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
	uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
	uint32_t pcache_w;
	uint32_t pcache_r;
	/* Current pipe */
	enum grinder_state state;
	uint32_t productive;
	uint32_t pindex;
	struct rte_sched_subport *subport;
	struct rte_sched_pipe *pipe;
	struct rte_sched_pipe_profile *pipe_params;
	/* TC cache */
	uint8_t tccache_qmask[4];
	uint32_t tccache_qindex[4];
	uint32_t tccache_w;
	uint32_t tccache_r;
	/* Current TC */
	uint32_t tc_index;
	struct rte_sched_queue *queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	struct rte_mbuf **qbase[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint16_t qsize;
	uint32_t qmask;
	uint32_t qpos;
	struct rte_mbuf *pkt;
	/* WRR */
	uint16_t wrr_tokens[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
	uint16_t wrr_mask[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
	uint8_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
};
struct rte_sched_port {
	/* User parameters */
	uint32_t n_subports_per_port;
	uint32_t n_pipes_per_subport;
	uint32_t rate;
	uint32_t mtu;
	uint32_t frame_overhead;
	uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t n_pipe_profiles;
	uint32_t pipe_tc3_rate_max;
#ifdef RTE_SCHED_RED
	struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][e_RTE_METER_COLORS];
#endif

	/* Timing */
	uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cycles */
	uint64_t time_cpu_bytes;  /* Current CPU time measured in bytes */
	uint64_t time;            /* Current NIC TX time measured in bytes */
	double cycles_per_byte;   /* CPU cycles per byte */

	/* Scheduling loop detection */
	uint32_t pipe_loop;
	uint32_t pipe_exhaustion;

	/* Bitmap */
	struct rte_bitmap *bmp;
	uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;

	/* Grinders */
	struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
	uint32_t busy_grinders;
	struct rte_mbuf **pkts_out;
	uint32_t n_pkts_out;

	/* Queue base calculation */
	uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
	uint32_t qsize_sum;

	/* Large data structures */
	struct rte_sched_subport *subport;
	struct rte_sched_pipe *pipe;
	struct rte_sched_queue *queue;
	struct rte_sched_queue_extra *queue_extra;
	struct rte_sched_pipe_profile *pipe_profiles;
	uint8_t *bmp_array;
	struct rte_mbuf **queue_array;
	uint8_t memory[0] __rte_cache_aligned;
} __rte_cache_aligned;
enum rte_sched_port_array {
	e_RTE_SCHED_PORT_ARRAY_SUBPORT = 0,
	e_RTE_SCHED_PORT_ARRAY_PIPE,
	e_RTE_SCHED_PORT_ARRAY_QUEUE,
	e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA,
	e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES,
	e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY,
	e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY,
	e_RTE_SCHED_PORT_ARRAY_TOTAL,
};
#ifdef RTE_SCHED_COLLECT_STATS

static inline uint32_t
rte_sched_port_queues_per_subport(struct rte_sched_port *port)
{
	return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport;
}

#endif

static inline uint32_t
rte_sched_port_queues_per_port(struct rte_sched_port *port)
{
	return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport * port->n_subports_per_port;
}
static int
rte_sched_port_check_params(struct rte_sched_port_params *params)
{
	uint32_t i, j;

	if (params == NULL) {
		return -1;
	}
	if (params->name == NULL) {
		return -2;
	}
	if ((params->socket < 0) || (params->socket >= RTE_MAX_NUMA_NODES)) {
		return -3;
	}
	if (params->rate == 0) {
		return -4;
	}
	if (params->mtu == 0) {
		return -5;
	}
	/* n_subports_per_port: non-zero, power of 2 */
	if ((params->n_subports_per_port == 0) || (!rte_is_power_of_2(params->n_subports_per_port))) {
		return -6;
	}
	/* n_pipes_per_subport: non-zero, power of 2 */
	if ((params->n_pipes_per_subport == 0) || (!rte_is_power_of_2(params->n_pipes_per_subport))) {
		return -7;
	}
	/* qsize: non-zero, power of 2, no bigger than 32K (due to 16-bit read/write pointers) */
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
		uint16_t qsize = params->qsize[i];

		if ((qsize == 0) || (!rte_is_power_of_2(qsize))) {
			return -8;
		}
	}
	/* pipe_profiles and n_pipe_profiles */
	if ((params->pipe_profiles == NULL) ||
	    (params->n_pipe_profiles == 0) ||
	    (params->n_pipe_profiles > RTE_SCHED_PIPE_PROFILES_PER_PORT)) {
		return -9;
	}
	for (i = 0; i < params->n_pipe_profiles; i ++) {
		struct rte_sched_pipe_params *p = params->pipe_profiles + i;

		/* TB rate: non-zero, not greater than port rate */
		if ((p->tb_rate == 0) || (p->tb_rate > params->rate)) {
			return -10;
		}
		/* TB size: non-zero */
		if (p->tb_size == 0) {
			return -11;
		}
		/* TC rate: non-zero, less than pipe rate */
		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
			if ((p->tc_rate[j] == 0) || (p->tc_rate[j] > p->tb_rate)) {
				return -12;
			}
		}
		/* TC period: non-zero */
		if (p->tc_period == 0) {
			return -13;
		}
#ifdef RTE_SCHED_SUBPORT_TC_OV
		/* TC3 oversubscription weight: non-zero */
		if (p->tc_ov_weight == 0) {
			return -14;
		}
#endif
		/* Queue WRR weights: non-zero */
		for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j ++) {
			if (p->wrr_weights[j] == 0) {
				return -15;
			}
		}
	}

	return 0;
}
static uint32_t
rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sched_port_array array)
{
	uint32_t n_subports_per_port = params->n_subports_per_port;
	uint32_t n_pipes_per_subport = params->n_pipes_per_subport;
	uint32_t n_pipes_per_port = n_pipes_per_subport * n_subports_per_port;
	uint32_t n_queues_per_port = RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport * n_subports_per_port;

	uint32_t size_subport = n_subports_per_port * sizeof(struct rte_sched_subport);
	uint32_t size_pipe = n_pipes_per_port * sizeof(struct rte_sched_pipe);
	uint32_t size_queue = n_queues_per_port * sizeof(struct rte_sched_queue);
	uint32_t size_queue_extra = n_queues_per_port * sizeof(struct rte_sched_queue_extra);
	uint32_t size_pipe_profiles = RTE_SCHED_PIPE_PROFILES_PER_PORT * sizeof(struct rte_sched_pipe_profile);
	uint32_t size_bmp_array = rte_bitmap_get_memory_footprint(n_queues_per_port);
	uint32_t size_per_pipe_queue_array, size_queue_array;

	uint32_t base, i;

	size_per_pipe_queue_array = 0;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
		size_per_pipe_queue_array += RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * params->qsize[i] * sizeof(struct rte_mbuf *);
	}
	size_queue_array = n_pipes_per_port * size_per_pipe_queue_array;

	base = 0;

	if (array == e_RTE_SCHED_PORT_ARRAY_SUBPORT) return base;
	base += CACHE_LINE_ROUNDUP(size_subport);

	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE) return base;
	base += CACHE_LINE_ROUNDUP(size_pipe);

	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE) return base;
	base += CACHE_LINE_ROUNDUP(size_queue);

	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA) return base;
	base += CACHE_LINE_ROUNDUP(size_queue_extra);

	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES) return base;
	base += CACHE_LINE_ROUNDUP(size_pipe_profiles);

	if (array == e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY) return base;
	base += CACHE_LINE_ROUNDUP(size_bmp_array);

	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY) return base;
	base += CACHE_LINE_ROUNDUP(size_queue_array);

	return base;
}
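
/* All port data structures live in a single contiguous, cache-line aligned
 * allocation (the memory[0] flexible array member of struct rte_sched_port).
 * rte_sched_port_get_array_base() returns the byte offset of each array
 * within that allocation, with every array rounded up to a cache line
 * boundary; passing e_RTE_SCHED_PORT_ARRAY_TOTAL yields the total size of
 * all arrays combined.
 */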
uint32_t
rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
{
	uint32_t size0, size1;
	int status;

	status = rte_sched_port_check_params(params);
	if (status != 0) {
		RTE_LOG(INFO, SCHED, "Port scheduler params check failed (%d)\n", status);
		return 0;
	}

	size0 = sizeof(struct rte_sched_port);
	size1 = rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL);

	return (size0 + size1);
}
static void
rte_sched_port_config_qsize(struct rte_sched_port *port)
{
	/* TC 0 */
	port->qsize_add[0] = 0;
	port->qsize_add[1] = port->qsize_add[0] + port->qsize[0];
	port->qsize_add[2] = port->qsize_add[1] + port->qsize[0];
	port->qsize_add[3] = port->qsize_add[2] + port->qsize[0];

	/* TC 1 */
	port->qsize_add[4] = port->qsize_add[3] + port->qsize[0];
	port->qsize_add[5] = port->qsize_add[4] + port->qsize[1];
	port->qsize_add[6] = port->qsize_add[5] + port->qsize[1];
	port->qsize_add[7] = port->qsize_add[6] + port->qsize[1];

	/* TC 2 */
	port->qsize_add[8] = port->qsize_add[7] + port->qsize[1];
	port->qsize_add[9] = port->qsize_add[8] + port->qsize[2];
	port->qsize_add[10] = port->qsize_add[9] + port->qsize[2];
	port->qsize_add[11] = port->qsize_add[10] + port->qsize[2];

	/* TC 3 */
	port->qsize_add[12] = port->qsize_add[11] + port->qsize[2];
	port->qsize_add[13] = port->qsize_add[12] + port->qsize[3];
	port->qsize_add[14] = port->qsize_add[13] + port->qsize[3];
	port->qsize_add[15] = port->qsize_add[14] + port->qsize[3];

	port->qsize_sum = port->qsize_add[15] + port->qsize[3];
}
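
/* qsize_add[q] is the mbuf-pointer offset of queue q within one pipe's queue
 * array (offset of queue q+1 = offset of queue q + size of queue q). Example:
 * if all four TCs are configured with 64-entry queues, qsize_add[] =
 * {0, 64, 128, ..., 960} and qsize_sum = 1024, i.e. one pipe's 16 queues
 * occupy 1024 mbuf pointer slots laid out back to back.
 */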
static void
rte_sched_port_log_pipe_profile(struct rte_sched_port *port, uint32_t i)
{
	struct rte_sched_pipe_profile *p = port->pipe_profiles + i;

	RTE_LOG(INFO, SCHED, "Low level config for pipe profile %u:\n"
		"\tToken bucket: period = %u, credits per period = %u, size = %u\n"
		"\tTraffic classes: period = %u, credits per period = [%u, %u, %u, %u]\n"
		"\tTraffic class 3 oversubscription: weight = %hhu\n"
		"\tWRR cost: [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu]\n",
		i,

		/* Token bucket */
		p->tb_period,
		p->tb_credits_per_period,
		p->tb_size,

		/* Traffic classes */
		p->tc_period,
		p->tc_credits_per_period[0],
		p->tc_credits_per_period[1],
		p->tc_credits_per_period[2],
		p->tc_credits_per_period[3],

		/* Traffic class 3 oversubscription */
		p->tc_ov_weight,

		/* WRR */
		p->wrr_cost[ 0], p->wrr_cost[ 1], p->wrr_cost[ 2], p->wrr_cost[ 3],
		p->wrr_cost[ 4], p->wrr_cost[ 5], p->wrr_cost[ 6], p->wrr_cost[ 7],
		p->wrr_cost[ 8], p->wrr_cost[ 9], p->wrr_cost[10], p->wrr_cost[11],
		p->wrr_cost[12], p->wrr_cost[13], p->wrr_cost[14], p->wrr_cost[15]);
}
static inline uint64_t
rte_sched_time_ms_to_bytes(uint32_t time_ms, uint32_t rate)
{
	uint64_t time = time_ms;

	time = (time * rate) / 1000;

	return time;
}
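
/* Example: at a port rate of 1,250,000,000 bytes/s (10 GbE line rate), a
 * tc_period of 10 ms converts to rte_sched_time_ms_to_bytes(10, 1250000000)
 * = 12,500,000 bytes of credit per enforcement period.
 */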
static void
rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port, struct rte_sched_port_params *params)
{
	uint32_t i, j;

	for (i = 0; i < port->n_pipe_profiles; i ++) {
		struct rte_sched_pipe_params *src = params->pipe_profiles + i;
		struct rte_sched_pipe_profile *dst = port->pipe_profiles + i;

		/* Token Bucket */
		if (src->tb_rate == params->rate) {
			dst->tb_credits_per_period = 1;
			dst->tb_period = 1;
		} else {
			double tb_rate = ((double) src->tb_rate) / ((double) params->rate);
			double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

			rte_approx(tb_rate, d, &dst->tb_credits_per_period, &dst->tb_period);
		}
		dst->tb_size = src->tb_size;

		/* Traffic Classes */
		dst->tc_period = (uint32_t) rte_sched_time_ms_to_bytes(src->tc_period, params->rate);
		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
			dst->tc_credits_per_period[j] = (uint32_t) rte_sched_time_ms_to_bytes(src->tc_period, src->tc_rate[j]);
		}
#ifdef RTE_SCHED_SUBPORT_TC_OV
		dst->tc_ov_weight = src->tc_ov_weight;
#endif

		/* WRR */
		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
			uint32_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
			uint32_t lcd, lcd1, lcd2;
			uint32_t qindex;

			qindex = j * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;

			wrr_cost[0] = src->wrr_weights[qindex];
			wrr_cost[1] = src->wrr_weights[qindex + 1];
			wrr_cost[2] = src->wrr_weights[qindex + 2];
			wrr_cost[3] = src->wrr_weights[qindex + 3];

			lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
			lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
			lcd = rte_get_lcd(lcd1, lcd2);

			wrr_cost[0] = lcd / wrr_cost[0];
			wrr_cost[1] = lcd / wrr_cost[1];
			wrr_cost[2] = lcd / wrr_cost[2];
			wrr_cost[3] = lcd / wrr_cost[3];

			dst->wrr_cost[qindex] = (uint8_t) wrr_cost[0];
			dst->wrr_cost[qindex + 1] = (uint8_t) wrr_cost[1];
			dst->wrr_cost[qindex + 2] = (uint8_t) wrr_cost[2];
			dst->wrr_cost[qindex + 3] = (uint8_t) wrr_cost[3];
		}

		rte_sched_port_log_pipe_profile(port, i);
	}
	port->pipe_tc3_rate_max = 0;
	for (i = 0; i < port->n_pipe_profiles; i ++) {
		struct rte_sched_pipe_params *src = params->pipe_profiles + i;
		uint32_t pipe_tc3_rate = src->tc_rate[3];

		if (port->pipe_tc3_rate_max < pipe_tc3_rate) {
			port->pipe_tc3_rate_max = pipe_tc3_rate;
		}
	}
}
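
/* The per-queue WRR costs computed above are inversely proportional to the
 * configured weights: for weights {1, 2, 4, 8} within one TC, the lowest
 * common denominator is 8, so the costs become {8, 4, 2, 1}. A queue is
 * charged cost * pkt_len tokens per scheduled packet (see grinder_schedule()),
 * so a queue with twice the weight pays half the tokens and receives twice
 * the bandwidth share.
 */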
struct rte_sched_port *
rte_sched_port_config(struct rte_sched_port_params *params)
{
	struct rte_sched_port *port = NULL;
	const struct rte_memzone *mz = NULL;
	uint32_t mem_size, bmp_mem_size, n_queues_per_port, i;

	/* Check user parameters. Determine the amount of memory to allocate */
	mem_size = rte_sched_port_get_memory_footprint(params);
	if (mem_size == 0) {
		return NULL;
	}

	/* Allocate memory to store the data structures */
	mz = rte_memzone_lookup(params->name);
	if (mz) {
		/* Use existing memzone, provided that its size is big enough */
		if (mz->len < mem_size) {
			return NULL;
		}
	} else {
		/* Create new memzone */
		mz = rte_memzone_reserve(params->name, mem_size, params->socket, 0);
		if (mz == NULL) {
			return NULL;
		}
	}
	memset(mz->addr, 0, mem_size);
	port = (struct rte_sched_port *) mz->addr;

	/* User parameters */
	port->n_subports_per_port = params->n_subports_per_port;
	port->n_pipes_per_subport = params->n_pipes_per_subport;
	port->rate = params->rate;
	port->mtu = params->mtu + params->frame_overhead;
	port->frame_overhead = params->frame_overhead;
	memcpy(port->qsize, params->qsize, sizeof(params->qsize));
	port->n_pipe_profiles = params->n_pipe_profiles;

#ifdef RTE_SCHED_RED
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		uint32_t j;

		for (j = 0; j < e_RTE_METER_COLORS; j++) {
			if (rte_red_config_init(&port->red_config[i][j],
				params->red_params[i][j].wq_log2,
				params->red_params[i][j].min_th,
				params->red_params[i][j].max_th,
				params->red_params[i][j].maxp_inv) != 0) {
				return NULL;
			}
		}
	}
#endif

	/* Timing */
	port->time_cpu_cycles = rte_get_tsc_cycles();
	port->time_cpu_bytes = 0;
	port->time = 0;
	port->cycles_per_byte = ((double) rte_get_tsc_hz()) / ((double) params->rate);

	/* Scheduling loop detection */
	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
	port->pipe_exhaustion = 0;

	/* Grinders */
	port->busy_grinders = 0;
	port->pkts_out = NULL;
	port->n_pkts_out = 0;

	/* Queue base calculation */
	rte_sched_port_config_qsize(port);

	/* Large data structures */
	port->subport = (struct rte_sched_subport *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_SUBPORT));
	port->pipe = (struct rte_sched_pipe *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_PIPE));
	port->queue = (struct rte_sched_queue *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE));
	port->queue_extra = (struct rte_sched_queue_extra *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA));
	port->pipe_profiles = (struct rte_sched_pipe_profile *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES));
	port->bmp_array = port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY);
	port->queue_array = (struct rte_mbuf **) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY));

	/* Pipe profile table */
	rte_sched_port_config_pipe_profile_table(port, params);

	/* Bitmap */
	n_queues_per_port = rte_sched_port_queues_per_port(port);
	bmp_mem_size = rte_bitmap_get_memory_footprint(n_queues_per_port);
	port->bmp = rte_bitmap_init(n_queues_per_port, port->bmp_array, bmp_mem_size);
	if (port->bmp == NULL) {
		RTE_LOG(INFO, SCHED, "Bitmap init error\n");
		return NULL;
	}
	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i ++) {
		port->grinder_base_bmp_pos[i] = RTE_SCHED_BMP_POS_INVALID;
	}

	return port;
}
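
/* Typical bring-up sequence (a minimal sketch; the parameter structures and
 * the single-subport layout below are illustrative, not defaults of this
 * library):
 *
 *	struct rte_sched_port *port = rte_sched_port_config(&port_params);
 *	uint32_t pipe;
 *
 *	if (port == NULL)
 *		rte_panic("Unable to config sched port\n");
 *	if (rte_sched_subport_config(port, 0, &subport_params) != 0)
 *		rte_panic("Unable to config sched subport\n");
 *	for (pipe = 0; pipe < port_params.n_pipes_per_subport; pipe ++)
 *		rte_sched_pipe_config(port, 0, pipe, 0);
 */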
void
rte_sched_port_free(struct rte_sched_port *port)
{
	/* Check user parameters */
	if (port == NULL) {
		return;
	}

	rte_bitmap_free(port->bmp);
}
static void
rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
{
	struct rte_sched_subport *s = port->subport + i;

	RTE_LOG(INFO, SCHED, "Low level config for subport %u:\n"
		"\tToken bucket: period = %u, credits per period = %u, size = %u\n"
		"\tTraffic classes: period = %u, credits per period = [%u, %u, %u, %u]\n"
		"\tTraffic class 3 oversubscription: wm min = %u, wm max = %u\n",
		i,

		/* Token bucket */
		s->tb_period,
		s->tb_credits_per_period,
		s->tb_size,

		/* Traffic classes */
		s->tc_period,
		s->tc_credits_per_period[0],
		s->tc_credits_per_period[1],
		s->tc_credits_per_period[2],
		s->tc_credits_per_period[3],

		/* Traffic class 3 oversubscription */
		s->tc_ov_wm_min,
		s->tc_ov_wm_max);
}
int
rte_sched_subport_config(struct rte_sched_port *port,
	uint32_t subport_id,
	struct rte_sched_subport_params *params)
{
	struct rte_sched_subport *s;
	uint32_t i;

	/* Check user parameters */
	if ((port == NULL) ||
	    (subport_id >= port->n_subports_per_port) ||
	    (params == NULL)) {
		return -1;
	}
	if ((params->tb_rate == 0) || (params->tb_rate > port->rate)) {
		return -2;
	}
	if (params->tb_size == 0) {
		return -3;
	}
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
		if ((params->tc_rate[i] == 0) || (params->tc_rate[i] > params->tb_rate)) {
			return -4;
		}
	}
	if (params->tc_period == 0) {
		return -5;
	}

	s = port->subport + subport_id;

	/* Token Bucket (TB) */
	if (params->tb_rate == port->rate) {
		s->tb_credits_per_period = 1;
		s->tb_period = 1;
	} else {
		double tb_rate = ((double) params->tb_rate) / ((double) port->rate);
		double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

		rte_approx(tb_rate, d, &s->tb_credits_per_period, &s->tb_period);
	}
	s->tb_size = params->tb_size;
	s->tb_time = port->time;
	s->tb_credits = s->tb_size / 2;

	/* Traffic Classes (TCs) */
	s->tc_period = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
		s->tc_credits_per_period[i] = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, params->tc_rate[i]);
	}
	s->tc_time = port->time + s->tc_period;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
		s->tc_credits[i] = s->tc_credits_per_period[i];
	}

#ifdef RTE_SCHED_SUBPORT_TC_OV
	/* TC oversubscription */
	s->tc_ov_wm_min = port->mtu;
	s->tc_ov_wm_max = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, port->pipe_tc3_rate_max);
	s->tc_ov_wm = s->tc_ov_wm_max;
	s->tc_ov_period_id = 0;
	s->tc_ov = 0;
	s->tc_ov_n = 0;
	s->tc_ov_rate = 0;
#endif

	rte_sched_port_log_subport_config(port, subport_id);

	return 0;
}
int
rte_sched_pipe_config(struct rte_sched_port *port,
	uint32_t subport_id,
	uint32_t pipe_id,
	int32_t pipe_profile)
{
	struct rte_sched_subport *s;
	struct rte_sched_pipe *p;
	struct rte_sched_pipe_profile *params;
	uint32_t deactivate, profile, i;

	/* Check user parameters */
	profile = (uint32_t) pipe_profile;
	deactivate = (pipe_profile < 0);
	if ((port == NULL) ||
	    (subport_id >= port->n_subports_per_port) ||
	    (pipe_id >= port->n_pipes_per_subport) ||
	    ((!deactivate) && (profile >= port->n_pipe_profiles))) {
		return -1;
	}

	/* Check that subport configuration is valid */
	s = port->subport + subport_id;
	if (s->tb_period == 0) {
		return -2;
	}

	p = port->pipe + (subport_id * port->n_pipes_per_subport + pipe_id);

	/* Handle the case when pipe already has a valid configuration */
	if (p->tb_time) {
		params = port->pipe_profiles + p->profile;

#ifdef RTE_SCHED_SUBPORT_TC_OV
		double subport_tc3_rate = ((double) s->tc_credits_per_period[3]) / ((double) s->tc_period);
		double pipe_tc3_rate = ((double) params->tc_credits_per_period[3]) / ((double) params->tc_period);
		uint32_t tc3_ov = s->tc_ov;

		/* Unplug pipe from its subport */
		s->tc_ov_n -= params->tc_ov_weight;
		s->tc_ov_rate -= pipe_tc3_rate;
		s->tc_ov = s->tc_ov_rate > subport_tc3_rate;

		if (s->tc_ov != tc3_ov) {
			RTE_LOG(INFO, SCHED, "Subport %u TC3 oversubscription is OFF (%.4lf >= %.4lf)\n",
				subport_id, subport_tc3_rate, s->tc_ov_rate);
		}
#endif

		/* Reset the pipe */
		memset(p, 0, sizeof(struct rte_sched_pipe));
	}

	if (deactivate) {
		return 0;
	}

	/* Apply the new pipe configuration */
	p->profile = profile;
	params = port->pipe_profiles + p->profile;

	/* Token Bucket (TB) */
	p->tb_time = port->time;
	p->tb_credits = params->tb_size / 2;

	/* Traffic Classes (TCs) */
	p->tc_time = port->time + params->tc_period;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
		p->tc_credits[i] = params->tc_credits_per_period[i];
	}

#ifdef RTE_SCHED_SUBPORT_TC_OV
	{
		/* Subport TC3 oversubscription */
		double subport_tc3_rate = ((double) s->tc_credits_per_period[3]) / ((double) s->tc_period);
		double pipe_tc3_rate = ((double) params->tc_credits_per_period[3]) / ((double) params->tc_period);
		uint32_t tc3_ov = s->tc_ov;

		s->tc_ov_n += params->tc_ov_weight;
		s->tc_ov_rate += pipe_tc3_rate;
		s->tc_ov = s->tc_ov_rate > subport_tc3_rate;

		if (s->tc_ov != tc3_ov) {
			RTE_LOG(INFO, SCHED, "Subport %u TC3 oversubscription is ON (%.4lf < %.4lf)\n",
				subport_id, subport_tc3_rate, s->tc_ov_rate);
		}
		p->tc_ov_period_id = s->tc_ov_period_id;
		p->tc_ov_credits = s->tc_ov_wm;
	}
#endif

	return 0;
}
int
rte_sched_subport_read_stats(struct rte_sched_port *port,
	uint32_t subport_id,
	struct rte_sched_subport_stats *stats,
	uint32_t *tc_ov)
{
	struct rte_sched_subport *s;

	/* Check user parameters */
	if ((port == NULL) ||
	    (subport_id >= port->n_subports_per_port) ||
	    (stats == NULL) ||
	    (tc_ov == NULL)) {
		return -1;
	}
	s = port->subport + subport_id;

	/* Copy subport stats and clear */
	memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
	memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));

	/* Subport TC oversubscription status */
	*tc_ov = s->tc_ov;

	return 0;
}
int
rte_sched_queue_read_stats(struct rte_sched_port *port,
	uint32_t queue_id,
	struct rte_sched_queue_stats *stats,
	uint16_t *qlen)
{
	struct rte_sched_queue *q;
	struct rte_sched_queue_extra *qe;

	/* Check user parameters */
	if ((port == NULL) ||
	    (queue_id >= rte_sched_port_queues_per_port(port)) ||
	    (stats == NULL) ||
	    (qlen == NULL)) {
		return -1;
	}
	q = port->queue + queue_id;
	qe = port->queue_extra + queue_id;

	/* Copy queue stats and clear */
	memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
	memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats));

	/* Queue length */
	*qlen = q->qw - q->qr;

	return 0;
}
static inline uint32_t
rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue)
{
	uint32_t result;

	result = subport * port->n_pipes_per_subport + pipe;
	result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + traffic_class;
	result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;

	return result;
}
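
/* The resulting queue index is a bit-packed path through the hierarchy:
 * bits [1:0] select the queue within its TC, bits [3:2] select the TC within
 * its pipe, and the remaining high bits select the pipe within the port.
 * This is why the helpers below use qindex >> 4 for the pipe, qindex & 0xF
 * for the queue position within the pipe, and (qindex >> 2) & 0x3 for the TC.
 */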
static inline struct rte_mbuf **
rte_sched_port_qbase(struct rte_sched_port *port, uint32_t qindex)
{
	uint32_t pindex = qindex >> 4;
	uint32_t qpos = qindex & 0xF;

	return (port->queue_array + pindex * port->qsize_sum + port->qsize_add[qpos]);
}

static inline uint16_t
rte_sched_port_qsize(struct rte_sched_port *port, uint32_t qindex)
{
	uint32_t tc = (qindex >> 2) & 0x3;

	return port->qsize[tc];
}
#if RTE_SCHED_DEBUG

static inline int
rte_sched_port_queue_is_empty(struct rte_sched_port *port, uint32_t qindex)
{
	struct rte_sched_queue *queue = port->queue + qindex;

	return (queue->qr == queue->qw);
}

static inline int
rte_sched_port_queue_is_full(struct rte_sched_port *port, uint32_t qindex)
{
	struct rte_sched_queue *queue = port->queue + qindex;
	uint16_t qsize = rte_sched_port_qsize(port, qindex);
	uint16_t qlen = queue->qw - queue->qr;

	return (qlen >= qsize);
}

#endif /* RTE_SCHED_DEBUG */
#ifdef RTE_SCHED_COLLECT_STATS

static inline void
rte_sched_port_update_subport_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
	struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
	uint32_t tc_index = (qindex >> 2) & 0x3;
	uint32_t pkt_len = pkt->pkt.pkt_len;

	s->stats.n_pkts_tc[tc_index] += 1;
	s->stats.n_bytes_tc[tc_index] += pkt_len;
}

static inline void
rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
	struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
	uint32_t tc_index = (qindex >> 2) & 0x3;
	uint32_t pkt_len = pkt->pkt.pkt_len;

	s->stats.n_pkts_tc_dropped[tc_index] += 1;
	s->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
}

static inline void
rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
	uint32_t pkt_len = pkt->pkt.pkt_len;

	qe->stats.n_pkts += 1;
	qe->stats.n_bytes += pkt_len;
}

static inline void
rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
	uint32_t pkt_len = pkt->pkt.pkt_len;

	qe->stats.n_pkts_dropped += 1;
	qe->stats.n_bytes_dropped += pkt_len;
}

#endif /* RTE_SCHED_COLLECT_STATS */
#ifdef RTE_SCHED_RED

static inline int
rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint32_t qindex, uint16_t qlen)
{
	struct rte_sched_queue_extra *qe;
	struct rte_red_config *red_cfg;
	struct rte_red *red;
	uint32_t tc_index;
	enum rte_meter_color color;

	tc_index = (qindex >> 2) & 0x3;
	color = rte_sched_port_pkt_read_color(pkt);
	red_cfg = &port->red_config[tc_index][color];

	qe = port->queue_extra + qindex;
	red = &qe->red;

	return rte_red_enqueue(red_cfg, red, qlen, port->time);
}

static inline void
rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port, uint32_t qindex)
{
	struct rte_sched_queue_extra *qe;
	struct rte_red *red;

	qe = port->queue_extra + qindex;
	red = &qe->red;

	rte_red_mark_queue_empty(red, port->time);
}

#else

#define rte_sched_port_red_drop(port, pkt, qindex, qlen) 0

#define rte_sched_port_set_queue_empty_timestamp(port, qindex)

#endif /* RTE_SCHED_RED */
#if RTE_SCHED_DEBUG

static inline int
debug_pipe_is_empty(struct rte_sched_port *port, uint32_t pindex)
{
	uint32_t qindex, i;

	qindex = pindex << 4;

	for (i = 0; i < 16; i ++){
		uint32_t queue_empty = rte_sched_port_queue_is_empty(port, qindex + i);
		uint32_t bmp_bit_clear = (rte_bitmap_get(port->bmp, qindex + i) == 0);

		if (queue_empty != bmp_bit_clear){
			rte_panic("Queue status mismatch for queue %u of pipe %u\n", i, pindex);
		}

		if (!queue_empty){
			return 0;
		}
	}

	return 1;
}

static inline void
debug_check_queue_slab(struct rte_sched_port *port, uint32_t bmp_pos, uint64_t bmp_slab)
{
	uint64_t mask;
	uint32_t i, panic;

	if (bmp_slab == 0){
		rte_panic("Empty slab at position %u\n", bmp_pos);
	}

	panic = 0;
	for (i = 0, mask = 1; i < 64; i ++, mask <<= 1) {
		if (mask & bmp_slab){
			if (rte_sched_port_queue_is_empty(port, bmp_pos + i)) {
				printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i);
				panic = 1;
			}
		}
	}

	if (panic){
		rte_panic("Empty queues in slab 0x%" PRIx64 " starting at position %u\n",
			bmp_slab, bmp_pos);
	}
}

#endif /* RTE_SCHED_DEBUG */
static inline uint32_t
rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_port *port, struct rte_mbuf *pkt)
{
	struct rte_sched_queue *q;
#ifdef RTE_SCHED_COLLECT_STATS
	struct rte_sched_queue_extra *qe;
#endif
	uint32_t subport, pipe, traffic_class, queue, qindex;

	rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &traffic_class, &queue);

	qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
	q = port->queue + qindex;
	rte_prefetch0(q);
#ifdef RTE_SCHED_COLLECT_STATS
	qe = port->queue_extra + qindex;
	rte_prefetch0(qe);
#endif

	return qindex;
}
static inline void
rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf **qbase)
{
	struct rte_sched_queue *q;
	struct rte_mbuf **q_qw;
	uint16_t qsize;

	q = port->queue + qindex;
	qsize = rte_sched_port_qsize(port, qindex);
	q_qw = qbase + (q->qw & (qsize - 1));

	rte_prefetch0(q_qw);
	rte_bitmap_prefetch0(port->bmp, qindex);
}
static inline int
rte_sched_port_enqueue_qwa(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf **qbase, struct rte_mbuf *pkt)
{
	struct rte_sched_queue *q;
	uint16_t qsize;
	uint16_t qlen;

	q = port->queue + qindex;
	qsize = rte_sched_port_qsize(port, qindex);
	qlen = q->qw - q->qr;

	/* Drop the packet (and update drop stats) when queue is full */
	if (unlikely(rte_sched_port_red_drop(port, pkt, qindex, qlen) || (qlen >= qsize))) {
#ifdef RTE_SCHED_COLLECT_STATS
		/* Update drop stats before freeing the mbuf they read from */
		rte_sched_port_update_subport_stats_on_drop(port, qindex, pkt);
		rte_sched_port_update_queue_stats_on_drop(port, qindex, pkt);
#endif
		rte_pktmbuf_free(pkt);
		return 0;
	}

	/* Enqueue packet */
	qbase[q->qw & (qsize - 1)] = pkt;
	q->qw ++;

	/* Activate queue in the port bitmap */
	rte_bitmap_set(port->bmp, qindex);

	/* Statistics */
#ifdef RTE_SCHED_COLLECT_STATS
	rte_sched_port_update_subport_stats(port, qindex, pkt);
	rte_sched_port_update_queue_stats(port, qindex, pkt);
#endif

	return 1;
}
#if RTE_SCHED_ENQUEUE == 0

int
rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	uint32_t result, i;

	result = 0;

	for (i = 0; i < n_pkts; i ++) {
		struct rte_mbuf *pkt;
		struct rte_mbuf **q_base;
		uint32_t subport, pipe, traffic_class, queue, qindex;

		pkt = pkts[i];

		rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &traffic_class, &queue);

		qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);

		q_base = rte_sched_port_qbase(port, qindex);

		result += rte_sched_port_enqueue_qwa(port, qindex, q_base, pkt);
	}

	return result;
}

#else
/* The enqueue function implements a 4-level pipeline with each stage processing
 * two different packets. The purpose of using a pipeline is to hide the latency
 * of prefetching the data structures. The naming convention is presented in the
 * diagram below:
 *
 *   p00  _______   p10  _______   p20  _______   p30  _______
 * ----->|       |----->|       |----->|       |----->|       |----->
 *       |   0   |      |   1   |      |   2   |      |   3   |
 * ----->|_______|----->|_______|----->|_______|----->|_______|----->
 *   p01            p11            p21            p31
 */
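/* Stage 0 reads two new packets in and prefetches their mbufs; stage 1
 * prefetches the queue structures (read/write pointers); stage 2 prefetches
 * the queue write location in the queue array; stage 3 writes each packet to
 * its queue and activates the queue in the port bitmap.
 */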
int
rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21, *pkt30, *pkt31, *pkt_last;
	struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base, **q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
	uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
	uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
	uint32_t result, i;

	result = 0;

	/* Less than 6 input packets available, which is not enough to feed the pipeline */
	if (unlikely(n_pkts < 6)) {
		struct rte_mbuf **q_base[5];
		uint32_t q[5];

		/* Prefetch the mbuf structure of each packet */
		for (i = 0; i < n_pkts; i ++) {
			rte_prefetch0(pkts[i]);
		}

		/* Prefetch the queue structure for each queue */
		for (i = 0; i < n_pkts; i ++) {
			q[i] = rte_sched_port_enqueue_qptrs_prefetch0(port, pkts[i]);
		}

		/* Prefetch the write pointer location of each queue */
		for (i = 0; i < n_pkts; i ++) {
			q_base[i] = rte_sched_port_qbase(port, q[i]);
			rte_sched_port_enqueue_qwa_prefetch0(port, q[i], q_base[i]);
		}

		/* Write each packet to its queue */
		for (i = 0; i < n_pkts; i ++) {
			result += rte_sched_port_enqueue_qwa(port, q[i], q_base[i], pkts[i]);
		}

		return result;
	}

	/* Feed the first 3 stages of the pipeline (6 packets needed) */
	pkt20 = pkts[0];
	pkt21 = pkts[1];
	rte_prefetch0(pkt20);
	rte_prefetch0(pkt21);

	pkt10 = pkts[2];
	pkt11 = pkts[3];
	rte_prefetch0(pkt10);
	rte_prefetch0(pkt11);

	q20 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt20);
	q21 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt21);

	pkt00 = pkts[4];
	pkt01 = pkts[5];
	rte_prefetch0(pkt00);
	rte_prefetch0(pkt01);

	q10 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt10);
	q11 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt11);

	q20_base = rte_sched_port_qbase(port, q20);
	q21_base = rte_sched_port_qbase(port, q21);
	rte_sched_port_enqueue_qwa_prefetch0(port, q20, q20_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, q21, q21_base);

	/* Run the pipeline */
	for (i = 6; i < (n_pkts & (~1)); i += 2) {
		/* Propagate stage inputs */
		pkt30 = pkt20;
		pkt31 = pkt21;
		pkt20 = pkt10;
		pkt21 = pkt11;
		pkt10 = pkt00;
		pkt11 = pkt01;
		q30 = q20;
		q31 = q21;
		q20 = q10;
		q21 = q11;
		q30_base = q20_base;
		q31_base = q21_base;

		/* Stage 0: Get packets in */
		pkt00 = pkts[i];
		pkt01 = pkts[i + 1];
		rte_prefetch0(pkt00);
		rte_prefetch0(pkt01);

		/* Stage 1: Prefetch queue structure storing queue pointers */
		q10 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt10);
		q11 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt11);

		/* Stage 2: Prefetch queue write location */
		q20_base = rte_sched_port_qbase(port, q20);
		q21_base = rte_sched_port_qbase(port, q21);
		rte_sched_port_enqueue_qwa_prefetch0(port, q20, q20_base);
		rte_sched_port_enqueue_qwa_prefetch0(port, q21, q21_base);

		/* Stage 3: Write packet to queue and activate queue */
		r30 = rte_sched_port_enqueue_qwa(port, q30, q30_base, pkt30);
		r31 = rte_sched_port_enqueue_qwa(port, q31, q31_base, pkt31);
		result += r30 + r31;
	}

	/* Drain the pipeline (exactly 6 packets). Handle the last packet in the case
	of an odd number of input packets. */
	pkt_last = pkts[n_pkts - 1];
	rte_prefetch0(pkt_last);

	q00 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt00);
	q01 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt01);

	q10_base = rte_sched_port_qbase(port, q10);
	q11_base = rte_sched_port_qbase(port, q11);
	rte_sched_port_enqueue_qwa_prefetch0(port, q10, q10_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, q11, q11_base);

	r20 = rte_sched_port_enqueue_qwa(port, q20, q20_base, pkt20);
	r21 = rte_sched_port_enqueue_qwa(port, q21, q21_base, pkt21);
	result += r20 + r21;

	q_last = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt_last);

	q00_base = rte_sched_port_qbase(port, q00);
	q01_base = rte_sched_port_qbase(port, q01);
	rte_sched_port_enqueue_qwa_prefetch0(port, q00, q00_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, q01, q01_base);

	r10 = rte_sched_port_enqueue_qwa(port, q10, q10_base, pkt10);
	r11 = rte_sched_port_enqueue_qwa(port, q11, q11_base, pkt11);
	result += r10 + r11;

	q_last_base = rte_sched_port_qbase(port, q_last);
	rte_sched_port_enqueue_qwa_prefetch0(port, q_last, q_last_base);

	r00 = rte_sched_port_enqueue_qwa(port, q00, q00_base, pkt00);
	r01 = rte_sched_port_enqueue_qwa(port, q01, q01_base, pkt01);
	result += r00 + r01;

	if (n_pkts & 1) {
		r_last = rte_sched_port_enqueue_qwa(port, q_last, q_last_base, pkt_last);
		result += r_last;
	}

	return result;
}
#endif /* RTE_SCHED_ENQUEUE */

#if RTE_SCHED_TS_CREDITS_UPDATE == 0

#define grinder_credits_update(port, pos)
#elif !defined(RTE_SCHED_SUBPORT_TC_OV)

static inline void
grinder_credits_update(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *params = grinder->pipe_params;
	uint64_t n_periods;

	/* Subport TB */
	n_periods = (port->time - subport->tb_time) / subport->tb_period;
	subport->tb_credits += n_periods * subport->tb_credits_per_period;
	subport->tb_credits = rte_sched_min_val_2_u32(subport->tb_credits, subport->tb_size);
	subport->tb_time += n_periods * subport->tb_period;

	/* Pipe TB */
	n_periods = (port->time - pipe->tb_time) / params->tb_period;
	pipe->tb_credits += n_periods * params->tb_credits_per_period;
	pipe->tb_credits = rte_sched_min_val_2_u32(pipe->tb_credits, params->tb_size);
	pipe->tb_time += n_periods * params->tb_period;

	/* Subport TCs */
	if (unlikely(port->time >= subport->tc_time)) {
		subport->tc_credits[0] = subport->tc_credits_per_period[0];
		subport->tc_credits[1] = subport->tc_credits_per_period[1];
		subport->tc_credits[2] = subport->tc_credits_per_period[2];
		subport->tc_credits[3] = subport->tc_credits_per_period[3];
		subport->tc_time = port->time + subport->tc_period;
	}

	/* Pipe TCs */
	if (unlikely(port->time >= pipe->tc_time)) {
		pipe->tc_credits[0] = params->tc_credits_per_period[0];
		pipe->tc_credits[1] = params->tc_credits_per_period[1];
		pipe->tc_credits[2] = params->tc_credits_per_period[2];
		pipe->tc_credits[3] = params->tc_credits_per_period[3];
		pipe->tc_time = port->time + params->tc_period;
	}
}

#else
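
/* The subport TC3 oversubscription watermark (tc_ov_wm) caps how many
 * credits each pipe may take from the best-effort traffic class per
 * enforcement period. It adapts by roughly 1/128 of its value per subport
 * TC period: it shrinks when TC3 consumption exceeds the subport capacity
 * left over by TC0..TC2 (minus one MTU of slack) and grows back otherwise,
 * staying within [tc_ov_wm_min, tc_ov_wm_max].
 */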
static inline uint32_t
grinder_tc_ov_credits_update(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	uint32_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tc_ov_consumption_max;
	uint32_t tc_ov_wm = subport->tc_ov_wm;

	if (subport->tc_ov == 0) {
		return subport->tc_ov_wm_max;
	}

	tc_ov_consumption[0] = subport->tc_credits_per_period[0] - subport->tc_credits[0];
	tc_ov_consumption[1] = subport->tc_credits_per_period[1] - subport->tc_credits[1];
	tc_ov_consumption[2] = subport->tc_credits_per_period[2] - subport->tc_credits[2];
	tc_ov_consumption[3] = subport->tc_credits_per_period[3] - subport->tc_credits[3];

	tc_ov_consumption_max = subport->tc_credits_per_period[3] -
		(tc_ov_consumption[0] + tc_ov_consumption[1] + tc_ov_consumption[2]);

	if (tc_ov_consumption[3] > (tc_ov_consumption_max - port->mtu)) {
		tc_ov_wm -= tc_ov_wm >> 7;
		if (tc_ov_wm < subport->tc_ov_wm_min) {
			tc_ov_wm = subport->tc_ov_wm_min;
		}
		return tc_ov_wm;
	}

	tc_ov_wm += (tc_ov_wm >> 7) + 1;
	if (tc_ov_wm > subport->tc_ov_wm_max) {
		tc_ov_wm = subport->tc_ov_wm_max;
	}

	return tc_ov_wm;
}
static inline void
grinder_credits_update(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *params = grinder->pipe_params;
	uint64_t n_periods;

	/* Subport TB */
	n_periods = (port->time - subport->tb_time) / subport->tb_period;
	subport->tb_credits += n_periods * subport->tb_credits_per_period;
	subport->tb_credits = rte_sched_min_val_2_u32(subport->tb_credits, subport->tb_size);
	subport->tb_time += n_periods * subport->tb_period;

	/* Pipe TB */
	n_periods = (port->time - pipe->tb_time) / params->tb_period;
	pipe->tb_credits += n_periods * params->tb_credits_per_period;
	pipe->tb_credits = rte_sched_min_val_2_u32(pipe->tb_credits, params->tb_size);
	pipe->tb_time += n_periods * params->tb_period;

	/* Subport TCs */
	if (unlikely(port->time >= subport->tc_time)) {
		subport->tc_ov_wm = grinder_tc_ov_credits_update(port, pos);

		subport->tc_credits[0] = subport->tc_credits_per_period[0];
		subport->tc_credits[1] = subport->tc_credits_per_period[1];
		subport->tc_credits[2] = subport->tc_credits_per_period[2];
		subport->tc_credits[3] = subport->tc_credits_per_period[3];

		subport->tc_time = port->time + subport->tc_period;
		subport->tc_ov_period_id ++;
	}

	/* Pipe TCs */
	if (unlikely(port->time >= pipe->tc_time)) {
		pipe->tc_credits[0] = params->tc_credits_per_period[0];
		pipe->tc_credits[1] = params->tc_credits_per_period[1];
		pipe->tc_credits[2] = params->tc_credits_per_period[2];
		pipe->tc_credits[3] = params->tc_credits_per_period[3];
		pipe->tc_time = port->time + params->tc_period;
	}

	/* Pipe TCs - Oversubscription */
	if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
		pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;

		pipe->tc_ov_period_id = subport->tc_ov_period_id;
	}
}

#endif /* RTE_SCHED_TS_CREDITS_UPDATE, RTE_SCHED_SUBPORT_TC_OV */
#if RTE_SCHED_TS_CREDITS_CHECK

#ifndef RTE_SCHED_SUBPORT_TC_OV

static inline int
grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t tc_index = grinder->tc_index;
	uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;
	uint32_t subport_tb_credits = subport->tb_credits;
	uint32_t subport_tc_credits = subport->tc_credits[tc_index];
	uint32_t pipe_tb_credits = pipe->tb_credits;
	uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
	int enough_credits;

	/* Check queue credits */
	enough_credits = (pkt_len <= subport_tb_credits) &&
		(pkt_len <= subport_tc_credits) &&
		(pkt_len <= pipe_tb_credits) &&
		(pkt_len <= pipe_tc_credits);

	if (!enough_credits) {
		return 0;
	}

	/* Update port credits */
	subport->tb_credits -= pkt_len;
	subport->tc_credits[tc_index] -= pkt_len;
	pipe->tb_credits -= pkt_len;
	pipe->tc_credits[tc_index] -= pkt_len;

	return 1;
}

#else
static inline int
grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t tc_index = grinder->tc_index;
	uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;
	uint32_t subport_tb_credits = subport->tb_credits;
	uint32_t subport_tc_credits = subport->tc_credits[tc_index];
	uint32_t pipe_tb_credits = pipe->tb_credits;
	uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
	/* Only TC3 is limited by (and charged against) the oversubscription credits */
	uint32_t pipe_tc_ov_mask1[] = {UINT32_MAX, UINT32_MAX, UINT32_MAX, pipe->tc_ov_credits};
	uint32_t pipe_tc_ov_mask2[] = {0, 0, 0, UINT32_MAX};
	uint32_t pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
	int enough_credits;

	/* Check pipe and subport credits */
	enough_credits = (pkt_len <= subport_tb_credits) &&
		(pkt_len <= subport_tc_credits) &&
		(pkt_len <= pipe_tb_credits) &&
		(pkt_len <= pipe_tc_credits) &&
		(pkt_len <= pipe_tc_ov_credits);

	if (!enough_credits) {
		return 0;
	}

	/* Update pipe and subport credits */
	subport->tb_credits -= pkt_len;
	subport->tc_credits[tc_index] -= pkt_len;
	pipe->tb_credits -= pkt_len;
	pipe->tc_credits[tc_index] -= pkt_len;
	pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;

	return 1;
}

#endif /* RTE_SCHED_SUBPORT_TC_OV */

#endif /* RTE_SCHED_TS_CREDITS_CHECK */
static inline int
grinder_schedule(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;

#if RTE_SCHED_TS_CREDITS_CHECK
	if (!grinder_credits_check(port, pos)) {
		return 0;
	}
#endif

	/* Advance port time */
	port->time += pkt_len;

	/* Send packet */
	port->pkts_out[port->n_pkts_out ++] = pkt;
	queue->qr ++;
	grinder->wrr_tokens[grinder->qpos] += pkt_len * grinder->wrr_cost[grinder->qpos];
	if (queue->qr == queue->qw) {
		uint32_t qindex = grinder->qindex[grinder->qpos];

		rte_bitmap_clear(port->bmp, qindex);
		grinder->qmask &= ~(1 << grinder->qpos);
		grinder->wrr_mask[grinder->qpos] = 0;
		rte_sched_port_set_queue_empty_timestamp(port, qindex);
	}

	/* Reset pipe loop detection */
	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
	grinder->productive = 1;

	return 1;
}
#if RTE_SCHED_OPTIMIZATIONS

static inline int
grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
{
	__m128i index = _mm_set1_epi32 (base_pipe);
	__m128i pipes = _mm_load_si128((__m128i *)port->grinder_base_bmp_pos);
	__m128i res = _mm_cmpeq_epi32(pipes, index);
	pipes = _mm_load_si128((__m128i *)(port->grinder_base_bmp_pos + 4));
	pipes = _mm_cmpeq_epi32(pipes, index);
	res = _mm_or_si128(res, pipes);

	if (_mm_testz_si128(res, res))
		return 0;

	return 1;
}

#else

static inline int
grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
{
	uint32_t i;

	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i ++) {
		if (port->grinder_base_bmp_pos[i] == base_pipe) {
			return 1;
		}
	}

	return 0;
}

#endif /* RTE_SCHED_OPTIMIZATIONS */
static inline void
grinder_pcache_populate(struct rte_sched_port *port, uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint16_t w[4];

	grinder->pcache_w = 0;
	grinder->pcache_r = 0;

	w[0] = (uint16_t) bmp_slab;
	w[1] = (uint16_t) (bmp_slab >> 16);
	w[2] = (uint16_t) (bmp_slab >> 32);
	w[3] = (uint16_t) (bmp_slab >> 48);

	grinder->pcache_qmask[grinder->pcache_w] = w[0];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
	grinder->pcache_w += (w[0] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[1];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
	grinder->pcache_w += (w[1] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[2];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
	grinder->pcache_w += (w[2] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[3];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
	grinder->pcache_w += (w[3] != 0);
}
static inline void
grinder_tccache_populate(struct rte_sched_port *port, uint32_t pos, uint32_t qindex, uint16_t qmask)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint8_t b[4];

	grinder->tccache_w = 0;
	grinder->tccache_r = 0;

	b[0] = (uint8_t) (qmask & 0xF);
	b[1] = (uint8_t) ((qmask >> 4) & 0xF);
	b[2] = (uint8_t) ((qmask >> 8) & 0xF);
	b[3] = (uint8_t) ((qmask >> 12) & 0xF);

	grinder->tccache_qmask[grinder->tccache_w] = b[0];
	grinder->tccache_qindex[grinder->tccache_w] = qindex;
	grinder->tccache_w += (b[0] != 0);

	grinder->tccache_qmask[grinder->tccache_w] = b[1];
	grinder->tccache_qindex[grinder->tccache_w] = qindex + 4;
	grinder->tccache_w += (b[1] != 0);

	grinder->tccache_qmask[grinder->tccache_w] = b[2];
	grinder->tccache_qindex[grinder->tccache_w] = qindex + 8;
	grinder->tccache_w += (b[2] != 0);

	grinder->tccache_qmask[grinder->tccache_w] = b[3];
	grinder->tccache_qindex[grinder->tccache_w] = qindex + 12;
	grinder->tccache_w += (b[3] != 0);
}
static inline int
grinder_next_tc(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_mbuf **qbase;
	uint32_t qindex;
	uint16_t qsize;

	if (grinder->tccache_r == grinder->tccache_w) {
		return 0;
	}

	qindex = grinder->tccache_qindex[grinder->tccache_r];
	qbase = rte_sched_port_qbase(port, qindex);
	qsize = rte_sched_port_qsize(port, qindex);

	grinder->tc_index = (qindex >> 2) & 0x3;
	grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
	grinder->qsize = qsize;

	grinder->qindex[0] = qindex;
	grinder->qindex[1] = qindex + 1;
	grinder->qindex[2] = qindex + 2;
	grinder->qindex[3] = qindex + 3;

	grinder->queue[0] = port->queue + qindex;
	grinder->queue[1] = port->queue + qindex + 1;
	grinder->queue[2] = port->queue + qindex + 2;
	grinder->queue[3] = port->queue + qindex + 3;

	grinder->qbase[0] = qbase;
	grinder->qbase[1] = qbase + qsize;
	grinder->qbase[2] = qbase + 2 * qsize;
	grinder->qbase[3] = qbase + 3 * qsize;

	grinder->tccache_r ++;
	return 1;
}
static inline int
grinder_next_pipe(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint32_t pipe_qindex;
	uint16_t pipe_qmask;

	if (grinder->pcache_r < grinder->pcache_w) {
		pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
		pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
		grinder->pcache_r ++;
	} else {
		uint64_t bmp_slab = 0;
		uint32_t bmp_pos = 0;

		/* Get another non-empty pipe group */
		if (unlikely(rte_bitmap_scan(port->bmp, &bmp_pos, &bmp_slab) <= 0)) {
			return 0;
		}

#if RTE_SCHED_DEBUG
		debug_check_queue_slab(port, bmp_pos, bmp_slab);
#endif

		/* Return if pipe group already in one of the other grinders */
		port->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
		if (unlikely(grinder_pipe_exists(port, bmp_pos))) {
			return 0;
		}
		port->grinder_base_bmp_pos[pos] = bmp_pos;

		/* Install new pipe group into grinder's pipe cache */
		grinder_pcache_populate(port, pos, bmp_pos, bmp_slab);

		pipe_qmask = grinder->pcache_qmask[0];
		pipe_qindex = grinder->pcache_qindex[0];
		grinder->pcache_r = 1;
	}

	/* Install new pipe in the grinder */
	grinder->pindex = pipe_qindex >> 4;
	grinder->subport = port->subport + (grinder->pindex / port->n_pipes_per_subport);
	grinder->pipe = port->pipe + grinder->pindex;
	grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
	grinder->productive = 0;

	grinder_tccache_populate(port, pos, pipe_qindex, pipe_qmask);
	grinder_next_tc(port, pos);

	/* Check for pipe exhaustion */
	if (grinder->pindex == port->pipe_loop) {
		port->pipe_exhaustion = 1;
		port->pipe_loop = RTE_SCHED_PIPE_INVALID;
	}

	return 1;
}
#if RTE_SCHED_WRR == 0

#define grinder_wrr_load(a,b)

#define grinder_wrr_store(a,b)

static inline void
grinder_wrr(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint64_t slab = grinder->qmask;

	if (rte_bsf64(slab, &grinder->qpos) == 0) {
		rte_panic("grinder wrr\n");
	}
}

#elif RTE_SCHED_WRR == 1

static inline void
grinder_wrr_load(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
	uint32_t tc_index = grinder->tc_index;
	uint32_t qmask = grinder->qmask;
	uint32_t qindex;

	qindex = tc_index * 4;

	grinder->wrr_tokens[0] = ((uint16_t) pipe->wrr_tokens[qindex]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[1] = ((uint16_t) pipe->wrr_tokens[qindex + 1]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[2] = ((uint16_t) pipe->wrr_tokens[qindex + 2]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[3] = ((uint16_t) pipe->wrr_tokens[qindex + 3]) << RTE_SCHED_WRR_SHIFT;

	grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
	grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
	grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
	grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;

	grinder->wrr_cost[0] = pipe_params->wrr_cost[qindex];
	grinder->wrr_cost[1] = pipe_params->wrr_cost[qindex + 1];
	grinder->wrr_cost[2] = pipe_params->wrr_cost[qindex + 2];
	grinder->wrr_cost[3] = pipe_params->wrr_cost[qindex + 3];
}

static inline void
grinder_wrr_store(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	uint32_t tc_index = grinder->tc_index;
	uint32_t qindex;

	qindex = tc_index * 4;

	pipe->wrr_tokens[qindex] = (uint8_t) ((grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >> RTE_SCHED_WRR_SHIFT);
	pipe->wrr_tokens[qindex + 1] = (uint8_t) ((grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >> RTE_SCHED_WRR_SHIFT);
	pipe->wrr_tokens[qindex + 2] = (uint8_t) ((grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >> RTE_SCHED_WRR_SHIFT);
	pipe->wrr_tokens[qindex + 3] = (uint8_t) ((grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >> RTE_SCHED_WRR_SHIFT);
}

static inline void
grinder_wrr(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint16_t wrr_tokens_min;

	grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
	grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
	grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
	grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];

	grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
	wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];

	grinder->wrr_tokens[0] -= wrr_tokens_min;
	grinder->wrr_tokens[1] -= wrr_tokens_min;
	grinder->wrr_tokens[2] -= wrr_tokens_min;
	grinder->wrr_tokens[3] -= wrr_tokens_min;
}

#else

#error Invalid value for RTE_SCHED_WRR

#endif /* RTE_SCHED_WRR */
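
/* In the RTE_SCHED_WRR == 1 case above, each of the 4 queues of the active
 * TC owns a token counter scaled up by RTE_SCHED_WRR_SHIFT. Inactive queues
 * have their tokens forced to 0xFFFF through wrr_mask, so rte_min_pos_4_u16()
 * always selects the active queue with the fewest consumed tokens; the
 * minimum is then subtracted from all four counters to keep them from
 * overflowing across scheduling rounds.
 */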
#define grinder_evict(port, pos)

static inline void
grinder_prefetch_pipe(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;

	rte_prefetch0(grinder->pipe);
	rte_prefetch0(grinder->queue[0]);
}
static inline void
grinder_prefetch_tc_queue_arrays(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint16_t qsize, qr[4];

	qsize = grinder->qsize;
	qr[0] = grinder->queue[0]->qr & (qsize - 1);
	qr[1] = grinder->queue[1]->qr & (qsize - 1);
	qr[2] = grinder->queue[2]->qr & (qsize - 1);
	qr[3] = grinder->queue[3]->qr & (qsize - 1);

	rte_prefetch0(grinder->qbase[0] + qr[0]);
	rte_prefetch0(grinder->qbase[1] + qr[1]);

	grinder_wrr_load(port, pos);
	grinder_wrr(port, pos);

	rte_prefetch0(grinder->qbase[2] + qr[2]);
	rte_prefetch0(grinder->qbase[3] + qr[3]);
}
static inline void
grinder_prefetch_mbuf(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint32_t qpos = grinder->qpos;
	struct rte_mbuf **qbase = grinder->qbase[qpos];
	uint16_t qsize = grinder->qsize;
	uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);

	grinder->pkt = qbase[qr];
	rte_prefetch0(grinder->pkt);

	if (unlikely((qr & 0x7) == 7)) {
		uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);

		rte_prefetch0(qbase + qr_next);
	}
}
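
/* Each grinder is a small state machine (see enum grinder_state): it
 * prefetches the pipe, then the active TC queue arrays, then the head-of-line
 * mbuf, and only then reads and schedules the packet. With
 * RTE_SCHED_PORT_N_GRINDERS grinders rotated round-robin by the dequeue loop,
 * the prefetch latency of one pipe overlaps with useful work on the others.
 */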
static inline uint32_t
grinder_handle(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;

	switch (grinder->state) {
	case e_GRINDER_PREFETCH_PIPE:
	{
		if (grinder_next_pipe(port, pos)) {
			grinder_prefetch_pipe(port, pos);
			port->busy_grinders ++;

			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
		}

		return 0;
	}

	case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
	{
		struct rte_sched_pipe *pipe = grinder->pipe;

		grinder->pipe_params = port->pipe_profiles + pipe->profile;
		grinder_prefetch_tc_queue_arrays(port, pos);
		grinder_credits_update(port, pos);

		grinder->state = e_GRINDER_PREFETCH_MBUF;
		return 0;
	}

	case e_GRINDER_PREFETCH_MBUF:
	{
		grinder_prefetch_mbuf(port, pos);

		grinder->state = e_GRINDER_READ_MBUF;
		return 0;
	}

	case e_GRINDER_READ_MBUF:
	{
		uint32_t result = 0;

		result = grinder_schedule(port, pos);

		/* Look for next packet within the same TC */
		if (result && grinder->qmask) {
			grinder_wrr(port, pos);
			grinder_prefetch_mbuf(port, pos);

			return 1;
		}
		grinder_wrr_store(port, pos);

		/* Look for another active TC within same pipe */
		if (grinder_next_tc(port, pos)) {
			grinder_prefetch_tc_queue_arrays(port, pos);

			grinder->state = e_GRINDER_PREFETCH_MBUF;
			return result;
		}
		if ((grinder->productive == 0) && (port->pipe_loop == RTE_SCHED_PIPE_INVALID)) {
			port->pipe_loop = grinder->pindex;
		}
		grinder_evict(port, pos);

		/* Look for another active pipe */
		if (grinder_next_pipe(port, pos)) {
			grinder_prefetch_pipe(port, pos);

			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
			return result;
		}

		/* No active pipe found */
		port->busy_grinders --;

		grinder->state = e_GRINDER_PREFETCH_PIPE;
		return result;
	}

	default:
		rte_panic("Algorithmic error (invalid state)\n");
		return 0;
	}
}
static inline void
rte_sched_port_time_resync(struct rte_sched_port *port)
{
	uint64_t cycles = rte_get_tsc_cycles();
	uint64_t cycles_diff = cycles - port->time_cpu_cycles;
	double bytes_diff = ((double) cycles_diff) / port->cycles_per_byte;

	/* Advance port time */
	port->time_cpu_cycles = cycles;
	port->time_cpu_bytes += (uint64_t) bytes_diff;
	if (port->time < port->time_cpu_bytes) {
		port->time = port->time_cpu_bytes;
	}

	/* Reset pipe loop detection */
	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
}
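
/* Example (illustrative figures, not defaults): with a 3 GHz TSC and a port
 * rate of 1.25e9 bytes/s (10 GbE), cycles_per_byte = 3e9 / 1.25e9 = 2.4, so
 * 24,000 elapsed cycles account for 10,000 bytes of transmit time. Note that
 * port->time only moves forward: it is bumped up to the CPU byte-time when it
 * lags behind, but never rewound when scheduling has run ahead of real time.
 */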
static inline int
rte_sched_port_exceptions(struct rte_sched_port *port)
{
	int exceptions;

	/* Check if any exception flag is set */
	exceptions = (port->busy_grinders == 0) ||
		(port->pipe_exhaustion == 1);

	/* Clear exception flags */
	port->pipe_exhaustion = 0;

	return exceptions;
}
int
rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	uint32_t i, count;

	port->pkts_out = pkts;
	port->n_pkts_out = 0;

	rte_sched_port_time_resync(port);

	/* Take each queue in the grinder one step further */
	for (i = 0, count = 0; ; i ++) {
		count += grinder_handle(port, i & (RTE_SCHED_PORT_N_GRINDERS - 1));
		if ((count == n_pkts) || rte_sched_port_exceptions(port)) {
			break;
		}
	}

	return count;
}
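
/* Typical datapath usage (a minimal sketch; the core layout, burst size and
 * classification step are hypothetical, not mandated by this file):
 *
 *	struct rte_mbuf *burst[32];
 *	uint32_t n;
 *
 *	n = rte_eth_rx_burst(port_id, queue_id, burst, 32);
 *	// classify each packet, e.g. with rte_sched_port_pkt_write(), to set
 *	// its subport/pipe/traffic class/queue path and color
 *	rte_sched_port_enqueue(port, burst, n);
 *	n = rte_sched_port_dequeue(port, burst, 32);
 *	rte_eth_tx_burst(port_id, queue_id, burst, n);
 */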