/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf.h>

#include "rte_sched.h"
#include "rte_bitmap.h"
#include "rte_sched_common.h"
#include "rte_approx.h"
#include "rte_reciprocal.h"

#ifdef __INTEL_COMPILER
#pragma warning(disable:2259) /* conversion may lose significant bits */
#endif

#ifdef RTE_SCHED_VECTOR
#include <rte_vect.h>

#if defined(__SSE4__)
#define SCHED_VECTOR_SSE4
#endif

#endif

#define RTE_SCHED_TB_RATE_CONFIG_ERR          (1e-7)
#define RTE_SCHED_WRR_SHIFT                   3
#define RTE_SCHED_GRINDER_PCACHE_SIZE         (64 / RTE_SCHED_QUEUES_PER_PIPE)
#define RTE_SCHED_PIPE_INVALID                UINT32_MAX
#define RTE_SCHED_BMP_POS_INVALID             UINT32_MAX

/* Scaling for cycles_per_byte calculation
 * Chosen so that minimum rate is 480 bit/sec
 */
#define RTE_SCHED_TIME_SHIFT                  8
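/*
 * Worked example of the scaling above (illustrative figures, not taken
 * from this file): with a 2 GHz TSC and a 10 GbE port (1.25e9 bytes/sec),
 *
 *   cycles_per_byte = (2e9 << 8) / 1.25e9 = 409   (in 1/256 cycle units)
 *
 * and elapsed bytes are recovered in rte_sched_port_time_resync() as
 * (cycles_diff << 8) / cycles_per_byte. Since cycles_per_byte is a 32-bit
 * value, the smallest configurable rate is hz / 2^24 bytes/sec, i.e.
 * roughly 480 bit/sec for a ~1 GHz TSC.
 */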
struct rte_sched_subport {
	/* Token bucket (TB) */
	uint64_t tb_time; /* time of last update */
	uint32_t tb_period;
	uint32_t tb_credits_per_period;
	uint32_t tb_size;
	uint32_t tb_credits;

	/* Traffic classes (TCs) */
	uint64_t tc_time; /* time of next update */
	uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tc_period;

	/* TC oversubscription */
	uint32_t tc_ov_wm;
	uint32_t tc_ov_wm_min;
	uint32_t tc_ov_wm_max;
	uint8_t tc_ov_period_id;
	uint8_t tc_ov;
	uint32_t tc_ov_n;
	double tc_ov_rate;

	/* Statistics */
	struct rte_sched_subport_stats stats;
} __rte_cache_aligned;

struct rte_sched_pipe_profile {
	/* Token bucket (TB) */
	uint32_t tb_period;
	uint32_t tb_credits_per_period;
	uint32_t tb_size;

	/* Pipe traffic classes */
	uint32_t tc_period;
	uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint8_t tc_ov_weight;

	/* Pipe queues */
	uint8_t wrr_cost[RTE_SCHED_QUEUES_PER_PIPE];
} __rte_cache_aligned;

struct rte_sched_pipe {
	/* Token bucket (TB) */
	uint64_t tb_time; /* time of last update */
	uint32_t tb_credits;

	/* Pipe profile and flags */
	uint32_t profile;

	/* Traffic classes (TCs) */
	uint64_t tc_time; /* time of next update */
	uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

	/* Weighted Round Robin (WRR) */
	uint8_t wrr_tokens[RTE_SCHED_QUEUES_PER_PIPE];

	/* TC oversubscription */
	uint32_t tc_ov_credits;
	uint8_t tc_ov_period_id;
	uint8_t reserved[3];
} __rte_cache_aligned;

struct rte_sched_queue {
	uint16_t qw;
	uint16_t qr;
};

struct rte_sched_queue_extra {
	struct rte_sched_queue_stats stats;
#ifdef RTE_SCHED_RED
	struct rte_red red;
#endif
};

enum grinder_state {
	e_GRINDER_PREFETCH_PIPE = 0,
	e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
	e_GRINDER_PREFETCH_MBUF,
	e_GRINDER_READ_MBUF
};
/*
 * Path through the scheduler hierarchy used by the scheduler enqueue
 * operation to identify the destination queue for the current
 * packet. Stored in the field pkt.hash.sched of struct rte_mbuf of
 * each packet, typically written by the classification stage and read
 * by scheduler enqueue.
 */
struct rte_sched_port_hierarchy {
	uint16_t queue:2;                /**< Queue ID (0 .. 3) */
	uint16_t traffic_class:2;        /**< Traffic class ID (0 .. 3)*/
	uint32_t color:2;                /**< Color */
	uint16_t unused:10;
	uint16_t subport;                /**< Subport ID */
	uint32_t pipe;                   /**< Pipe ID */
};
struct rte_sched_grinder {
	/* Pipe cache */
	uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
	uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
	uint32_t pcache_w;
	uint32_t pcache_r;

	/* Current pipe */
	enum grinder_state state;
	uint32_t productive;
	uint32_t pindex;
	struct rte_sched_subport *subport;
	struct rte_sched_pipe *pipe;
	struct rte_sched_pipe_profile *pipe_params;

	/* TC cache */
	uint8_t tccache_qmask[4];
	uint32_t tccache_qindex[4];
	uint32_t tccache_w;
	uint32_t tccache_r;

	/* Current TC */
	uint32_t tc_index;
	struct rte_sched_queue *queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	struct rte_mbuf **qbase[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint16_t qsize;
	uint32_t qmask;
	uint32_t qpos;
	struct rte_mbuf *pkt;

	/* WRR */
	uint16_t wrr_tokens[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
	uint16_t wrr_mask[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
	uint8_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
};
struct rte_sched_port {
	/* User parameters */
	uint32_t n_subports_per_port;
	uint32_t n_pipes_per_subport;
	uint32_t rate;
	uint32_t mtu;
	uint32_t frame_overhead;
	uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t n_pipe_profiles;
	uint32_t pipe_tc3_rate_max;
#ifdef RTE_SCHED_RED
	struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][e_RTE_METER_COLORS];
#endif

	/* Timing */
	uint64_t time_cpu_cycles;     /* Current CPU time measured in CPU cycles */
	uint64_t time_cpu_bytes;      /* Current CPU time measured in bytes */
	uint64_t time;                /* Current NIC TX time measured in bytes */
	struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */

	/* Scheduling loop detection */
	uint32_t pipe_loop;
	uint32_t pipe_exhaustion;

	/* Bitmap */
	struct rte_bitmap *bmp;
	uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;

	/* Grinders */
	struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
	uint32_t busy_grinders;
	struct rte_mbuf **pkts_out;
	uint32_t n_pkts_out;

	/* Queue base calculation */
	uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
	uint32_t qsize_sum;

	/* Large data structures */
	struct rte_sched_subport *subport;
	struct rte_sched_pipe *pipe;
	struct rte_sched_queue *queue;
	struct rte_sched_queue_extra *queue_extra;
	struct rte_sched_pipe_profile *pipe_profiles;
	uint8_t *bmp_array;
	struct rte_mbuf **queue_array;
	uint8_t memory[0] __rte_cache_aligned;
} __rte_cache_aligned;
enum rte_sched_port_array {
	e_RTE_SCHED_PORT_ARRAY_SUBPORT = 0,
	e_RTE_SCHED_PORT_ARRAY_PIPE,
	e_RTE_SCHED_PORT_ARRAY_QUEUE,
	e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA,
	e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES,
	e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY,
	e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY,
	e_RTE_SCHED_PORT_ARRAY_TOTAL,
};
#ifdef RTE_SCHED_COLLECT_STATS

static inline uint32_t
rte_sched_port_queues_per_subport(struct rte_sched_port *port)
{
	return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport;
}

#endif

static inline uint32_t
rte_sched_port_queues_per_port(struct rte_sched_port *port)
{
	return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport * port->n_subports_per_port;
}

static inline struct rte_mbuf **
rte_sched_port_qbase(struct rte_sched_port *port, uint32_t qindex)
{
	uint32_t pindex = qindex >> 4;
	uint32_t qpos = qindex & 0xF;

	return (port->queue_array + pindex *
		port->qsize_sum + port->qsize_add[qpos]);
}

static inline uint16_t
rte_sched_port_qsize(struct rte_sched_port *port, uint32_t qindex)
{
	uint32_t tc = (qindex >> 2) & 0x3;

	return port->qsize[tc];
}
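/*
 * Note on the queue index layout assumed by the two helpers above
 * (added for illustration): each pipe owns 16 consecutive queues
 * (4 traffic classes x 4 queues), so for any port-level qindex:
 *
 *   pipe within port  = qindex >> 4
 *   queue within pipe = qindex & 0xF
 *   traffic class     = (qindex >> 2) & 0x3
 *   queue within TC   = qindex & 0x3
 *
 * e.g. qindex = 0x2B lands in pipe 2, TC 2, queue 3, stored at offset
 * qsize_add[0xB] inside that pipe's slice of queue_array.
 */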
static int
rte_sched_port_check_params(struct rte_sched_port_params *params)
{
	uint32_t i;

	if (params == NULL)
		return -1;

	/* socket */
	if ((params->socket < 0) || (params->socket >= RTE_MAX_NUMA_NODES))
		return -2;

	/* rate */
	if (params->rate == 0)
		return -3;

	/* mtu */
	if (params->mtu == 0)
		return -4;

	/* n_subports_per_port: non-zero, limited to 16 bits, power of 2 */
	if (params->n_subports_per_port == 0 ||
	    params->n_subports_per_port > 1u << 16 ||
	    !rte_is_power_of_2(params->n_subports_per_port))
		return -5;

	/* n_pipes_per_subport: non-zero, power of 2 */
	if (params->n_pipes_per_subport == 0 ||
	    !rte_is_power_of_2(params->n_pipes_per_subport))
		return -6;

	/* qsize: non-zero, power of 2,
	 * no bigger than 32K (due to 16-bit read/write pointers)
	 */
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		uint16_t qsize = params->qsize[i];

		if (qsize == 0 || !rte_is_power_of_2(qsize))
			return -7;
	}

	/* pipe_profiles and n_pipe_profiles */
	if (params->pipe_profiles == NULL ||
	    params->n_pipe_profiles == 0 ||
	    params->n_pipe_profiles > RTE_SCHED_PIPE_PROFILES_PER_PORT)
		return -8;

	for (i = 0; i < params->n_pipe_profiles; i++) {
		struct rte_sched_pipe_params *p = params->pipe_profiles + i;
		uint32_t j;

		/* TB rate: non-zero, not greater than port rate */
		if (p->tb_rate == 0 || p->tb_rate > params->rate)
			return -9;

		/* TB size: non-zero */
		if (p->tb_size == 0)
			return -10;

		/* TC rate: non-zero, less than pipe rate */
		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
			if (p->tc_rate[j] == 0 || p->tc_rate[j] > p->tb_rate)
				return -11;
		}

		/* TC period: non-zero */
		if (p->tc_period == 0)
			return -12;

#ifdef RTE_SCHED_SUBPORT_TC_OV
		/* TC3 oversubscription weight: non-zero */
		if (p->tc_ov_weight == 0)
			return -13;
#endif

		/* Queue WRR weights: non-zero */
		for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j++) {
			if (p->wrr_weights[j] == 0)
				return -14;
		}
	}

	return 0;
}
static uint32_t
rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sched_port_array array)
{
	uint32_t n_subports_per_port = params->n_subports_per_port;
	uint32_t n_pipes_per_subport = params->n_pipes_per_subport;
	uint32_t n_pipes_per_port = n_pipes_per_subport * n_subports_per_port;
	uint32_t n_queues_per_port = RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport * n_subports_per_port;

	uint32_t size_subport = n_subports_per_port * sizeof(struct rte_sched_subport);
	uint32_t size_pipe = n_pipes_per_port * sizeof(struct rte_sched_pipe);
	uint32_t size_queue = n_queues_per_port * sizeof(struct rte_sched_queue);
	uint32_t size_queue_extra
		= n_queues_per_port * sizeof(struct rte_sched_queue_extra);
	uint32_t size_pipe_profiles
		= RTE_SCHED_PIPE_PROFILES_PER_PORT * sizeof(struct rte_sched_pipe_profile);
	uint32_t size_bmp_array = rte_bitmap_get_memory_footprint(n_queues_per_port);
	uint32_t size_per_pipe_queue_array, size_queue_array;

	uint32_t base, i;

	size_per_pipe_queue_array = 0;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		size_per_pipe_queue_array += RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS
			* params->qsize[i] * sizeof(struct rte_mbuf *);
	}
	size_queue_array = n_pipes_per_port * size_per_pipe_queue_array;

	base = 0;

	if (array == e_RTE_SCHED_PORT_ARRAY_SUBPORT)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_subport);

	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_pipe);

	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_queue);

	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);

	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);

	if (array == e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);

	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);

	return base;
}
uint32_t
rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
{
	uint32_t size0, size1;
	int status;

	status = rte_sched_port_check_params(params);
	if (status != 0) {
		RTE_LOG(NOTICE, SCHED,
			"Port scheduler params check failed (%d)\n", status);

		return 0;
	}

	size0 = sizeof(struct rte_sched_port);
	size1 = rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL);

	return size0 + size1;
}
static void
rte_sched_port_config_qsize(struct rte_sched_port *port)
{
	/* TC 0 */
	port->qsize_add[0] = 0;
	port->qsize_add[1] = port->qsize_add[0] + port->qsize[0];
	port->qsize_add[2] = port->qsize_add[1] + port->qsize[0];
	port->qsize_add[3] = port->qsize_add[2] + port->qsize[0];

	/* TC 1 */
	port->qsize_add[4] = port->qsize_add[3] + port->qsize[0];
	port->qsize_add[5] = port->qsize_add[4] + port->qsize[1];
	port->qsize_add[6] = port->qsize_add[5] + port->qsize[1];
	port->qsize_add[7] = port->qsize_add[6] + port->qsize[1];

	/* TC 2 */
	port->qsize_add[8] = port->qsize_add[7] + port->qsize[1];
	port->qsize_add[9] = port->qsize_add[8] + port->qsize[2];
	port->qsize_add[10] = port->qsize_add[9] + port->qsize[2];
	port->qsize_add[11] = port->qsize_add[10] + port->qsize[2];

	/* TC 3 */
	port->qsize_add[12] = port->qsize_add[11] + port->qsize[2];
	port->qsize_add[13] = port->qsize_add[12] + port->qsize[3];
	port->qsize_add[14] = port->qsize_add[13] + port->qsize[3];
	port->qsize_add[15] = port->qsize_add[14] + port->qsize[3];

	port->qsize_sum = port->qsize_add[15] + port->qsize[3];
}
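/*
 * Worked example (hypothetical queue sizes, for illustration only):
 * with qsize = {64, 128, 256, 512}, the table above evaluates to
 *
 *   qsize_add = {   0,   64,  128,  192,     TC 0 queues
 *                 256,  384,  512,  640,     TC 1 queues
 *                 768, 1024, 1280, 1536,     TC 2 queues
 *                1792, 2304, 2816, 3328 }    TC 3 queues
 *
 * and qsize_sum = 3840 mbuf pointers per pipe.
 */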
static void
rte_sched_port_log_pipe_profile(struct rte_sched_port *port, uint32_t i)
{
	struct rte_sched_pipe_profile *p = port->pipe_profiles + i;

	RTE_LOG(DEBUG, SCHED, "Low level config for pipe profile %u:\n"
		"    Token bucket: period = %u, credits per period = %u, size = %u\n"
		"    Traffic classes: period = %u, credits per period = [%u, %u, %u, %u]\n"
		"    Traffic class 3 oversubscription: weight = %hhu\n"
		"    WRR cost: [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu]\n",
		i,

		/* Token bucket */
		p->tb_period,
		p->tb_credits_per_period,
		p->tb_size,

		/* Traffic classes */
		p->tc_period,
		p->tc_credits_per_period[0],
		p->tc_credits_per_period[1],
		p->tc_credits_per_period[2],
		p->tc_credits_per_period[3],

		/* Traffic class 3 oversubscription */
		p->tc_ov_weight,

		/* WRR */
		p->wrr_cost[ 0], p->wrr_cost[ 1], p->wrr_cost[ 2], p->wrr_cost[ 3],
		p->wrr_cost[ 4], p->wrr_cost[ 5], p->wrr_cost[ 6], p->wrr_cost[ 7],
		p->wrr_cost[ 8], p->wrr_cost[ 9], p->wrr_cost[10], p->wrr_cost[11],
		p->wrr_cost[12], p->wrr_cost[13], p->wrr_cost[14], p->wrr_cost[15]);
}
static inline uint64_t
rte_sched_time_ms_to_bytes(uint32_t time_ms, uint32_t rate)
{
	uint64_t time = time_ms;

	time = (time * rate) / 1000;

	return time;
}
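/*
 * Example (illustrative): at rate = 1.25e9 bytes/sec (10 GbE), a
 * tc_period of 10 ms converts to (10 * 1.25e9) / 1000 = 12,500,000
 * bytes of credit per traffic class period.
 */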
static void
rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port, struct rte_sched_port_params *params)
{
	uint32_t i, j;

	for (i = 0; i < port->n_pipe_profiles; i++) {
		struct rte_sched_pipe_params *src = params->pipe_profiles + i;
		struct rte_sched_pipe_profile *dst = port->pipe_profiles + i;

		/* Token Bucket */
		if (src->tb_rate == params->rate) {
			dst->tb_credits_per_period = 1;
			dst->tb_period = 1;
		} else {
			double tb_rate = (double) src->tb_rate
				/ (double) params->rate;
			double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

			rte_approx(tb_rate, d,
				   &dst->tb_credits_per_period, &dst->tb_period);
		}
		dst->tb_size = src->tb_size;

		/* Traffic Classes */
		dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period,
							    params->rate);

		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++)
			dst->tc_credits_per_period[j]
				= rte_sched_time_ms_to_bytes(src->tc_period,
							     src->tc_rate[j]);

#ifdef RTE_SCHED_SUBPORT_TC_OV
		dst->tc_ov_weight = src->tc_ov_weight;
#endif

		/* WRR */
		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
			uint32_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
			uint32_t lcd, lcd1, lcd2;
			uint32_t qindex;

			qindex = j * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;

			wrr_cost[0] = src->wrr_weights[qindex];
			wrr_cost[1] = src->wrr_weights[qindex + 1];
			wrr_cost[2] = src->wrr_weights[qindex + 2];
			wrr_cost[3] = src->wrr_weights[qindex + 3];

			lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
			lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
			lcd = rte_get_lcd(lcd1, lcd2);

			wrr_cost[0] = lcd / wrr_cost[0];
			wrr_cost[1] = lcd / wrr_cost[1];
			wrr_cost[2] = lcd / wrr_cost[2];
			wrr_cost[3] = lcd / wrr_cost[3];

			dst->wrr_cost[qindex] = (uint8_t) wrr_cost[0];
			dst->wrr_cost[qindex + 1] = (uint8_t) wrr_cost[1];
			dst->wrr_cost[qindex + 2] = (uint8_t) wrr_cost[2];
			dst->wrr_cost[qindex + 3] = (uint8_t) wrr_cost[3];
		}

		rte_sched_port_log_pipe_profile(port, i);
	}

	port->pipe_tc3_rate_max = 0;
	for (i = 0; i < port->n_pipe_profiles; i++) {
		struct rte_sched_pipe_params *src = params->pipe_profiles + i;
		uint32_t pipe_tc3_rate = src->tc_rate[3];

		if (port->pipe_tc3_rate_max < pipe_tc3_rate)
			port->pipe_tc3_rate_max = pipe_tc3_rate;
	}
}
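/*
 * Example of the WRR cost derivation above (hypothetical weights, for
 * illustration): for a traffic class with wrr_weights = {1, 2, 4, 8},
 * rte_get_lcd() yields lcd = 8, so wrr_cost = {8, 4, 2, 1}. The
 * lowest-weight queue pays the highest token cost per byte, which is
 * what makes higher-weight queues win the minimum-token election in
 * grinder_wrr() proportionally more often.
 */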
struct rte_sched_port *
rte_sched_port_config(struct rte_sched_port_params *params)
{
	struct rte_sched_port *port = NULL;
	uint32_t mem_size, bmp_mem_size, n_queues_per_port, i, cycles_per_byte;

	/* Check user parameters. Determine the amount of memory to allocate */
	mem_size = rte_sched_port_get_memory_footprint(params);
	if (mem_size == 0)
		return NULL;

	/* Allocate memory to store the data structures */
	port = rte_zmalloc("qos_params", mem_size, RTE_CACHE_LINE_SIZE);
	if (port == NULL)
		return NULL;

	/* compile time checks */
	RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
	RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS & (RTE_SCHED_PORT_N_GRINDERS - 1));

	/* User parameters */
	port->n_subports_per_port = params->n_subports_per_port;
	port->n_pipes_per_subport = params->n_pipes_per_subport;
	port->rate = params->rate;
	port->mtu = params->mtu + params->frame_overhead;
	port->frame_overhead = params->frame_overhead;
	memcpy(port->qsize, params->qsize, sizeof(params->qsize));
	port->n_pipe_profiles = params->n_pipe_profiles;

#ifdef RTE_SCHED_RED
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		uint32_t j;

		for (j = 0; j < e_RTE_METER_COLORS; j++) {
			/* if min/max are both zero, then RED is disabled */
			if ((params->red_params[i][j].min_th |
			     params->red_params[i][j].max_th) == 0) {
				continue;
			}

			if (rte_red_config_init(&port->red_config[i][j],
				params->red_params[i][j].wq_log2,
				params->red_params[i][j].min_th,
				params->red_params[i][j].max_th,
				params->red_params[i][j].maxp_inv) != 0) {
				rte_free(port);
				return NULL;
			}
		}
	}
#endif

	/* Timing */
	port->time_cpu_cycles = rte_get_tsc_cycles();
	port->time_cpu_bytes = 0;
	port->time = 0;

	cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT)
		/ params->rate;
	port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte);

	/* Scheduling loop detection */
	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
	port->pipe_exhaustion = 0;

	/* Grinders */
	port->busy_grinders = 0;
	port->pkts_out = NULL;
	port->n_pkts_out = 0;

	/* Queue base calculation */
	rte_sched_port_config_qsize(port);

	/* Large data structures */
	port->subport = (struct rte_sched_subport *)
		(port->memory + rte_sched_port_get_array_base(params,
							      e_RTE_SCHED_PORT_ARRAY_SUBPORT));
	port->pipe = (struct rte_sched_pipe *)
		(port->memory + rte_sched_port_get_array_base(params,
							      e_RTE_SCHED_PORT_ARRAY_PIPE));
	port->queue = (struct rte_sched_queue *)
		(port->memory + rte_sched_port_get_array_base(params,
							      e_RTE_SCHED_PORT_ARRAY_QUEUE));
	port->queue_extra = (struct rte_sched_queue_extra *)
		(port->memory + rte_sched_port_get_array_base(params,
							      e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA));
	port->pipe_profiles = (struct rte_sched_pipe_profile *)
		(port->memory + rte_sched_port_get_array_base(params,
							      e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES));
	port->bmp_array = port->memory
		+ rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY);
	port->queue_array = (struct rte_mbuf **)
		(port->memory + rte_sched_port_get_array_base(params,
							      e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY));

	/* Pipe profile table */
	rte_sched_port_config_pipe_profile_table(port, params);

	/* Bitmap */
	n_queues_per_port = rte_sched_port_queues_per_port(port);
	bmp_mem_size = rte_bitmap_get_memory_footprint(n_queues_per_port);
	port->bmp = rte_bitmap_init(n_queues_per_port, port->bmp_array,
				    bmp_mem_size);
	if (port->bmp == NULL) {
		RTE_LOG(ERR, SCHED, "Bitmap init error\n");
		return NULL;
	}

	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
		port->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;

	return port;
}
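/*
 * Minimal configuration sketch (illustrative values, error handling
 * elided; the numbers below are hypothetical, not library defaults):
 *
 *	static struct rte_sched_port_params port_params = {
 *		.name = "port_0",
 *		.socket = 0,
 *		.rate = 1250000000,              // 10 GbE, in bytes/sec
 *		.mtu = 1522,
 *		.frame_overhead = 24,            // preamble + IFG + FCS
 *		.n_subports_per_port = 1,
 *		.n_pipes_per_subport = 4096,
 *		.qsize = {64, 64, 64, 64},
 *		.pipe_profiles = pipe_profiles,  // user-supplied table
 *		.n_pipe_profiles = 1,
 *	};
 *
 *	port = rte_sched_port_config(&port_params);
 *	rte_sched_subport_config(port, 0, &subport_params);
 *	for (pipe_id = 0; pipe_id < 4096; pipe_id++)
 *		rte_sched_pipe_config(port, 0, pipe_id, 0);
 */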
void
rte_sched_port_free(struct rte_sched_port *port)
{
	uint32_t qindex;

	/* Check user parameters */
	if (port == NULL)
		return;

	/* Free enqueued mbufs: only the occupied [qr, qw) slots of each
	 * queue in the port hold valid packets
	 */
	for (qindex = 0; qindex < rte_sched_port_queues_per_port(port); qindex++) {
		struct rte_mbuf **mbufs = rte_sched_port_qbase(port, qindex);
		uint16_t qsize = rte_sched_port_qsize(port, qindex);
		struct rte_sched_queue *queue = port->queue + qindex;
		uint16_t qr = queue->qr & (qsize - 1);
		uint16_t qw = queue->qw & (qsize - 1);

		for (; qr != qw; qr = (qr + 1) & (qsize - 1))
			rte_pktmbuf_free(mbufs[qr]);
	}

	rte_bitmap_free(port->bmp);
	rte_free(port);
}
static void
rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
{
	struct rte_sched_subport *s = port->subport + i;

	RTE_LOG(DEBUG, SCHED, "Low level config for subport %u:\n"
		"    Token bucket: period = %u, credits per period = %u, size = %u\n"
		"    Traffic classes: period = %u, credits per period = [%u, %u, %u, %u]\n"
		"    Traffic class 3 oversubscription: wm min = %u, wm max = %u\n",
		i,

		/* Token bucket */
		s->tb_period,
		s->tb_credits_per_period,
		s->tb_size,

		/* Traffic classes */
		s->tc_period,
		s->tc_credits_per_period[0],
		s->tc_credits_per_period[1],
		s->tc_credits_per_period[2],
		s->tc_credits_per_period[3],

		/* Traffic class 3 oversubscription */
		s->tc_ov_wm_min,
		s->tc_ov_wm_max);
}
int
rte_sched_subport_config(struct rte_sched_port *port,
			 uint32_t subport_id,
			 struct rte_sched_subport_params *params)
{
	struct rte_sched_subport *s;
	uint32_t i;

	/* Check user parameters */
	if (port == NULL ||
	    subport_id >= port->n_subports_per_port ||
	    params == NULL)
		return -1;

	if (params->tb_rate == 0 || params->tb_rate > port->rate)
		return -2;

	if (params->tb_size == 0)
		return -3;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		if (params->tc_rate[i] == 0 ||
		    params->tc_rate[i] > params->tb_rate)
			return -4;
	}

	if (params->tc_period == 0)
		return -5;

	s = port->subport + subport_id;

	/* Token Bucket (TB) */
	if (params->tb_rate == port->rate) {
		s->tb_credits_per_period = 1;
		s->tb_period = 1;
	} else {
		double tb_rate = ((double) params->tb_rate) / ((double) port->rate);
		double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

		rte_approx(tb_rate, d, &s->tb_credits_per_period, &s->tb_period);
	}

	s->tb_size = params->tb_size;
	s->tb_time = port->time;
	s->tb_credits = s->tb_size / 2;

	/* Traffic Classes (TCs) */
	s->tc_period = rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		s->tc_credits_per_period[i]
			= rte_sched_time_ms_to_bytes(params->tc_period,
						     params->tc_rate[i]);
	}
	s->tc_time = port->time + s->tc_period;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		s->tc_credits[i] = s->tc_credits_per_period[i];

#ifdef RTE_SCHED_SUBPORT_TC_OV
	/* TC oversubscription */
	s->tc_ov_wm_min = port->mtu;
	s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(params->tc_period,
						     port->pipe_tc3_rate_max);
	s->tc_ov_wm = s->tc_ov_wm_max;
	s->tc_ov_period_id = 0;

	s->tc_ov = 0;
	s->tc_ov_n = 0;
	s->tc_ov_rate = 0;
#endif

	rte_sched_port_log_subport_config(port, subport_id);

	return 0;
}
int
rte_sched_pipe_config(struct rte_sched_port *port,
		      uint32_t subport_id,
		      uint32_t pipe_id,
		      int32_t pipe_profile)
{
	struct rte_sched_subport *s;
	struct rte_sched_pipe *p;
	struct rte_sched_pipe_profile *params;
	uint32_t deactivate, profile, i;

	/* Check user parameters */
	profile = (uint32_t) pipe_profile;
	deactivate = (pipe_profile < 0);

	if (port == NULL ||
	    subport_id >= port->n_subports_per_port ||
	    pipe_id >= port->n_pipes_per_subport ||
	    (!deactivate && profile >= port->n_pipe_profiles))
		return -1;

	/* Check that subport configuration is valid */
	s = port->subport + subport_id;
	if (s->tb_period == 0)
		return -2;

	p = port->pipe + (subport_id * port->n_pipes_per_subport + pipe_id);

	/* Handle the case when pipe already has a valid configuration */
	if (p->tb_time) {
		params = port->pipe_profiles + p->profile;

#ifdef RTE_SCHED_SUBPORT_TC_OV
		double subport_tc3_rate = (double) s->tc_credits_per_period[3]
			/ (double) s->tc_period;
		double pipe_tc3_rate = (double) params->tc_credits_per_period[3]
			/ (double) params->tc_period;
		uint32_t tc3_ov = s->tc_ov;

		/* Unplug pipe from its subport */
		s->tc_ov_n -= params->tc_ov_weight;
		s->tc_ov_rate -= pipe_tc3_rate;
		s->tc_ov = s->tc_ov_rate > subport_tc3_rate;

		if (s->tc_ov != tc3_ov) {
			RTE_LOG(DEBUG, SCHED,
				"Subport %u TC3 oversubscription is OFF (%.4lf >= %.4lf)\n",
				subport_id, subport_tc3_rate, s->tc_ov_rate);
		}
#endif

		/* Reset the pipe */
		memset(p, 0, sizeof(struct rte_sched_pipe));
	}

	if (deactivate)
		return 0;

	/* Apply the new pipe configuration */
	p->profile = profile;
	params = port->pipe_profiles + p->profile;

	/* Token Bucket (TB) */
	p->tb_time = port->time;
	p->tb_credits = params->tb_size / 2;

	/* Traffic Classes (TCs) */
	p->tc_time = port->time + params->tc_period;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		p->tc_credits[i] = params->tc_credits_per_period[i];

#ifdef RTE_SCHED_SUBPORT_TC_OV
	{
		/* Subport TC3 oversubscription */
		double subport_tc3_rate = (double) s->tc_credits_per_period[3]
			/ (double) s->tc_period;
		double pipe_tc3_rate = (double) params->tc_credits_per_period[3]
			/ (double) params->tc_period;
		uint32_t tc3_ov = s->tc_ov;

		s->tc_ov_n += params->tc_ov_weight;
		s->tc_ov_rate += pipe_tc3_rate;
		s->tc_ov = s->tc_ov_rate > subport_tc3_rate;

		if (s->tc_ov != tc3_ov) {
			RTE_LOG(DEBUG, SCHED,
				"Subport %u TC3 oversubscription is ON (%.4lf < %.4lf)\n",
				subport_id, subport_tc3_rate, s->tc_ov_rate);
		}
		p->tc_ov_period_id = s->tc_ov_period_id;
		p->tc_ov_credits = s->tc_ov_wm;
	}
#endif

	return 0;
}
void
rte_sched_port_pkt_write(struct rte_mbuf *pkt,
			 uint32_t subport, uint32_t pipe, uint32_t traffic_class,
			 uint32_t queue, enum rte_meter_color color)
{
	struct rte_sched_port_hierarchy *sched
		= (struct rte_sched_port_hierarchy *) &pkt->hash.sched;

	RTE_BUILD_BUG_ON(sizeof(*sched) > sizeof(pkt->hash.sched));

	sched->color = (uint32_t) color;
	sched->subport = subport;
	sched->pipe = pipe;
	sched->traffic_class = traffic_class;
	sched->queue = queue;
}

void
rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
				  uint32_t *subport, uint32_t *pipe,
				  uint32_t *traffic_class, uint32_t *queue)
{
	const struct rte_sched_port_hierarchy *sched
		= (const struct rte_sched_port_hierarchy *) &pkt->hash.sched;

	*subport = sched->subport;
	*pipe = sched->pipe;
	*traffic_class = sched->traffic_class;
	*queue = sched->queue;
}

enum rte_meter_color
rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
{
	const struct rte_sched_port_hierarchy *sched
		= (const struct rte_sched_port_hierarchy *) &pkt->hash.sched;

	return (enum rte_meter_color) sched->color;
}
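/*
 * Typical usage sketch (illustrative): the classification stage stamps
 * each mbuf before handing it to rte_sched_port_enqueue(), e.g.
 *
 *	rte_sched_port_pkt_write(pkt, subport, pipe, tc, queue,
 *				 e_RTE_METER_GREEN);
 *
 * and a later stage can recover the path with
 * rte_sched_port_pkt_read_tree_path() or the color with
 * rte_sched_port_pkt_read_color() without touching packet data, since
 * the whole path lives in pkt->hash.sched.
 */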
int
rte_sched_subport_read_stats(struct rte_sched_port *port,
			     uint32_t subport_id,
			     struct rte_sched_subport_stats *stats,
			     uint32_t *tc_ov)
{
	struct rte_sched_subport *s;

	/* Check user parameters */
	if (port == NULL || subport_id >= port->n_subports_per_port ||
	    stats == NULL || tc_ov == NULL)
		return -1;

	s = port->subport + subport_id;

	/* Copy subport stats and clear */
	memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
	memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));

	/* Subport TC oversubscription status */
	*tc_ov = s->tc_ov;

	return 0;
}
int
rte_sched_queue_read_stats(struct rte_sched_port *port,
			   uint32_t queue_id,
			   struct rte_sched_queue_stats *stats,
			   uint16_t *qlen)
{
	struct rte_sched_queue *q;
	struct rte_sched_queue_extra *qe;

	/* Check user parameters */
	if ((port == NULL) ||
	    (queue_id >= rte_sched_port_queues_per_port(port)) ||
	    (stats == NULL) ||
	    (qlen == NULL))
		return -1;

	q = port->queue + queue_id;
	qe = port->queue_extra + queue_id;

	/* Copy queue stats and clear */
	memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
	memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats));

	/* Queue length */
	*qlen = q->qw - q->qr;

	return 0;
}
static inline uint32_t
rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue)
{
	uint32_t result;

	result = subport * port->n_pipes_per_subport + pipe;
	result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + traffic_class;
	result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;

	return result;
}
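/*
 * Worked example (illustrative): with n_pipes_per_subport = 4096,
 * the path (subport 1, pipe 3, traffic class 2, queue 1) maps to
 *
 *   qindex = ((1 * 4096 + 3) * 4 + 2) * 4 + 1 = 65593
 *
 * which the dequeue-side helpers invert as qindex >> 4 = 4099 (the
 * port-global pipe), (qindex >> 2) & 0x3 = 2 and qindex & 0x3 = 1.
 */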
#ifdef RTE_SCHED_DEBUG

static inline int
rte_sched_port_queue_is_empty(struct rte_sched_port *port, uint32_t qindex)
{
	struct rte_sched_queue *queue = port->queue + qindex;

	return queue->qr == queue->qw;
}

#endif /* RTE_SCHED_DEBUG */
#ifdef RTE_SCHED_COLLECT_STATS

static inline void
rte_sched_port_update_subport_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
	struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
	uint32_t tc_index = (qindex >> 2) & 0x3;
	uint32_t pkt_len = pkt->pkt_len;

	s->stats.n_pkts_tc[tc_index] += 1;
	s->stats.n_bytes_tc[tc_index] += pkt_len;
}

static inline void
rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
					    uint32_t qindex,
					    struct rte_mbuf *pkt, __rte_unused uint32_t red)
{
	struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
	uint32_t tc_index = (qindex >> 2) & 0x3;
	uint32_t pkt_len = pkt->pkt_len;

	s->stats.n_pkts_tc_dropped[tc_index] += 1;
	s->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
#ifdef RTE_SCHED_RED
	s->stats.n_pkts_red_dropped[tc_index] += red;
#endif
}

static inline void
rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
	uint32_t pkt_len = pkt->pkt_len;

	qe->stats.n_pkts += 1;
	qe->stats.n_bytes += pkt_len;
}

static inline void
rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,
					  uint32_t qindex,
					  struct rte_mbuf *pkt, __rte_unused uint32_t red)
{
	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
	uint32_t pkt_len = pkt->pkt_len;

	qe->stats.n_pkts_dropped += 1;
	qe->stats.n_bytes_dropped += pkt_len;
#ifdef RTE_SCHED_RED
	qe->stats.n_pkts_red_dropped += red;
#endif
}

#endif /* RTE_SCHED_COLLECT_STATS */
#ifdef RTE_SCHED_RED

static inline int
rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint32_t qindex, uint16_t qlen)
{
	struct rte_sched_queue_extra *qe;
	struct rte_red_config *red_cfg;
	struct rte_red *red;
	uint32_t tc_index;
	enum rte_meter_color color;

	tc_index = (qindex >> 2) & 0x3;
	color = rte_sched_port_pkt_read_color(pkt);
	red_cfg = &port->red_config[tc_index][color];

	if ((red_cfg->min_th | red_cfg->max_th) == 0)
		return 0;

	qe = port->queue_extra + qindex;
	red = &qe->red;

	return rte_red_enqueue(red_cfg, red, qlen, port->time);
}

static inline void
rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port, uint32_t qindex)
{
	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
	struct rte_red *red = &qe->red;

	rte_red_mark_queue_empty(red, port->time);
}

#else

#define rte_sched_port_red_drop(port, pkt, qindex, qlen) 0

#define rte_sched_port_set_queue_empty_timestamp(port, qindex)

#endif /* RTE_SCHED_RED */
#ifdef RTE_SCHED_DEBUG

static inline void
debug_check_queue_slab(struct rte_sched_port *port, uint32_t bmp_pos,
		       uint64_t bmp_slab)
{
	uint64_t mask;
	uint32_t i, panic;

	if (bmp_slab == 0)
		rte_panic("Empty slab at position %u\n", bmp_pos);

	panic = 0;
	for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
		if (mask & bmp_slab) {
			if (rte_sched_port_queue_is_empty(port, bmp_pos + i)) {
				printf("Queue %u (slab offset %u) is empty\n",
				       bmp_pos + i, i);
				panic = 1;
			}
		}
	}

	if (panic)
		rte_panic("Empty queues in slab 0x%" PRIx64 " starting at position %u\n",
			  bmp_slab, bmp_pos);
}

#endif /* RTE_SCHED_DEBUG */
static inline uint32_t
rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_port *port,
				       struct rte_mbuf *pkt)
{
	struct rte_sched_queue *q;
#ifdef RTE_SCHED_COLLECT_STATS
	struct rte_sched_queue_extra *qe;
#endif
	uint32_t subport, pipe, traffic_class, queue, qindex;

	rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &traffic_class, &queue);

	qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
	q = port->queue + qindex;
	rte_prefetch0(q);
#ifdef RTE_SCHED_COLLECT_STATS
	qe = port->queue_extra + qindex;
	rte_prefetch0(qe);
#endif

	return qindex;
}
static inline void
rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port,
				     uint32_t qindex, struct rte_mbuf **qbase)
{
	struct rte_sched_queue *q;
	struct rte_mbuf **q_qw;
	uint16_t qsize;

	q = port->queue + qindex;
	qsize = rte_sched_port_qsize(port, qindex);
	q_qw = qbase + (q->qw & (qsize - 1));

	rte_prefetch0(q_qw);
	rte_bitmap_prefetch0(port->bmp, qindex);
}
static inline int
rte_sched_port_enqueue_qwa(struct rte_sched_port *port, uint32_t qindex,
			   struct rte_mbuf **qbase, struct rte_mbuf *pkt)
{
	struct rte_sched_queue *q;
	uint16_t qsize;
	uint16_t qlen;

	q = port->queue + qindex;
	qsize = rte_sched_port_qsize(port, qindex);
	qlen = q->qw - q->qr;

	/* Drop the packet (and update drop stats) when queue is full */
	if (unlikely(rte_sched_port_red_drop(port, pkt, qindex, qlen) ||
		     (qlen >= qsize))) {
		rte_pktmbuf_free(pkt);
#ifdef RTE_SCHED_COLLECT_STATS
		rte_sched_port_update_subport_stats_on_drop(port, qindex, pkt,
							    qlen < qsize);
		rte_sched_port_update_queue_stats_on_drop(port, qindex, pkt,
							  qlen < qsize);
#endif
		return 0;
	}

	/* Enqueue packet */
	qbase[q->qw & (qsize - 1)] = pkt;
	q->qw++;

	/* Activate queue in the port bitmap */
	rte_bitmap_set(port->bmp, qindex);

	/* Statistics */
#ifdef RTE_SCHED_COLLECT_STATS
	rte_sched_port_update_subport_stats(port, qindex, pkt);
	rte_sched_port_update_queue_stats(port, qindex, pkt);
#endif

	return 1;
}
/*
 * The enqueue function implements a 4-level pipeline with each stage
 * processing two different packets. The purpose of using a pipeline
 * is to hide the latency of prefetching the data structures. The
 * naming convention is presented in the diagram below:
 *
 *   p00  _______   p10  _______   p20  _______   p30  _______
 * ----->|       |----->|       |----->|       |----->|       |----->
 *       |   0   |      |   1   |      |   2   |      |   3   |
 * ----->|_______|----->|_______|----->|_______|----->|_______|----->
 *   p01            p11            p21            p31
 */
int
rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,
		       uint32_t n_pkts)
{
	struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21,
		*pkt30, *pkt31, *pkt_last;
	struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base,
		**q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
	uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
	uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
	uint32_t result, i;

	result = 0;

	/*
	 * Less than 6 input packets available, which is not enough to
	 * feed the pipeline
	 */
	if (unlikely(n_pkts < 6)) {
		struct rte_mbuf **q_base[5];
		uint32_t q[5];

		/* Prefetch the mbuf structure of each packet */
		for (i = 0; i < n_pkts; i++)
			rte_prefetch0(pkts[i]);

		/* Prefetch the queue structure for each queue */
		for (i = 0; i < n_pkts; i++)
			q[i] = rte_sched_port_enqueue_qptrs_prefetch0(port,
								      pkts[i]);

		/* Prefetch the write pointer location of each queue */
		for (i = 0; i < n_pkts; i++) {
			q_base[i] = rte_sched_port_qbase(port, q[i]);
			rte_sched_port_enqueue_qwa_prefetch0(port, q[i],
							     q_base[i]);
		}

		/* Write each packet to its queue */
		for (i = 0; i < n_pkts; i++)
			result += rte_sched_port_enqueue_qwa(port, q[i],
							     q_base[i], pkts[i]);

		return result;
	}
	/* Feed the first 3 stages of the pipeline (6 packets needed) */
	pkt20 = pkts[0];
	pkt21 = pkts[1];
	rte_prefetch0(pkt20);
	rte_prefetch0(pkt21);

	pkt10 = pkts[2];
	pkt11 = pkts[3];
	rte_prefetch0(pkt10);
	rte_prefetch0(pkt11);

	q20 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt20);
	q21 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt21);

	pkt00 = pkts[4];
	pkt01 = pkts[5];
	rte_prefetch0(pkt00);
	rte_prefetch0(pkt01);

	q10 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt10);
	q11 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt11);

	q20_base = rte_sched_port_qbase(port, q20);
	q21_base = rte_sched_port_qbase(port, q21);
	rte_sched_port_enqueue_qwa_prefetch0(port, q20, q20_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, q21, q21_base);

	/* Run the pipeline */
	for (i = 6; i < (n_pkts & (~1)); i += 2) {
		/* Propagate stage inputs */
		pkt30 = pkt20;
		pkt31 = pkt21;
		pkt20 = pkt10;
		pkt21 = pkt11;
		pkt10 = pkt00;
		pkt11 = pkt01;
		q30 = q20;
		q31 = q21;
		q20 = q10;
		q21 = q11;
		q30_base = q20_base;
		q31_base = q21_base;

		/* Stage 0: Get packets in */
		pkt00 = pkts[i];
		pkt01 = pkts[i + 1];
		rte_prefetch0(pkt00);
		rte_prefetch0(pkt01);

		/* Stage 1: Prefetch queue structure storing queue pointers */
		q10 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt10);
		q11 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt11);

		/* Stage 2: Prefetch queue write location */
		q20_base = rte_sched_port_qbase(port, q20);
		q21_base = rte_sched_port_qbase(port, q21);
		rte_sched_port_enqueue_qwa_prefetch0(port, q20, q20_base);
		rte_sched_port_enqueue_qwa_prefetch0(port, q21, q21_base);

		/* Stage 3: Write packet to queue and activate queue */
		r30 = rte_sched_port_enqueue_qwa(port, q30, q30_base, pkt30);
		r31 = rte_sched_port_enqueue_qwa(port, q31, q31_base, pkt31);
		result += r30 + r31;
	}
	/*
	 * Drain the pipeline (exactly 6 packets).
	 * Handle the last packet in the case
	 * of an odd number of input packets.
	 */
	pkt_last = pkts[n_pkts - 1];
	rte_prefetch0(pkt_last);

	q00 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt00);
	q01 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt01);

	q10_base = rte_sched_port_qbase(port, q10);
	q11_base = rte_sched_port_qbase(port, q11);
	rte_sched_port_enqueue_qwa_prefetch0(port, q10, q10_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, q11, q11_base);

	r20 = rte_sched_port_enqueue_qwa(port, q20, q20_base, pkt20);
	r21 = rte_sched_port_enqueue_qwa(port, q21, q21_base, pkt21);
	result += r20 + r21;

	q_last = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt_last);

	q00_base = rte_sched_port_qbase(port, q00);
	q01_base = rte_sched_port_qbase(port, q01);
	rte_sched_port_enqueue_qwa_prefetch0(port, q00, q00_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, q01, q01_base);

	r10 = rte_sched_port_enqueue_qwa(port, q10, q10_base, pkt10);
	r11 = rte_sched_port_enqueue_qwa(port, q11, q11_base, pkt11);
	result += r10 + r11;

	q_last_base = rte_sched_port_qbase(port, q_last);
	rte_sched_port_enqueue_qwa_prefetch0(port, q_last, q_last_base);

	r00 = rte_sched_port_enqueue_qwa(port, q00, q00_base, pkt00);
	r01 = rte_sched_port_enqueue_qwa(port, q01, q01_base, pkt01);
	result += r00 + r01;

	if (n_pkts & 1) {
		r_last = rte_sched_port_enqueue_qwa(port, q_last, q_last_base, pkt_last);
		result += r_last;
	}

	return result;
}
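/*
 * Usage sketch (illustrative): enqueue is typically paired with
 * rte_sched_port_dequeue() on the same lcore, with TX following, e.g.
 *
 *	// pkts[] already classified via rte_sched_port_pkt_write()
 *	rte_sched_port_enqueue(port, pkts, n_rx);
 *	n_tx = rte_sched_port_dequeue(port, pkts, burst_size);
 *	rte_eth_tx_burst(tx_port_id, 0, pkts, n_tx);
 *
 * port, pkts, n_rx, burst_size and tx_port_id are placeholders. The
 * port structure is not protected by locks, so splitting enqueue and
 * dequeue for the same port across cores is not recommended without
 * external synchronization.
 */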
#ifndef RTE_SCHED_SUBPORT_TC_OV

static inline void
grinder_credits_update(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *params = grinder->pipe_params;
	uint64_t n_periods;

	/* Subport TB */
	n_periods = (port->time - subport->tb_time) / subport->tb_period;
	subport->tb_credits += n_periods * subport->tb_credits_per_period;
	subport->tb_credits = rte_sched_min_val_2_u32(subport->tb_credits, subport->tb_size);
	subport->tb_time += n_periods * subport->tb_period;

	/* Pipe TB */
	n_periods = (port->time - pipe->tb_time) / params->tb_period;
	pipe->tb_credits += n_periods * params->tb_credits_per_period;
	pipe->tb_credits = rte_sched_min_val_2_u32(pipe->tb_credits, params->tb_size);
	pipe->tb_time += n_periods * params->tb_period;

	/* Subport TCs */
	if (unlikely(port->time >= subport->tc_time)) {
		subport->tc_credits[0] = subport->tc_credits_per_period[0];
		subport->tc_credits[1] = subport->tc_credits_per_period[1];
		subport->tc_credits[2] = subport->tc_credits_per_period[2];
		subport->tc_credits[3] = subport->tc_credits_per_period[3];
		subport->tc_time = port->time + subport->tc_period;
	}

	/* Pipe TCs */
	if (unlikely(port->time >= pipe->tc_time)) {
		pipe->tc_credits[0] = params->tc_credits_per_period[0];
		pipe->tc_credits[1] = params->tc_credits_per_period[1];
		pipe->tc_credits[2] = params->tc_credits_per_period[2];
		pipe->tc_credits[3] = params->tc_credits_per_period[3];
		pipe->tc_time = port->time + params->tc_period;
	}
}

#else
static inline uint32_t
grinder_tc_ov_credits_update(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	uint32_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tc_ov_consumption_max;
	uint32_t tc_ov_wm = subport->tc_ov_wm;

	if (subport->tc_ov == 0)
		return subport->tc_ov_wm_max;

	tc_ov_consumption[0] = subport->tc_credits_per_period[0] - subport->tc_credits[0];
	tc_ov_consumption[1] = subport->tc_credits_per_period[1] - subport->tc_credits[1];
	tc_ov_consumption[2] = subport->tc_credits_per_period[2] - subport->tc_credits[2];
	tc_ov_consumption[3] = subport->tc_credits_per_period[3] - subport->tc_credits[3];

	tc_ov_consumption_max = subport->tc_credits_per_period[3] -
		(tc_ov_consumption[0] + tc_ov_consumption[1] + tc_ov_consumption[2]);

	if (tc_ov_consumption[3] > (tc_ov_consumption_max - port->mtu)) {
		tc_ov_wm -= tc_ov_wm >> 7;
		if (tc_ov_wm < subport->tc_ov_wm_min)
			tc_ov_wm = subport->tc_ov_wm_min;

		return tc_ov_wm;
	}

	tc_ov_wm += (tc_ov_wm >> 7) + 1;
	if (tc_ov_wm > subport->tc_ov_wm_max)
		tc_ov_wm = subport->tc_ov_wm_max;

	return tc_ov_wm;
}
static inline void
grinder_credits_update(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *params = grinder->pipe_params;
	uint64_t n_periods;

	/* Subport TB */
	n_periods = (port->time - subport->tb_time) / subport->tb_period;
	subport->tb_credits += n_periods * subport->tb_credits_per_period;
	subport->tb_credits = rte_sched_min_val_2_u32(subport->tb_credits, subport->tb_size);
	subport->tb_time += n_periods * subport->tb_period;

	/* Pipe TB */
	n_periods = (port->time - pipe->tb_time) / params->tb_period;
	pipe->tb_credits += n_periods * params->tb_credits_per_period;
	pipe->tb_credits = rte_sched_min_val_2_u32(pipe->tb_credits, params->tb_size);
	pipe->tb_time += n_periods * params->tb_period;

	/* Subport TCs */
	if (unlikely(port->time >= subport->tc_time)) {
		subport->tc_ov_wm = grinder_tc_ov_credits_update(port, pos);

		subport->tc_credits[0] = subport->tc_credits_per_period[0];
		subport->tc_credits[1] = subport->tc_credits_per_period[1];
		subport->tc_credits[2] = subport->tc_credits_per_period[2];
		subport->tc_credits[3] = subport->tc_credits_per_period[3];

		subport->tc_time = port->time + subport->tc_period;
		subport->tc_ov_period_id++;
	}

	/* Pipe TCs */
	if (unlikely(port->time >= pipe->tc_time)) {
		pipe->tc_credits[0] = params->tc_credits_per_period[0];
		pipe->tc_credits[1] = params->tc_credits_per_period[1];
		pipe->tc_credits[2] = params->tc_credits_per_period[2];
		pipe->tc_credits[3] = params->tc_credits_per_period[3];
		pipe->tc_time = port->time + params->tc_period;
	}

	/* Pipe TCs - Oversubscription */
	if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
		pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;

		pipe->tc_ov_period_id = subport->tc_ov_period_id;
	}
}

#endif /* RTE_SCHED_TS_CREDITS_UPDATE, RTE_SCHED_SUBPORT_TC_OV */
#ifndef RTE_SCHED_SUBPORT_TC_OV

static inline int
grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t tc_index = grinder->tc_index;
	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
	uint32_t subport_tb_credits = subport->tb_credits;
	uint32_t subport_tc_credits = subport->tc_credits[tc_index];
	uint32_t pipe_tb_credits = pipe->tb_credits;
	uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
	int enough_credits;

	/* Check queue credits */
	enough_credits = (pkt_len <= subport_tb_credits) &&
		(pkt_len <= subport_tc_credits) &&
		(pkt_len <= pipe_tb_credits) &&
		(pkt_len <= pipe_tc_credits);

	if (!enough_credits)
		return 0;

	/* Update port credits */
	subport->tb_credits -= pkt_len;
	subport->tc_credits[tc_index] -= pkt_len;
	pipe->tb_credits -= pkt_len;
	pipe->tc_credits[tc_index] -= pkt_len;

	return 1;
}

#else
static inline int
grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_subport *subport = grinder->subport;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t tc_index = grinder->tc_index;
	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
	uint32_t subport_tb_credits = subport->tb_credits;
	uint32_t subport_tc_credits = subport->tc_credits[tc_index];
	uint32_t pipe_tb_credits = pipe->tb_credits;
	uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
	uint32_t pipe_tc_ov_mask1[] = {UINT32_MAX, UINT32_MAX, UINT32_MAX, pipe->tc_ov_credits};
	uint32_t pipe_tc_ov_mask2[] = {0, 0, 0, UINT32_MAX};
	uint32_t pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
	int enough_credits;

	/* Check pipe and subport credits */
	enough_credits = (pkt_len <= subport_tb_credits) &&
		(pkt_len <= subport_tc_credits) &&
		(pkt_len <= pipe_tb_credits) &&
		(pkt_len <= pipe_tc_credits) &&
		(pkt_len <= pipe_tc_ov_credits);

	if (!enough_credits)
		return 0;

	/* Update pipe and subport credits */
	subport->tb_credits -= pkt_len;
	subport->tc_credits[tc_index] -= pkt_len;
	pipe->tb_credits -= pkt_len;
	pipe->tc_credits[tc_index] -= pkt_len;
	pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;

	return 1;
}

#endif /* RTE_SCHED_SUBPORT_TC_OV */
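/*
 * Note on the masking trick above: for traffic classes 0..2,
 * pipe_tc_ov_mask1[] substitutes UINT32_MAX, so the oversubscription
 * term can never fail the credit check, and pipe_tc_ov_mask2[] zeroes
 * the debit, so tc_ov_credits is only consumed by best-effort (TC 3)
 * packets. This keeps the check branch-free across traffic classes.
 */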
static inline int
grinder_schedule(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;

	if (!grinder_credits_check(port, pos))
		return 0;

	/* Advance port time */
	port->time += pkt_len;

	/* Send packet */
	port->pkts_out[port->n_pkts_out++] = pkt;
	queue->qr++;
	grinder->wrr_tokens[grinder->qpos] += pkt_len * grinder->wrr_cost[grinder->qpos];
	if (queue->qr == queue->qw) {
		uint32_t qindex = grinder->qindex[grinder->qpos];

		rte_bitmap_clear(port->bmp, qindex);
		grinder->qmask &= ~(1 << grinder->qpos);
		grinder->wrr_mask[grinder->qpos] = 0;
		rte_sched_port_set_queue_empty_timestamp(port, qindex);
	}

	/* Reset pipe loop detection */
	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
	grinder->productive = 1;

	return 1;
}
#ifdef SCHED_VECTOR_SSE4

static inline int
grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
{
	__m128i index = _mm_set1_epi32(base_pipe);
	__m128i pipes = _mm_load_si128((__m128i *)port->grinder_base_bmp_pos);
	__m128i res = _mm_cmpeq_epi32(pipes, index);

	pipes = _mm_load_si128((__m128i *)(port->grinder_base_bmp_pos + 4));
	pipes = _mm_cmpeq_epi32(pipes, index);
	res = _mm_or_si128(res, pipes);

	if (_mm_testz_si128(res, res))
		return 0;

	return 1;
}

#else

static inline int
grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
{
	uint32_t i;

	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) {
		if (port->grinder_base_bmp_pos[i] == base_pipe)
			return 1;
	}

	return 0;
}

#endif /* RTE_SCHED_OPTIMIZATIONS */
static inline void
grinder_pcache_populate(struct rte_sched_port *port, uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint16_t w[4];

	grinder->pcache_w = 0;
	grinder->pcache_r = 0;

	w[0] = (uint16_t) bmp_slab;
	w[1] = (uint16_t) (bmp_slab >> 16);
	w[2] = (uint16_t) (bmp_slab >> 32);
	w[3] = (uint16_t) (bmp_slab >> 48);

	grinder->pcache_qmask[grinder->pcache_w] = w[0];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
	grinder->pcache_w += (w[0] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[1];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
	grinder->pcache_w += (w[1] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[2];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
	grinder->pcache_w += (w[2] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[3];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
	grinder->pcache_w += (w[3] != 0);
}
static inline void
grinder_tccache_populate(struct rte_sched_port *port, uint32_t pos, uint32_t qindex, uint16_t qmask)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint8_t b[4];

	grinder->tccache_w = 0;
	grinder->tccache_r = 0;

	b[0] = (uint8_t) (qmask & 0xF);
	b[1] = (uint8_t) ((qmask >> 4) & 0xF);
	b[2] = (uint8_t) ((qmask >> 8) & 0xF);
	b[3] = (uint8_t) ((qmask >> 12) & 0xF);

	grinder->tccache_qmask[grinder->tccache_w] = b[0];
	grinder->tccache_qindex[grinder->tccache_w] = qindex;
	grinder->tccache_w += (b[0] != 0);

	grinder->tccache_qmask[grinder->tccache_w] = b[1];
	grinder->tccache_qindex[grinder->tccache_w] = qindex + 4;
	grinder->tccache_w += (b[1] != 0);

	grinder->tccache_qmask[grinder->tccache_w] = b[2];
	grinder->tccache_qindex[grinder->tccache_w] = qindex + 8;
	grinder->tccache_w += (b[2] != 0);

	grinder->tccache_qmask[grinder->tccache_w] = b[3];
	grinder->tccache_qindex[grinder->tccache_w] = qindex + 12;
	grinder->tccache_w += (b[3] != 0);
}
static inline int
grinder_next_tc(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_mbuf **qbase;
	uint32_t qindex;
	uint16_t qsize;

	if (grinder->tccache_r == grinder->tccache_w)
		return 0;

	qindex = grinder->tccache_qindex[grinder->tccache_r];
	qbase = rte_sched_port_qbase(port, qindex);
	qsize = rte_sched_port_qsize(port, qindex);

	grinder->tc_index = (qindex >> 2) & 0x3;
	grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
	grinder->qsize = qsize;

	grinder->qindex[0] = qindex;
	grinder->qindex[1] = qindex + 1;
	grinder->qindex[2] = qindex + 2;
	grinder->qindex[3] = qindex + 3;

	grinder->queue[0] = port->queue + qindex;
	grinder->queue[1] = port->queue + qindex + 1;
	grinder->queue[2] = port->queue + qindex + 2;
	grinder->queue[3] = port->queue + qindex + 3;

	grinder->qbase[0] = qbase;
	grinder->qbase[1] = qbase + qsize;
	grinder->qbase[2] = qbase + 2 * qsize;
	grinder->qbase[3] = qbase + 3 * qsize;

	grinder->tccache_r++;
	return 1;
}
static inline int
grinder_next_pipe(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint32_t pipe_qindex;
	uint16_t pipe_qmask;

	if (grinder->pcache_r < grinder->pcache_w) {
		pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
		pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
		grinder->pcache_r++;
	} else {
		uint64_t bmp_slab = 0;
		uint32_t bmp_pos = 0;

		/* Get another non-empty pipe group */
		if (unlikely(rte_bitmap_scan(port->bmp, &bmp_pos, &bmp_slab) <= 0))
			return 0;

#ifdef RTE_SCHED_DEBUG
		debug_check_queue_slab(port, bmp_pos, bmp_slab);
#endif

		/* Return if pipe group already in one of the other grinders */
		port->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
		if (unlikely(grinder_pipe_exists(port, bmp_pos)))
			return 0;

		port->grinder_base_bmp_pos[pos] = bmp_pos;

		/* Install new pipe group into grinder's pipe cache */
		grinder_pcache_populate(port, pos, bmp_pos, bmp_slab);

		pipe_qmask = grinder->pcache_qmask[0];
		pipe_qindex = grinder->pcache_qindex[0];
		grinder->pcache_r = 1;
	}

	/* Install new pipe in the grinder */
	grinder->pindex = pipe_qindex >> 4;
	grinder->subport = port->subport + (grinder->pindex / port->n_pipes_per_subport);
	grinder->pipe = port->pipe + grinder->pindex;
	grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
	grinder->productive = 0;

	grinder_tccache_populate(port, pos, pipe_qindex, pipe_qmask);
	grinder_next_tc(port, pos);

	/* Check for pipe exhaustion */
	if (grinder->pindex == port->pipe_loop) {
		port->pipe_exhaustion = 1;
		port->pipe_loop = RTE_SCHED_PIPE_INVALID;
	}

	return 1;
}
static inline void
grinder_wrr_load(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
	uint32_t tc_index = grinder->tc_index;
	uint32_t qmask = grinder->qmask;
	uint32_t qindex;

	qindex = tc_index * 4;

	grinder->wrr_tokens[0] = ((uint16_t) pipe->wrr_tokens[qindex]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[1] = ((uint16_t) pipe->wrr_tokens[qindex + 1]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[2] = ((uint16_t) pipe->wrr_tokens[qindex + 2]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[3] = ((uint16_t) pipe->wrr_tokens[qindex + 3]) << RTE_SCHED_WRR_SHIFT;

	grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
	grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
	grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
	grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;

	grinder->wrr_cost[0] = pipe_params->wrr_cost[qindex];
	grinder->wrr_cost[1] = pipe_params->wrr_cost[qindex + 1];
	grinder->wrr_cost[2] = pipe_params->wrr_cost[qindex + 2];
	grinder->wrr_cost[3] = pipe_params->wrr_cost[qindex + 3];
}
static inline void
grinder_wrr_store(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	uint32_t tc_index = grinder->tc_index;
	uint32_t qindex;

	qindex = tc_index * 4;

	pipe->wrr_tokens[qindex] = (grinder->wrr_tokens[0] & grinder->wrr_mask[0])
		>> RTE_SCHED_WRR_SHIFT;
	pipe->wrr_tokens[qindex + 1] = (grinder->wrr_tokens[1] & grinder->wrr_mask[1])
		>> RTE_SCHED_WRR_SHIFT;
	pipe->wrr_tokens[qindex + 2] = (grinder->wrr_tokens[2] & grinder->wrr_mask[2])
		>> RTE_SCHED_WRR_SHIFT;
	pipe->wrr_tokens[qindex + 3] = (grinder->wrr_tokens[3] & grinder->wrr_mask[3])
		>> RTE_SCHED_WRR_SHIFT;
}
static inline void
grinder_wrr(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint16_t wrr_tokens_min;

	grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
	grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
	grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
	grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];

	grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
	wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];

	grinder->wrr_tokens[0] -= wrr_tokens_min;
	grinder->wrr_tokens[1] -= wrr_tokens_min;
	grinder->wrr_tokens[2] -= wrr_tokens_min;
	grinder->wrr_tokens[3] -= wrr_tokens_min;
}
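/*
 * Worked example (illustrative): with wrr_cost = {8, 4, 2, 1} and all
 * four queues backlogged, each scheduled byte charges a queue its cost
 * in tokens, and grinder_wrr() always picks the queue with the minimum
 * token count, so the cost-1 queue is served about 8x as often as the
 * cost-8 queue, i.e. bandwidth is shared 1:2:4:8 in weight order.
 */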
#define grinder_evict(port, pos)

static inline void
grinder_prefetch_pipe(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;

	rte_prefetch0(grinder->pipe);
	rte_prefetch0(grinder->queue[0]);
}

static inline void
grinder_prefetch_tc_queue_arrays(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint16_t qsize, qr[4];

	qsize = grinder->qsize;
	qr[0] = grinder->queue[0]->qr & (qsize - 1);
	qr[1] = grinder->queue[1]->qr & (qsize - 1);
	qr[2] = grinder->queue[2]->qr & (qsize - 1);
	qr[3] = grinder->queue[3]->qr & (qsize - 1);

	rte_prefetch0(grinder->qbase[0] + qr[0]);
	rte_prefetch0(grinder->qbase[1] + qr[1]);

	grinder_wrr_load(port, pos);
	grinder_wrr(port, pos);

	rte_prefetch0(grinder->qbase[2] + qr[2]);
	rte_prefetch0(grinder->qbase[3] + qr[3]);
}

static inline void
grinder_prefetch_mbuf(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;
	uint32_t qpos = grinder->qpos;
	struct rte_mbuf **qbase = grinder->qbase[qpos];
	uint16_t qsize = grinder->qsize;
	uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);

	grinder->pkt = qbase[qr];
	rte_prefetch0(grinder->pkt);

	if (unlikely((qr & 0x7) == 7)) {
		uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);

		rte_prefetch0(qbase + qr_next);
	}
}
static inline uint32_t
grinder_handle(struct rte_sched_port *port, uint32_t pos)
{
	struct rte_sched_grinder *grinder = port->grinder + pos;

	switch (grinder->state) {
	case e_GRINDER_PREFETCH_PIPE:
	{
		if (grinder_next_pipe(port, pos)) {
			grinder_prefetch_pipe(port, pos);
			port->busy_grinders++;

			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
			return 0;
		}

		return 0;
	}

	case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
	{
		struct rte_sched_pipe *pipe = grinder->pipe;

		grinder->pipe_params = port->pipe_profiles + pipe->profile;
		grinder_prefetch_tc_queue_arrays(port, pos);
		grinder_credits_update(port, pos);

		grinder->state = e_GRINDER_PREFETCH_MBUF;
		return 0;
	}

	case e_GRINDER_PREFETCH_MBUF:
	{
		grinder_prefetch_mbuf(port, pos);

		grinder->state = e_GRINDER_READ_MBUF;
		return 0;
	}

	case e_GRINDER_READ_MBUF:
	{
		uint32_t result = 0;

		result = grinder_schedule(port, pos);

		/* Look for next packet within the same TC */
		if (result && grinder->qmask) {
			grinder_wrr(port, pos);
			grinder_prefetch_mbuf(port, pos);

			return 1;
		}
		grinder_wrr_store(port, pos);

		/* Look for another active TC within same pipe */
		if (grinder_next_tc(port, pos)) {
			grinder_prefetch_tc_queue_arrays(port, pos);

			grinder->state = e_GRINDER_PREFETCH_MBUF;
			return result;
		}

		if (grinder->productive == 0 &&
		    port->pipe_loop == RTE_SCHED_PIPE_INVALID)
			port->pipe_loop = grinder->pindex;

		grinder_evict(port, pos);

		/* Look for another active pipe */
		if (grinder_next_pipe(port, pos)) {
			grinder_prefetch_pipe(port, pos);

			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
			return result;
		}

		/* No active pipe found */
		port->busy_grinders--;

		grinder->state = e_GRINDER_PREFETCH_PIPE;
		return result;
	}

	default:
		rte_panic("Algorithmic error (invalid state)\n");
		return 0;
	}
}
static inline void
rte_sched_port_time_resync(struct rte_sched_port *port)
{
	uint64_t cycles = rte_get_tsc_cycles();
	uint64_t cycles_diff = cycles - port->time_cpu_cycles;
	uint64_t bytes_diff;

	/* Compute elapsed time in bytes */
	bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT,
					   port->inv_cycles_per_byte);

	/* Advance port time */
	port->time_cpu_cycles = cycles;
	port->time_cpu_bytes += bytes_diff;
	if (port->time < port->time_cpu_bytes)
		port->time = port->time_cpu_bytes;

	/* Reset pipe loop detection */
	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
}
static inline int
rte_sched_port_exceptions(struct rte_sched_port *port, int second_pass)
{
	int exceptions;

	/* Check if any exception flag is set */
	exceptions = (second_pass && port->busy_grinders == 0) ||
		(port->pipe_exhaustion == 1);

	/* Clear exception flags */
	port->pipe_exhaustion = 0;

	return exceptions;
}
int
rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	uint32_t i, count;

	port->pkts_out = pkts;
	port->n_pkts_out = 0;

	rte_sched_port_time_resync(port);

	/* Take each queue in the grinder one step further */
	for (i = 0, count = 0; ; i++) {
		count += grinder_handle(port, i & (RTE_SCHED_PORT_N_GRINDERS - 1));
		if ((count == n_pkts) ||
		    rte_sched_port_exceptions(port, i >= RTE_SCHED_PORT_N_GRINDERS)) {
			break;
		}
	}

	return count;
}