sched: remove vector functions
dpdk.git: lib/sched/rte_sched.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf.h>
#include <rte_bitmap.h>
#include <rte_reciprocal.h>

#include "rte_sched.h"
#include "rte_sched_common.h"
#include "rte_approx.h"

#ifdef __INTEL_COMPILER
#pragma warning(disable:2259) /* conversion may lose significant bits */
#endif

#define RTE_SCHED_TB_RATE_CONFIG_ERR          (1e-7)
#define RTE_SCHED_WRR_SHIFT                   3
#define RTE_SCHED_MAX_QUEUES_PER_TC           RTE_SCHED_BE_QUEUES_PER_PIPE
#define RTE_SCHED_GRINDER_PCACHE_SIZE         (64 / RTE_SCHED_QUEUES_PER_PIPE)
#define RTE_SCHED_PIPE_INVALID                UINT32_MAX
#define RTE_SCHED_BMP_POS_INVALID             UINT32_MAX

/* Scaling for cycles_per_byte calculation
 * Chosen so that minimum rate is 480 bit/sec
 */
#define RTE_SCHED_TIME_SHIFT                  8
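
/*
 * Worked example, assuming a 1 GHz TSC: the port config below computes
 * cycles_per_byte = (tsc_hz << RTE_SCHED_TIME_SHIFT) / rate, which must
 * fit the 32-bit input of rte_reciprocal_value(), so the smallest
 * supported rate is roughly (1e9 << 8) / 2^32 ~= 60 bytes/sec, i.e.
 * about 480 bit/sec.
 */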

struct rte_sched_pipe_profile {
        /* Token bucket (TB) */
        uint64_t tb_period;
        uint64_t tb_credits_per_period;
        uint64_t tb_size;

        /* Pipe traffic classes */
        uint64_t tc_period;
        uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint8_t tc_ov_weight;

        /* Pipe best-effort traffic class queues */
        uint8_t  wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
};

struct rte_sched_pipe {
        /* Token bucket (TB) */
        uint64_t tb_time; /* time of last update */
        uint64_t tb_credits;

        /* Pipe profile and flags */
        uint32_t profile;

        /* Traffic classes (TCs) */
        uint64_t tc_time; /* time of next update */
        uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

        /* Weighted Round Robin (WRR) */
        uint8_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];

        /* TC oversubscription */
        uint64_t tc_ov_credits;
        uint8_t tc_ov_period_id;
} __rte_cache_aligned;

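/*
 * Per-queue state: qw/qr are free-running 16-bit write/read positions;
 * the actual array slot is obtained by masking with (qsize - 1), which
 * is why each qsize must be a power of 2 no larger than 32K.
 */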
struct rte_sched_queue {
        uint16_t qw;
        uint16_t qr;
};

struct rte_sched_queue_extra {
        struct rte_sched_queue_stats stats;
#ifdef RTE_SCHED_CMAN
        RTE_STD_C11
        union {
                struct rte_red red;
                struct rte_pie pie;
        };
#endif
};

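/*
 * Grinder pipeline stages: each grinder moves a pipe through prefetch
 * of the pipe structure, prefetch of its TC queue arrays, prefetch of
 * the head mbuf, and finally the mbuf read/schedule step, so memory
 * latency is overlapped across the grinders.
 */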
enum grinder_state {
        e_GRINDER_PREFETCH_PIPE = 0,
        e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
        e_GRINDER_PREFETCH_MBUF,
        e_GRINDER_READ_MBUF
};

struct rte_sched_subport_profile {
        /* Token bucket (TB) */
        uint64_t tb_period;
        uint64_t tb_credits_per_period;
        uint64_t tb_size;

        uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint64_t tc_period;
};

struct rte_sched_grinder {
        /* Pipe cache */
        uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
        uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
        uint32_t pcache_w;
        uint32_t pcache_r;

        /* Current pipe */
        enum grinder_state state;
        uint32_t productive;
        uint32_t pindex;
        struct rte_sched_subport *subport;
        struct rte_sched_subport_profile *subport_params;
        struct rte_sched_pipe *pipe;
        struct rte_sched_pipe_profile *pipe_params;

        /* TC cache */
        uint8_t tccache_qmask[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint32_t tccache_qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint32_t tccache_w;
        uint32_t tccache_r;

        /* Current TC */
        uint32_t tc_index;
        struct rte_sched_queue *queue[RTE_SCHED_MAX_QUEUES_PER_TC];
        struct rte_mbuf **qbase[RTE_SCHED_MAX_QUEUES_PER_TC];
        uint32_t qindex[RTE_SCHED_MAX_QUEUES_PER_TC];
        uint16_t qsize;
        uint32_t qmask;
        uint32_t qpos;
        struct rte_mbuf *pkt;

        /* WRR */
        uint16_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
        uint16_t wrr_mask[RTE_SCHED_BE_QUEUES_PER_PIPE];
        uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
};

struct rte_sched_subport {
        /* Token bucket (TB) */
        uint64_t tb_time; /* time of last update */
        uint64_t tb_credits;

        /* Traffic classes (TCs) */
        uint64_t tc_time; /* time of next update */
        uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

        /* TC oversubscription */
        uint64_t tc_ov_wm;
        uint64_t tc_ov_wm_min;
        uint64_t tc_ov_wm_max;
        uint8_t tc_ov_period_id;
        uint8_t tc_ov;
        uint32_t tc_ov_n;
        double tc_ov_rate;

        /* Statistics */
        struct rte_sched_subport_stats stats __rte_cache_aligned;

        /* Subport profile */
        uint32_t profile;
        /* Subport pipes */
        uint32_t n_pipes_per_subport_enabled;
        uint32_t n_pipe_profiles;
        uint32_t n_max_pipe_profiles;

        /* Pipe best-effort TC rate */
        uint64_t pipe_tc_be_rate_max;

        /* Pipe queues size */
        uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

#ifdef RTE_SCHED_CMAN
        bool cman_enabled;
        enum rte_sched_cman_mode cman;

        RTE_STD_C11
        union {
                struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
                struct rte_pie_config pie_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        };
#endif

        /* Scheduling loop detection */
        uint32_t pipe_loop;
        uint32_t pipe_exhaustion;

        /* Bitmap */
        struct rte_bitmap *bmp;
        uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;

        /* Grinders */
        struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
        uint32_t busy_grinders;

        /* Queue base calculation */
        uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
        uint32_t qsize_sum;

        struct rte_sched_pipe *pipe;
        struct rte_sched_queue *queue;
        struct rte_sched_queue_extra *queue_extra;
        struct rte_sched_pipe_profile *pipe_profiles;
        uint8_t *bmp_array;
        struct rte_mbuf **queue_array;
        uint8_t memory[0] __rte_cache_aligned;
} __rte_cache_aligned;

struct rte_sched_port {
        /* User parameters */
        uint32_t n_subports_per_port;
        uint32_t n_pipes_per_subport;
        uint32_t n_pipes_per_subport_log2;
        uint16_t pipe_queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint8_t pipe_tc[RTE_SCHED_QUEUES_PER_PIPE];
        uint8_t tc_queue[RTE_SCHED_QUEUES_PER_PIPE];
        uint32_t n_subport_profiles;
        uint32_t n_max_subport_profiles;
        uint64_t rate;
        uint32_t mtu;
        uint32_t frame_overhead;
        int socket;

        /* Timing */
        uint64_t time_cpu_cycles;     /* Current CPU time measured in CPU cycles */
        uint64_t time_cpu_bytes;      /* Current CPU time measured in bytes */
        uint64_t time;                /* Current NIC TX time measured in bytes */
        struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */
        uint64_t cycles_per_byte;

        /* Grinders */
        struct rte_mbuf **pkts_out;
        uint32_t n_pkts_out;
        uint32_t subport_id;

        /* Large data structures */
        struct rte_sched_subport_profile *subport_profiles;
        struct rte_sched_subport *subports[0] __rte_cache_aligned;
} __rte_cache_aligned;

enum rte_sched_subport_array {
        e_RTE_SCHED_SUBPORT_ARRAY_PIPE = 0,
        e_RTE_SCHED_SUBPORT_ARRAY_QUEUE,
        e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA,
        e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES,
        e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY,
        e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY,
        e_RTE_SCHED_SUBPORT_ARRAY_TOTAL,
};

static inline uint32_t
rte_sched_subport_pipe_queues(struct rte_sched_subport *subport)
{
        return RTE_SCHED_QUEUES_PER_PIPE * subport->n_pipes_per_subport_enabled;
}

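/*
 * Illustrative index decomposition, assuming RTE_SCHED_QUEUES_PER_PIPE
 * is 16: qindex = pipe * 16 + in-pipe queue, so e.g. qindex 37 resolves
 * to pipe 2 (37 >> 4) and in-pipe queue 5 (37 & 15).
 */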
static inline struct rte_mbuf **
rte_sched_subport_pipe_qbase(struct rte_sched_subport *subport, uint32_t qindex)
{
        uint32_t pindex = qindex >> 4;
        uint32_t qpos = qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1);

        return (subport->queue_array + pindex *
                subport->qsize_sum + subport->qsize_add[qpos]);
}

static inline uint16_t
rte_sched_subport_pipe_qsize(struct rte_sched_port *port,
struct rte_sched_subport *subport, uint32_t qindex)
{
        uint32_t tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

        return subport->qsize[tc];
}

static inline uint32_t
rte_sched_port_queues_per_port(struct rte_sched_port *port)
{
        uint32_t n_queues = 0, i;

        for (i = 0; i < port->n_subports_per_port; i++)
                n_queues += rte_sched_subport_pipe_queues(port->subports[i]);

        return n_queues;
}

static inline uint16_t
rte_sched_port_pipe_queue(struct rte_sched_port *port, uint32_t traffic_class)
{
        uint16_t pipe_queue = port->pipe_queue[traffic_class];

        return pipe_queue;
}

static inline uint8_t
rte_sched_port_pipe_tc(struct rte_sched_port *port, uint32_t qindex)
{
        uint8_t pipe_tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

        return pipe_tc;
}

static inline uint8_t
rte_sched_port_tc_queue(struct rte_sched_port *port, uint32_t qindex)
{
        uint8_t tc_queue = port->tc_queue[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

        return tc_queue;
}

static int
pipe_profile_check(struct rte_sched_pipe_params *params,
        uint64_t rate, uint16_t *qsize)
{
        uint32_t i;

        /* Pipe parameters */
        if (params == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter params\n", __func__);
                return -EINVAL;
        }

        /* TB rate: non-zero, not greater than port rate */
        if (params->tb_rate == 0 ||
                params->tb_rate > rate) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tb rate\n", __func__);
                return -EINVAL;
        }

        /* TB size: non-zero */
        if (params->tb_size == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tb size\n", __func__);
                return -EINVAL;
        }

        /* TC rate: non-zero if qsize non-zero, less than pipe rate */
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                if ((qsize[i] == 0 && params->tc_rate[i] != 0) ||
                        (qsize[i] != 0 && (params->tc_rate[i] == 0 ||
                        params->tc_rate[i] > params->tb_rate))) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Incorrect value for qsize or tc_rate\n", __func__);
                        return -EINVAL;
                }
        }

        if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
                qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for be traffic class rate\n", __func__);
                return -EINVAL;
        }

        /* TC period: non-zero */
        if (params->tc_period == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tc period\n", __func__);
                return -EINVAL;
        }

        /* Best effort tc oversubscription weight: non-zero */
        if (params->tc_ov_weight == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tc ov weight\n", __func__);
                return -EINVAL;
        }

        /* Queue WRR weights: non-zero */
        for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
                if (params->wrr_weights[i] == 0) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Incorrect value for wrr weight\n", __func__);
                        return -EINVAL;
                }
        }

        return 0;
}

static int
subport_profile_check(struct rte_sched_subport_profile_params *params,
        uint64_t rate)
{
        uint32_t i;

        /* Check user parameters */
        if (params == NULL) {
                RTE_LOG(ERR, SCHED, "%s: "
                "Incorrect value for parameter params\n", __func__);
                return -EINVAL;
        }

        if (params->tb_rate == 0 || params->tb_rate > rate) {
                RTE_LOG(ERR, SCHED, "%s: "
                "Incorrect value for tb rate\n", __func__);
                return -EINVAL;
        }

        if (params->tb_size == 0) {
                RTE_LOG(ERR, SCHED, "%s: "
                "Incorrect value for tb size\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                uint64_t tc_rate = params->tc_rate[i];

                if (tc_rate == 0 || (tc_rate > params->tb_rate)) {
                        RTE_LOG(ERR, SCHED, "%s: "
                        "Incorrect value for tc rate\n", __func__);
                        return -EINVAL;
                }
        }

        if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
                RTE_LOG(ERR, SCHED, "%s: "
                "Incorrect tc rate(best effort)\n", __func__);
                return -EINVAL;
        }

        if (params->tc_period == 0) {
                RTE_LOG(ERR, SCHED, "%s: "
                "Incorrect value for tc period\n", __func__);
                return -EINVAL;
        }

        return 0;
}

static int
rte_sched_port_check_params(struct rte_sched_port_params *params)
{
        uint32_t i;

        if (params == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter params\n", __func__);
                return -EINVAL;
        }

        /* socket */
        if (params->socket < 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for socket id\n", __func__);
                return -EINVAL;
        }

        /* rate */
        if (params->rate == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for rate\n", __func__);
                return -EINVAL;
        }

        /* mtu */
        if (params->mtu == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for mtu\n", __func__);
                return -EINVAL;
        }

        /* n_subports_per_port: non-zero, limited to 16 bits, power of 2 */
        if (params->n_subports_per_port == 0 ||
            params->n_subports_per_port > 1u << 16 ||
            !rte_is_power_of_2(params->n_subports_per_port)) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for number of subports\n", __func__);
                return -EINVAL;
        }

        if (params->subport_profiles == NULL ||
                params->n_subport_profiles == 0 ||
                params->n_max_subport_profiles == 0 ||
                params->n_subport_profiles > params->n_max_subport_profiles) {
                RTE_LOG(ERR, SCHED,
                "%s: Incorrect value for subport profiles\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < params->n_subport_profiles; i++) {
                struct rte_sched_subport_profile_params *p =
                                                params->subport_profiles + i;
                int status;

                status = subport_profile_check(p, params->rate);
                if (status != 0) {
                        RTE_LOG(ERR, SCHED,
                        "%s: subport profile check failed(%d)\n",
                        __func__, status);
                        return -EINVAL;
                }
        }

        /* n_pipes_per_subport: non-zero, power of 2 */
        if (params->n_pipes_per_subport == 0 ||
            !rte_is_power_of_2(params->n_pipes_per_subport)) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for maximum pipes number\n", __func__);
                return -EINVAL;
        }

        return 0;
}

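/*
 * The subport's trailing memory[] region packs, in this order: pipes,
 * queues, queue extras, pipe profiles, the bitmap array and the mbuf
 * queue array, each rounded up to a cache line boundary. The helper
 * below returns the byte offset of the requested array, or the total
 * footprint for e_RTE_SCHED_SUBPORT_ARRAY_TOTAL.
 */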
static uint32_t
rte_sched_subport_get_array_base(struct rte_sched_subport_params *params,
        enum rte_sched_subport_array array)
{
        uint32_t n_pipes_per_subport = params->n_pipes_per_subport_enabled;
        uint32_t n_subport_pipe_queues =
                RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport;

        uint32_t size_pipe = n_pipes_per_subport * sizeof(struct rte_sched_pipe);
        uint32_t size_queue =
                n_subport_pipe_queues * sizeof(struct rte_sched_queue);
        uint32_t size_queue_extra
                = n_subport_pipe_queues * sizeof(struct rte_sched_queue_extra);
        uint32_t size_pipe_profiles = params->n_max_pipe_profiles *
                sizeof(struct rte_sched_pipe_profile);
        uint32_t size_bmp_array =
                rte_bitmap_get_memory_footprint(n_subport_pipe_queues);
        uint32_t size_per_pipe_queue_array, size_queue_array;

        uint32_t base, i;

        size_per_pipe_queue_array = 0;
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                if (i < RTE_SCHED_TRAFFIC_CLASS_BE)
                        size_per_pipe_queue_array +=
                                params->qsize[i] * sizeof(struct rte_mbuf *);
                else
                        size_per_pipe_queue_array += RTE_SCHED_MAX_QUEUES_PER_TC *
                                params->qsize[i] * sizeof(struct rte_mbuf *);
        }
        size_queue_array = n_pipes_per_subport * size_per_pipe_queue_array;

        base = 0;

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_pipe);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_queue);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);

        return base;
}

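/*
 * qsize_add[] is a prefix sum of per-queue sizes within one pipe: one
 * queue per strict-priority TC, then the four best-effort queues that
 * all share qsize[RTE_SCHED_TRAFFIC_CLASS_BE]. Illustrative total,
 * assuming 12 strict-priority TCs of 64 entries and a best-effort
 * qsize of 1024: qsize_sum = 12 * 64 + 4 * 1024 = 4864 mbuf slots.
 */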
static void
rte_sched_subport_config_qsize(struct rte_sched_subport *subport)
{
        uint32_t i;

        subport->qsize_add[0] = 0;

        /* Strict priority traffic class */
        for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1];

        /* Best-effort traffic class */
        subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] =
                subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE] +
                subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
        subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] =
                subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] +
                subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
        subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] =
                subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] +
                subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];

        subport->qsize_sum = subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] +
                subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
}

static void
rte_sched_port_log_pipe_profile(struct rte_sched_subport *subport, uint32_t i)
{
        struct rte_sched_pipe_profile *p = subport->pipe_profiles + i;

        RTE_LOG(DEBUG, SCHED, "Low level config for pipe profile %u:\n"
                "       Token bucket: period = %"PRIu64", credits per period = %"PRIu64", size = %"PRIu64"\n"
                "       Traffic classes: period = %"PRIu64",\n"
                "       credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
                ", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
                ", %"PRIu64", %"PRIu64", %"PRIu64"]\n"
                "       Best-effort traffic class oversubscription: weight = %hhu\n"
                "       WRR cost: [%hhu, %hhu, %hhu, %hhu]\n",
                i,

                /* Token bucket */
                p->tb_period,
                p->tb_credits_per_period,
                p->tb_size,

                /* Traffic classes */
                p->tc_period,
                p->tc_credits_per_period[0],
                p->tc_credits_per_period[1],
                p->tc_credits_per_period[2],
                p->tc_credits_per_period[3],
                p->tc_credits_per_period[4],
                p->tc_credits_per_period[5],
                p->tc_credits_per_period[6],
                p->tc_credits_per_period[7],
                p->tc_credits_per_period[8],
                p->tc_credits_per_period[9],
                p->tc_credits_per_period[10],
                p->tc_credits_per_period[11],
                p->tc_credits_per_period[12],

                /* Best-effort traffic class oversubscription */
                p->tc_ov_weight,

                /* WRR */
                p->wrr_cost[0], p->wrr_cost[1], p->wrr_cost[2], p->wrr_cost[3]);
}

static void
rte_sched_port_log_subport_profile(struct rte_sched_port *port, uint32_t i)
{
        struct rte_sched_subport_profile *p = port->subport_profiles + i;

        RTE_LOG(DEBUG, SCHED, "Low level config for subport profile %u:\n"
        "Token bucket: period = %"PRIu64", credits per period = %"PRIu64","
        "size = %"PRIu64"\n"
        "Traffic classes: period = %"PRIu64",\n"
        "credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
        " %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
        " %"PRIu64", %"PRIu64", %"PRIu64"]\n",
        i,

        /* Token bucket */
        p->tb_period,
        p->tb_credits_per_period,
        p->tb_size,

        /* Traffic classes */
        p->tc_period,
        p->tc_credits_per_period[0],
        p->tc_credits_per_period[1],
        p->tc_credits_per_period[2],
        p->tc_credits_per_period[3],
        p->tc_credits_per_period[4],
        p->tc_credits_per_period[5],
        p->tc_credits_per_period[6],
        p->tc_credits_per_period[7],
        p->tc_credits_per_period[8],
        p->tc_credits_per_period[9],
        p->tc_credits_per_period[10],
        p->tc_credits_per_period[11],
        p->tc_credits_per_period[12]);
}

static inline uint64_t
rte_sched_time_ms_to_bytes(uint64_t time_ms, uint64_t rate)
{
        uint64_t time = time_ms;

        time = (time * rate) / 1000;

        return time;
}
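/* Example for the conversion above: a tc_period of 10 ms at rate
 * 1,250,000,000 bytes/sec (10 GbE) converts to 12,500,000 bytes.
 */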

static void
rte_sched_pipe_profile_convert(struct rte_sched_subport *subport,
        struct rte_sched_pipe_params *src,
        struct rte_sched_pipe_profile *dst,
        uint64_t rate)
{
        uint32_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
        uint32_t lcd1, lcd2, lcd;
        uint32_t i;

        /* Token Bucket */
        if (src->tb_rate == rate) {
                dst->tb_credits_per_period = 1;
                dst->tb_period = 1;
        } else {
                double tb_rate = (double) src->tb_rate
                                / (double) rate;
                double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

                rte_approx_64(tb_rate, d, &dst->tb_credits_per_period,
                        &dst->tb_period);
        }

        dst->tb_size = src->tb_size;

        /* Traffic Classes */
        dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period,
                                                rate);

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                if (subport->qsize[i])
                        dst->tc_credits_per_period[i]
                                = rte_sched_time_ms_to_bytes(src->tc_period,
                                        src->tc_rate[i]);

        dst->tc_ov_weight = src->tc_ov_weight;

        /* WRR queues */
        wrr_cost[0] = src->wrr_weights[0];
        wrr_cost[1] = src->wrr_weights[1];
        wrr_cost[2] = src->wrr_weights[2];
        wrr_cost[3] = src->wrr_weights[3];

        lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
        lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
        lcd = rte_get_lcd(lcd1, lcd2);

        wrr_cost[0] = lcd / wrr_cost[0];
        wrr_cost[1] = lcd / wrr_cost[1];
        wrr_cost[2] = lcd / wrr_cost[2];
        wrr_cost[3] = lcd / wrr_cost[3];

        dst->wrr_cost[0] = (uint8_t) wrr_cost[0];
        dst->wrr_cost[1] = (uint8_t) wrr_cost[1];
        dst->wrr_cost[2] = (uint8_t) wrr_cost[2];
        dst->wrr_cost[3] = (uint8_t) wrr_cost[3];
}
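/*
 * WRR cost example (illustrative): weights {1, 2, 4, 8} have least
 * common denominator 8, giving costs {8, 4, 2, 1}; a queue with twice
 * the weight pays half the cost per byte and thus receives twice the
 * best-effort bandwidth.
 */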

static void
rte_sched_subport_profile_convert(struct rte_sched_subport_profile_params *src,
        struct rte_sched_subport_profile *dst,
        uint64_t rate)
{
        uint32_t i;

        /* Token Bucket */
        if (src->tb_rate == rate) {
                dst->tb_credits_per_period = 1;
                dst->tb_period = 1;
        } else {
                double tb_rate = (double) src->tb_rate
                                / (double) rate;
                double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

                rte_approx_64(tb_rate, d, &dst->tb_credits_per_period,
                        &dst->tb_period);
        }

        dst->tb_size = src->tb_size;

        /* Traffic Classes */
        dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period, rate);

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                dst->tc_credits_per_period[i]
                        = rte_sched_time_ms_to_bytes(src->tc_period,
                                src->tc_rate[i]);
}

static void
rte_sched_subport_config_pipe_profile_table(struct rte_sched_subport *subport,
        struct rte_sched_subport_params *params, uint64_t rate)
{
        uint32_t i;

        for (i = 0; i < subport->n_pipe_profiles; i++) {
                struct rte_sched_pipe_params *src = params->pipe_profiles + i;
                struct rte_sched_pipe_profile *dst = subport->pipe_profiles + i;

                rte_sched_pipe_profile_convert(subport, src, dst, rate);
                rte_sched_port_log_pipe_profile(subport, i);
        }

        subport->pipe_tc_be_rate_max = 0;
        for (i = 0; i < subport->n_pipe_profiles; i++) {
                struct rte_sched_pipe_params *src = params->pipe_profiles + i;
                uint64_t pipe_tc_be_rate = src->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];

                if (subport->pipe_tc_be_rate_max < pipe_tc_be_rate)
                        subport->pipe_tc_be_rate_max = pipe_tc_be_rate;
        }
}

static void
rte_sched_port_config_subport_profile_table(struct rte_sched_port *port,
        struct rte_sched_port_params *params,
        uint64_t rate)
{
        uint32_t i;

        for (i = 0; i < port->n_subport_profiles; i++) {
                struct rte_sched_subport_profile_params *src
                                = params->subport_profiles + i;
                struct rte_sched_subport_profile *dst
                                = port->subport_profiles + i;

                rte_sched_subport_profile_convert(src, dst, rate);
                rte_sched_port_log_subport_profile(port, i);
        }
}

static int
rte_sched_subport_check_params(struct rte_sched_subport_params *params,
        uint32_t n_max_pipes_per_subport,
        uint64_t rate)
{
        uint32_t i;

        /* Check user parameters */
        if (params == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter params\n", __func__);
                return -EINVAL;
        }

        /* qsize: if non-zero, power of 2,
         * no bigger than 32K (due to 16-bit read/write pointers)
         */
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                uint16_t qsize = params->qsize[i];

                if (qsize != 0 && !rte_is_power_of_2(qsize)) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Incorrect value for qsize\n", __func__);
                        return -EINVAL;
                }
        }

        if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
                RTE_LOG(ERR, SCHED, "%s: Incorrect qsize\n", __func__);
                return -EINVAL;
        }

        /* n_pipes_per_subport: non-zero, power of 2 */
        if (params->n_pipes_per_subport_enabled == 0 ||
                params->n_pipes_per_subport_enabled > n_max_pipes_per_subport ||
            !rte_is_power_of_2(params->n_pipes_per_subport_enabled)) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for pipes number\n", __func__);
                return -EINVAL;
        }

        /* pipe_profiles and n_pipe_profiles */
        if (params->pipe_profiles == NULL ||
            params->n_pipe_profiles == 0 ||
                params->n_max_pipe_profiles == 0 ||
                params->n_pipe_profiles > params->n_max_pipe_profiles) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for pipe profiles\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < params->n_pipe_profiles; i++) {
                struct rte_sched_pipe_params *p = params->pipe_profiles + i;
                int status;

                status = pipe_profile_check(p, rate, &params->qsize[0]);
                if (status != 0) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Pipe profile check failed(%d)\n", __func__, status);
                        return -EINVAL;
                }
        }

        return 0;
}

uint32_t
rte_sched_port_get_memory_footprint(struct rte_sched_port_params *port_params,
        struct rte_sched_subport_params **subport_params)
{
        uint32_t size0 = 0, size1 = 0, i;
        int status;

        status = rte_sched_port_check_params(port_params);
        if (status != 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Port scheduler port params check failed (%d)\n",
                        __func__, status);

                return 0;
        }

        for (i = 0; i < port_params->n_subports_per_port; i++) {
                struct rte_sched_subport_params *sp = subport_params[i];

                status = rte_sched_subport_check_params(sp,
                                port_params->n_pipes_per_subport,
                                port_params->rate);
                if (status != 0) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Port scheduler subport params check failed (%d)\n",
                                __func__, status);

                        return 0;
                }
        }

        size0 = sizeof(struct rte_sched_port);

        for (i = 0; i < port_params->n_subports_per_port; i++) {
                struct rte_sched_subport_params *sp = subport_params[i];

                size1 += rte_sched_subport_get_array_base(sp,
                                        e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
        }

        return size0 + size1;
}

struct rte_sched_port *
rte_sched_port_config(struct rte_sched_port_params *params)
{
        struct rte_sched_port *port = NULL;
        uint32_t size0, size1, size2;
        uint32_t cycles_per_byte;
        uint32_t i, j;
        int status;

        status = rte_sched_port_check_params(params);
        if (status != 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Port scheduler params check failed (%d)\n",
                        __func__, status);
                return NULL;
        }

        size0 = sizeof(struct rte_sched_port);
        size1 = params->n_subports_per_port * sizeof(struct rte_sched_subport *);
        size2 = params->n_max_subport_profiles *
                sizeof(struct rte_sched_subport_profile);

        /* Allocate memory to store the data structures */
        port = rte_zmalloc_socket("qos_params", size0 + size1,
                                 RTE_CACHE_LINE_SIZE, params->socket);
        if (port == NULL) {
                RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);

                return NULL;
        }

        /* Allocate memory to store the subport profile */
        port->subport_profiles = rte_zmalloc_socket("subport_profile", size2,
                                        RTE_CACHE_LINE_SIZE, params->socket);
        if (port->subport_profiles == NULL) {
                RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);
                rte_free(port);
                return NULL;
        }

        /* User parameters */
        port->n_subports_per_port = params->n_subports_per_port;
        port->n_subport_profiles = params->n_subport_profiles;
        port->n_max_subport_profiles = params->n_max_subport_profiles;
        port->n_pipes_per_subport = params->n_pipes_per_subport;
        port->n_pipes_per_subport_log2 =
                        __builtin_ctz(params->n_pipes_per_subport);
        port->socket = params->socket;

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                port->pipe_queue[i] = i;

        for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
                port->pipe_tc[i] = j;

                if (j < RTE_SCHED_TRAFFIC_CLASS_BE)
                        j++;
        }

        for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
                port->tc_queue[i] = j;

                if (i >= RTE_SCHED_TRAFFIC_CLASS_BE)
                        j++;
        }
        port->rate = params->rate;
        port->mtu = params->mtu + params->frame_overhead;
        port->frame_overhead = params->frame_overhead;

        /* Timing */
        port->time_cpu_cycles = rte_get_tsc_cycles();
        port->time_cpu_bytes = 0;
        port->time = 0;

        /* Subport profile table */
        rte_sched_port_config_subport_profile_table(port, params, port->rate);

        cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT)
                / params->rate;
        port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte);
        port->cycles_per_byte = cycles_per_byte;

        /* Grinders */
        port->pkts_out = NULL;
        port->n_pkts_out = 0;
        port->subport_id = 0;

        return port;
}
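/*
 * Usage sketch (illustrative only; the field values below are
 * assumptions, not taken from this file):
 *
 *      struct rte_sched_port_params pp = {
 *              .socket = 0,
 *              .rate = 1250000000,     // 10 GbE payload rate, bytes/sec
 *              .mtu = 1522,
 *              .frame_overhead = 24,
 *              .n_subports_per_port = 1,
 *              .n_pipes_per_subport = 4096,
 *              // subport_profiles, n_subport_profiles, ... also required
 *      };
 *      struct rte_sched_port *p = rte_sched_port_config(&pp);
 *
 * Each subport and pipe is then configured with
 * rte_sched_subport_config() and rte_sched_pipe_config().
 */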

static inline void
rte_sched_subport_free(struct rte_sched_port *port,
        struct rte_sched_subport *subport)
{
        uint32_t n_subport_pipe_queues;
        uint32_t qindex;

        if (subport == NULL)
                return;

        n_subport_pipe_queues = rte_sched_subport_pipe_queues(subport);

        /* Free enqueued mbufs */
        for (qindex = 0; qindex < n_subport_pipe_queues; qindex++) {
                struct rte_mbuf **mbufs =
                        rte_sched_subport_pipe_qbase(subport, qindex);
                uint16_t qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
                if (qsize != 0) {
                        struct rte_sched_queue *queue = subport->queue + qindex;
                        uint16_t qr = queue->qr & (qsize - 1);
                        uint16_t qw = queue->qw & (qsize - 1);

                        for (; qr != qw; qr = (qr + 1) & (qsize - 1))
                                rte_pktmbuf_free(mbufs[qr]);
                }
        }

        rte_free(subport);
}

void
rte_sched_port_free(struct rte_sched_port *port)
{
        uint32_t i;

        /* Check user parameters */
        if (port == NULL)
                return;

        for (i = 0; i < port->n_subports_per_port; i++)
                rte_sched_subport_free(port, port->subports[i]);

        rte_free(port->subport_profiles);
        rte_free(port);
}

static void
rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)
{
        uint32_t i;

        for (i = 0; i < n_subports; i++) {
                struct rte_sched_subport *subport = port->subports[i];

                rte_sched_subport_free(port, subport);
        }

        rte_free(port->subport_profiles);
        rte_free(port);
}

#ifdef RTE_SCHED_CMAN
static int
rte_sched_red_config(struct rte_sched_port *port,
        struct rte_sched_subport *s,
        struct rte_sched_subport_params *params,
        uint32_t n_subports)
{
        uint32_t i;

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {

                uint32_t j;

                for (j = 0; j < RTE_COLORS; j++) {
                        /* if min/max are both zero, then RED is disabled */
                        if ((params->cman_params->red_params[i][j].min_th |
                                 params->cman_params->red_params[i][j].max_th) == 0) {
                                continue;
                        }

                        if (rte_red_config_init(&s->red_config[i][j],
                                params->cman_params->red_params[i][j].wq_log2,
                                params->cman_params->red_params[i][j].min_th,
                                params->cman_params->red_params[i][j].max_th,
                                params->cman_params->red_params[i][j].maxp_inv) != 0) {
                                rte_sched_free_memory(port, n_subports);

                                RTE_LOG(NOTICE, SCHED,
                                "%s: RED configuration init fails\n", __func__);
                                return -EINVAL;
                        }
                }
        }
        s->cman = RTE_SCHED_CMAN_RED;
        return 0;
}

static int
rte_sched_pie_config(struct rte_sched_port *port,
        struct rte_sched_subport *s,
        struct rte_sched_subport_params *params,
        uint32_t n_subports)
{
        uint32_t i;

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                if (params->cman_params->pie_params[i].tailq_th > params->qsize[i]) {
                        RTE_LOG(NOTICE, SCHED,
                        "%s: PIE tailq threshold incorrect\n", __func__);
                        return -EINVAL;
                }

                if (rte_pie_config_init(&s->pie_config[i],
                        params->cman_params->pie_params[i].qdelay_ref,
                        params->cman_params->pie_params[i].dp_update_interval,
                        params->cman_params->pie_params[i].max_burst,
                        params->cman_params->pie_params[i].tailq_th) != 0) {
                        rte_sched_free_memory(port, n_subports);

                        RTE_LOG(NOTICE, SCHED,
                        "%s: PIE configuration init fails\n", __func__);
                        return -EINVAL;
                }
        }
        s->cman = RTE_SCHED_CMAN_PIE;
        return 0;
}

static int
rte_sched_cman_config(struct rte_sched_port *port,
        struct rte_sched_subport *s,
        struct rte_sched_subport_params *params,
        uint32_t n_subports)
{
        if (params->cman_params->cman_mode == RTE_SCHED_CMAN_RED)
                return rte_sched_red_config(port, s, params, n_subports);

        else if (params->cman_params->cman_mode == RTE_SCHED_CMAN_PIE)
                return rte_sched_pie_config(port, s, params, n_subports);

        return -EINVAL;
}
#endif

int
rte_sched_subport_config(struct rte_sched_port *port,
        uint32_t subport_id,
        struct rte_sched_subport_params *params,
        uint32_t subport_profile_id)
{
        struct rte_sched_subport *s = NULL;
        uint32_t n_subports = subport_id;
        struct rte_sched_subport_profile *profile;
        uint32_t n_subport_pipe_queues, i;
        uint32_t size0, size1, bmp_mem_size;
        int status;
        int ret;

        /* Check user parameters */
        if (port == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter port\n", __func__);
                return -EINVAL;
        }

        if (subport_id >= port->n_subports_per_port) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for subport id\n", __func__);
                ret = -EINVAL;
                goto out;
        }

        if (subport_profile_id >= port->n_max_subport_profiles) {
                RTE_LOG(ERR, SCHED, "%s: "
                        "Number of subport profiles exceeds the max limit\n",
                        __func__);
                ret = -EINVAL;
                goto out;
        }

        /* Memory is allocated only on the first invocation of the API for a
         * given subport. Subsequent invocations on the same subport only
         * update the subport bandwidth parameters.
         */
1204         if (port->subports[subport_id] == NULL) {
1205
1206                 status = rte_sched_subport_check_params(params,
1207                         port->n_pipes_per_subport,
1208                         port->rate);
1209                 if (status != 0) {
1210                         RTE_LOG(NOTICE, SCHED,
1211                                 "%s: Port scheduler params check failed (%d)\n",
1212                                 __func__, status);
1213                         ret = -EINVAL;
1214                         goto out;
1215                 }
1216
1217                 /* Determine the amount of memory to allocate */
1218                 size0 = sizeof(struct rte_sched_subport);
1219                 size1 = rte_sched_subport_get_array_base(params,
1220                                         e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
1221
1222                 /* Allocate memory to store the data structures */
1223                 s = rte_zmalloc_socket("subport_params", size0 + size1,
1224                         RTE_CACHE_LINE_SIZE, port->socket);
1225                 if (s == NULL) {
1226                         RTE_LOG(ERR, SCHED,
1227                                 "%s: Memory allocation fails\n", __func__);
1228                         ret = -ENOMEM;
1229                         goto out;
1230                 }
1231
1232                 n_subports++;
1233
1234                 subport_profile_id = 0;
1235
1236                 /* Port */
1237                 port->subports[subport_id] = s;
1238
1239                 s->tb_time = port->time;
1240
1241                 /* compile time checks */
1242                 RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
1243                 RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS &
1244                         (RTE_SCHED_PORT_N_GRINDERS - 1));
1245
1246                 /* User parameters */
1247                 s->n_pipes_per_subport_enabled =
1248                                 params->n_pipes_per_subport_enabled;
1249                 memcpy(s->qsize, params->qsize, sizeof(params->qsize));
1250                 s->n_pipe_profiles = params->n_pipe_profiles;
1251                 s->n_max_pipe_profiles = params->n_max_pipe_profiles;
1252
1253 #ifdef RTE_SCHED_CMAN
1254                 if (params->cman_params != NULL) {
1255                         s->cman_enabled = true;
1256                         status = rte_sched_cman_config(port, s, params, n_subports);
1257                         if (status) {
1258                                 RTE_LOG(NOTICE, SCHED,
1259                                         "%s: CMAN configuration fails\n", __func__);
1260                                 return status;
1261                         }
1262                 } else {
1263                         s->cman_enabled = false;
1264                 }
1265 #endif
1266
1267                 /* Scheduling loop detection */
1268                 s->pipe_loop = RTE_SCHED_PIPE_INVALID;
1269                 s->pipe_exhaustion = 0;
1270
1271                 /* Grinders */
1272                 s->busy_grinders = 0;
1273
1274                 /* Queue base calculation */
1275                 rte_sched_subport_config_qsize(s);
1276
1277                 /* Large data structures */
1278                 s->pipe = (struct rte_sched_pipe *)
1279                         (s->memory + rte_sched_subport_get_array_base(params,
1280                         e_RTE_SCHED_SUBPORT_ARRAY_PIPE));
1281                 s->queue = (struct rte_sched_queue *)
1282                         (s->memory + rte_sched_subport_get_array_base(params,
1283                         e_RTE_SCHED_SUBPORT_ARRAY_QUEUE));
1284                 s->queue_extra = (struct rte_sched_queue_extra *)
1285                         (s->memory + rte_sched_subport_get_array_base(params,
1286                         e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA));
1287                 s->pipe_profiles = (struct rte_sched_pipe_profile *)
1288                         (s->memory + rte_sched_subport_get_array_base(params,
1289                         e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES));
1290                 s->bmp_array =  s->memory + rte_sched_subport_get_array_base(
1291                                 params, e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY);
1292                 s->queue_array = (struct rte_mbuf **)
1293                         (s->memory + rte_sched_subport_get_array_base(params,
1294                         e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY));
1295
1296                 /* Pipe profile table */
1297                 rte_sched_subport_config_pipe_profile_table(s, params,
1298                                                             port->rate);
1299
1300                 /* Bitmap */
1301                 n_subport_pipe_queues = rte_sched_subport_pipe_queues(s);
1302                 bmp_mem_size = rte_bitmap_get_memory_footprint(
1303                                                 n_subport_pipe_queues);
1304                 s->bmp = rte_bitmap_init(n_subport_pipe_queues, s->bmp_array,
1305                                         bmp_mem_size);
1306                 if (s->bmp == NULL) {
1307                         RTE_LOG(ERR, SCHED,
1308                                 "%s: Subport bitmap init error\n", __func__);
1309                         ret = -EINVAL;
1310                         goto out;
1311                 }
1312
1313                 for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
1314                         s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
1315
1316 #ifdef RTE_SCHED_SUBPORT_TC_OV
1317                 /* TC oversubscription */
1318                 s->tc_ov_wm_min = port->mtu;
1319                 s->tc_ov_period_id = 0;
1320                 s->tc_ov = 0;
1321                 s->tc_ov_n = 0;
1322                 s->tc_ov_rate = 0;
1323 #endif
1324         }
1325
1326         {
1327         /* update subport parameters from subport profile table*/
1328                 profile = port->subport_profiles + subport_profile_id;
1329
1330                 s = port->subports[subport_id];
1331
1332                 s->tb_credits = profile->tb_size / 2;
1333
1334                 s->tc_time = port->time + profile->tc_period;
1335
1336                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
1337                         if (s->qsize[i])
1338                                 s->tc_credits[i] =
1339                                         profile->tc_credits_per_period[i];
1340                         else
1341                                 profile->tc_credits_per_period[i] = 0;
1342
1343 #ifdef RTE_SCHED_SUBPORT_TC_OV
1344                 s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period,
1345                                                         s->pipe_tc_be_rate_max);
1346                 s->tc_ov_wm = s->tc_ov_wm_max;
1347 #endif
1348                 s->profile = subport_profile_id;
1349
1350         }
1351
1352         rte_sched_port_log_subport_profile(port, subport_profile_id);
1353
1354         return 0;
1355
1356 out:
1357         rte_sched_free_memory(port, n_subports);
1358
1359         return ret;
1360 }
1361
1362 int
1363 rte_sched_pipe_config(struct rte_sched_port *port,
1364         uint32_t subport_id,
1365         uint32_t pipe_id,
1366         int32_t pipe_profile)
1367 {
1368         struct rte_sched_subport *s;
1369         struct rte_sched_subport_profile *sp;
1370         struct rte_sched_pipe *p;
1371         struct rte_sched_pipe_profile *params;
1372         uint32_t n_subports = subport_id + 1;
1373         uint32_t deactivate, profile, i;
1374         int ret;
1375
1376         /* Check user parameters */
1377         profile = (uint32_t) pipe_profile;
1378         deactivate = (pipe_profile < 0);
1379
1380         if (port == NULL) {
1381                 RTE_LOG(ERR, SCHED,
1382                         "%s: Incorrect value for parameter port\n", __func__);
1383                 return -EINVAL;
1384         }
1385
1386         if (subport_id >= port->n_subports_per_port) {
1387                 RTE_LOG(ERR, SCHED,
1388                         "%s: Incorrect value for parameter subport id\n", __func__);
1389                 ret = -EINVAL;
1390                 goto out;
1391         }
1392
1393         s = port->subports[subport_id];
1394         if (pipe_id >= s->n_pipes_per_subport_enabled) {
1395                 RTE_LOG(ERR, SCHED,
1396                         "%s: Incorrect value for parameter pipe id\n", __func__);
1397                 ret = -EINVAL;
1398                 goto out;
1399         }
1400
1401         if (!deactivate && profile >= s->n_pipe_profiles) {
1402                 RTE_LOG(ERR, SCHED,
1403                         "%s: Incorrect value for parameter pipe profile\n", __func__);
1404                 ret = -EINVAL;
1405                 goto out;
1406         }
1407
1408         sp = port->subport_profiles + s->profile;
1409         /* Handle the case when pipe already has a valid configuration */
1410         p = s->pipe + pipe_id;
1411         if (p->tb_time) {
1412                 params = s->pipe_profiles + p->profile;
1413
1414                 double subport_tc_be_rate =
1415                 (double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1416                         / (double) sp->tc_period;
1417                 double pipe_tc_be_rate =
1418                         (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1419                         / (double) params->tc_period;
1420                 uint32_t tc_be_ov = s->tc_ov;
1421
1422                 /* Unplug pipe from its subport */
1423                 s->tc_ov_n -= params->tc_ov_weight;
1424                 s->tc_ov_rate -= pipe_tc_be_rate;
1425                 s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
1426
1427                 if (s->tc_ov != tc_be_ov) {
1428                         RTE_LOG(DEBUG, SCHED,
1429                                 "Subport %u Best-effort TC oversubscription is OFF (%.4lf >= %.4lf)\n",
1430                                 subport_id, subport_tc_be_rate, s->tc_ov_rate);
1431                 }
1432
1433                 /* Reset the pipe */
1434                 memset(p, 0, sizeof(struct rte_sched_pipe));
1435         }
1436
1437         if (deactivate)
1438                 return 0;
1439
1440         /* Apply the new pipe configuration */
1441         p->profile = profile;
1442         params = s->pipe_profiles + p->profile;
1443
1444         /* Token Bucket (TB) */
1445         p->tb_time = port->time;
1446         p->tb_credits = params->tb_size / 2;
1447
1448         /* Traffic Classes (TCs) */
1449         p->tc_time = port->time + params->tc_period;
1450
1451         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
1452                 if (s->qsize[i])
1453                         p->tc_credits[i] = params->tc_credits_per_period[i];
1454
1455         {
1456                 /* Subport best effort tc oversubscription */
1457                 double subport_tc_be_rate =
1458                         (double) sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] /
1459                         (double) sp->tc_period;
1460                 double pipe_tc_be_rate =
1461                         (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] /
1462                         (double) params->tc_period;
1463                 uint32_t tc_be_ov = s->tc_ov;
1464
1465                 s->tc_ov_n += params->tc_ov_weight;
1466                 s->tc_ov_rate += pipe_tc_be_rate;
1467                 s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
1468
1469                 if (s->tc_ov != tc_be_ov) {
1470                         RTE_LOG(DEBUG, SCHED,
1471                                 "Subport %u Best-effort TC oversubscription is ON (%.4lf < %.4lf)\n",
1472                                 subport_id, subport_tc_be_rate, s->tc_ov_rate);
1473                 }
1474                 p->tc_ov_period_id = s->tc_ov_period_id;
1475                 p->tc_ov_credits = s->tc_ov_wm;
1476         }
1477
1478         return 0;
1479
1480 out:
1481         rte_sched_free_memory(port, n_subports);
1482
1483         return ret;
1484 }
1485
1486 int
1487 rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,
1488         uint32_t subport_id,
1489         struct rte_sched_pipe_params *params,
1490         uint32_t *pipe_profile_id)
1491 {
1492         struct rte_sched_subport *s;
1493         struct rte_sched_pipe_profile *pp;
1494         uint32_t i;
1495         int status;
1496
1497         /* Port */
1498         if (port == NULL) {
1499                 RTE_LOG(ERR, SCHED,
1500                         "%s: Incorrect value for parameter port\n", __func__);
1501                 return -EINVAL;
1502         }
1503
1504         /* Subport id must not exceed the max limit */
1505         if (subport_id >= port->n_subports_per_port) {
1506                 RTE_LOG(ERR, SCHED,
1507                         "%s: Incorrect value for subport id\n", __func__);
1508                 return -EINVAL;
1509         }
1510
1511         s = port->subports[subport_id];
1512
1513         /* Number of pipe profiles must not exceed the max limit */
1514         if (s->n_pipe_profiles >= s->n_max_pipe_profiles) {
1515                 RTE_LOG(ERR, SCHED,
1516                         "%s: Number of pipe profiles exceeds the max limit\n", __func__);
1517                 return -EINVAL;
1518         }
1519
1520         /* Pipe params */
1521         status = pipe_profile_check(params, port->rate, &s->qsize[0]);
1522         if (status != 0) {
1523                 RTE_LOG(ERR, SCHED,
1524                         "%s: Pipe profile check failed(%d)\n", __func__, status);
1525                 return -EINVAL;
1526         }
1527
1528         pp = &s->pipe_profiles[s->n_pipe_profiles];
1529         rte_sched_pipe_profile_convert(s, params, pp, port->rate);
1530
1531         /* Pipe profile must not already exist */
1532         for (i = 0; i < s->n_pipe_profiles; i++)
1533                 if (memcmp(s->pipe_profiles + i, pp, sizeof(*pp)) == 0) {
1534                         RTE_LOG(ERR, SCHED,
1535                                 "%s: Pipe profile exists\n", __func__);
1536                         return -EINVAL;
1537                 }
1538
1539         /* Pipe profile commit */
1540         *pipe_profile_id = s->n_pipe_profiles;
1541         s->n_pipe_profiles++;
1542
1543         if (s->pipe_tc_be_rate_max < params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE])
1544                 s->pipe_tc_be_rate_max = params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];
1545
1546         rte_sched_port_log_pipe_profile(s, *pipe_profile_id);
1547
1548         return 0;
1549 }
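/*
 * Illustrative usage sketch (not part of the library; all parameter
 * values below are hypothetical): add a pipe profile to subport 0 and
 * retrieve the id assigned to it. The returned id can then be used
 * when configuring individual pipes of that subport.
 *
 *	struct rte_sched_pipe_params pp = {
 *		.tb_rate = 305175,
 *		.tb_size = 1000000,
 *		.tc_rate = {305175, 305175, ...},
 *		.tc_period = 40,
 *	};
 *	uint32_t profile_id;
 *
 *	if (rte_sched_subport_pipe_profile_add(port, 0, &pp, &profile_id) != 0)
 *		... handle -EINVAL ...
 */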
1550
1551 int
1552 rte_sched_port_subport_profile_add(struct rte_sched_port *port,
1553         struct rte_sched_subport_profile_params *params,
1554         uint32_t *subport_profile_id)
1555 {
1556         int status;
1557         uint32_t i;
1558         struct rte_sched_subport_profile *dst;
1559
1560         /* Port */
1561         if (port == NULL) {
1562                 RTE_LOG(ERR, SCHED,
1563                         "%s: Incorrect value for parameter port\n", __func__);
1564                 return -EINVAL;
1565         }
1566
1567         if (params == NULL) {
1568                 RTE_LOG(ERR, SCHED,
1569                         "%s: Incorrect value for parameter profile\n", __func__);
1570                 return -EINVAL;
1571         }
1572
1573         if (subport_profile_id == NULL) {
1574                 RTE_LOG(ERR, SCHED,
1575                         "%s: Incorrect value for parameter subport_profile_id\n",
1576                         __func__);
1577                 return -EINVAL;
1578         }
1579
1580         dst = port->subport_profiles + port->n_subport_profiles;
1581
1582         /* Number of subport profiles must not exceed the max limit */
1583         if (port->n_subport_profiles >= port->n_max_subport_profiles) {
1584                 RTE_LOG(ERR, SCHED,
1585                         "%s: Number of subport profiles exceeds the max limit\n",
1586                         __func__);
1587                 return -EINVAL;
1588         }
1589
1590         status = subport_profile_check(params, port->rate);
1591         if (status != 0) {
1592                 RTE_LOG(ERR, SCHED,
1593                         "%s: subport profile check failed(%d)\n", __func__, status);
1594                 return -EINVAL;
1595         }
1596
1597         rte_sched_subport_profile_convert(params, dst, port->rate);
1598
1599         /* Subport profile must not already exist */
1600         for (i = 0; i < port->n_subport_profiles; i++)
1601                 if (memcmp(port->subport_profiles + i,
1602                     dst, sizeof(*dst)) == 0) {
1603                         RTE_LOG(ERR, SCHED,
1604                                 "%s: subport profile exists\n", __func__);
1605                         return -EINVAL;
1606                 }
1607
1608         /* Subport profile commit */
1609         *subport_profile_id = port->n_subport_profiles;
1610         port->n_subport_profiles++;
1611
1612         rte_sched_port_log_subport_profile(port, *subport_profile_id);
1613
1614         return 0;
1615 }
1616
1617 static inline uint32_t
1618 rte_sched_port_qindex(struct rte_sched_port *port,
1619         uint32_t subport,
1620         uint32_t pipe,
1621         uint32_t traffic_class,
1622         uint32_t queue)
1623 {
1624         return ((subport & (port->n_subports_per_port - 1)) <<
1625                 (port->n_pipes_per_subport_log2 + 4)) |
1626                 ((pipe &
1627                 (port->subports[subport]->n_pipes_per_subport_enabled - 1)) << 4) |
1628                 ((rte_sched_port_pipe_queue(port, traffic_class) + queue) &
1629                 (RTE_SCHED_QUEUES_PER_PIPE - 1));
1630 }
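/*
 * Layout of the flat queue id computed above (and decoded by
 * rte_sched_port_pkt_read_tree_path() below):
 *
 *	queue_id = (subport << (n_pipes_per_subport_log2 + 4)) |
 *	           (pipe << 4) | pipe_queue
 *
 * where pipe_queue = rte_sched_port_pipe_queue(port, tc) + queue is the
 * queue offset within the pipe (4 bits, 16 queues per pipe). For
 * example, with n_pipes_per_subport_log2 = 10, subport 3, pipe 5 and
 * pipe queue 14: queue_id = (3 << 14) | (5 << 4) | 14 = 49246.
 */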
1631
1632 void
1633 rte_sched_port_pkt_write(struct rte_sched_port *port,
1634                          struct rte_mbuf *pkt,
1635                          uint32_t subport, uint32_t pipe,
1636                          uint32_t traffic_class,
1637                          uint32_t queue, enum rte_color color)
1638 {
1639         uint32_t queue_id =
1640                 rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
1641
1642         rte_mbuf_sched_set(pkt, queue_id, traffic_class, (uint8_t)color);
1643 }
1644
1645 void
1646 rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
1647                                   const struct rte_mbuf *pkt,
1648                                   uint32_t *subport, uint32_t *pipe,
1649                                   uint32_t *traffic_class, uint32_t *queue)
1650 {
1651         uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
1652
1653         *subport = queue_id >> (port->n_pipes_per_subport_log2 + 4);
1654         *pipe = (queue_id >> 4) &
1655                 (port->subports[*subport]->n_pipes_per_subport_enabled - 1);
1656         *traffic_class = rte_sched_port_pipe_tc(port, queue_id);
1657         *queue = rte_sched_port_tc_queue(port, queue_id);
1658 }
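/*
 * The two functions above round-trip the scheduling hierarchy through
 * the mbuf. Illustrative flow (hypothetical values):
 *
 *	rte_sched_port_pkt_write(port, pkt, 0, 7, 3, 0, RTE_COLOR_GREEN);
 *	...
 *	uint32_t subport, pipe, tc, queue;
 *	rte_sched_port_pkt_read_tree_path(port, pkt, &subport, &pipe,
 *		&tc, &queue);
 *	... on return: subport == 0, pipe == 7, tc == 3, queue == 0 ...
 */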
1659
1660 enum rte_color
1661 rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
1662 {
1663         return (enum rte_color)rte_mbuf_sched_color_get(pkt);
1664 }
1665
1666 int
1667 rte_sched_subport_read_stats(struct rte_sched_port *port,
1668                              uint32_t subport_id,
1669                              struct rte_sched_subport_stats *stats,
1670                              uint32_t *tc_ov)
1671 {
1672         struct rte_sched_subport *s;
1673
1674         /* Check user parameters */
1675         if (port == NULL) {
1676                 RTE_LOG(ERR, SCHED,
1677                         "%s: Incorrect value for parameter port\n", __func__);
1678                 return -EINVAL;
1679         }
1680
1681         if (subport_id >= port->n_subports_per_port) {
1682                 RTE_LOG(ERR, SCHED,
1683                         "%s: Incorrect value for subport id\n", __func__);
1684                 return -EINVAL;
1685         }
1686
1687         if (stats == NULL) {
1688                 RTE_LOG(ERR, SCHED,
1689                         "%s: Incorrect value for parameter stats\n", __func__);
1690                 return -EINVAL;
1691         }
1692
1693         if (tc_ov == NULL) {
1694                 RTE_LOG(ERR, SCHED,
1695                         "%s: Incorrect value for tc_ov\n", __func__);
1696                 return -EINVAL;
1697         }
1698
1699         s = port->subports[subport_id];
1700
1701         /* Copy subport stats and clear */
1702         memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
1703         memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));
1704
1705         /* Subport TC oversubscription status */
1706         *tc_ov = s->tc_ov;
1707
1708         return 0;
1709 }
1710
1711 int
1712 rte_sched_queue_read_stats(struct rte_sched_port *port,
1713         uint32_t queue_id,
1714         struct rte_sched_queue_stats *stats,
1715         uint16_t *qlen)
1716 {
1717         struct rte_sched_subport *s;
1718         struct rte_sched_queue *q;
1719         struct rte_sched_queue_extra *qe;
1720         uint32_t subport_id, subport_qmask, subport_qindex;
1721
1722         /* Check user parameters */
1723         if (port == NULL) {
1724                 RTE_LOG(ERR, SCHED,
1725                         "%s: Incorrect value for parameter port\n", __func__);
1726                 return -EINVAL;
1727         }
1728
1729         if (queue_id >= rte_sched_port_queues_per_port(port)) {
1730                 RTE_LOG(ERR, SCHED,
1731                         "%s: Incorrect value for queue id\n", __func__);
1732                 return -EINVAL;
1733         }
1734
1735         if (stats == NULL) {
1736                 RTE_LOG(ERR, SCHED,
1737                         "%s: Incorrect value for parameter stats\n", __func__);
1738                 return -EINVAL;
1739         }
1740
1741         if (qlen == NULL) {
1742                 RTE_LOG(ERR, SCHED,
1743                         "%s: Incorrect value for parameter qlen\n", __func__);
1744                 return -EINVAL;
1745         }
1746         subport_qmask = port->n_pipes_per_subport_log2 + 4;
1747         subport_id = (queue_id >> subport_qmask) & (port->n_subports_per_port - 1);
1748
1749         s = port->subports[subport_id];
1750         subport_qindex = ((1 << subport_qmask) - 1) & queue_id;
1751         q = s->queue + subport_qindex;
1752         qe = s->queue_extra + subport_qindex;
1753
1754         /* Copy queue stats and clear */
1755         memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
1756         memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats));
1757
1758         /* Queue length: qw and qr are free-running counters, so their
          * difference gives the occupancy modulo 2^16 */
1759         *qlen = q->qw - q->qr;
1760
1761         return 0;
1762 }
1763
1764 #ifdef RTE_SCHED_DEBUG
1765
1766 static inline int
1767 rte_sched_port_queue_is_empty(struct rte_sched_subport *subport,
1768         uint32_t qindex)
1769 {
1770         struct rte_sched_queue *queue = subport->queue + qindex;
1771
1772         return queue->qr == queue->qw;
1773 }
1774
1775 #endif /* RTE_SCHED_DEBUG */
1776
1777 #ifdef RTE_SCHED_COLLECT_STATS
1778
1779 static inline void
1780 rte_sched_port_update_subport_stats(struct rte_sched_port *port,
1781         struct rte_sched_subport *subport,
1782         uint32_t qindex,
1783         struct rte_mbuf *pkt)
1784 {
1785         uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
1786         uint32_t pkt_len = pkt->pkt_len;
1787
1788         subport->stats.n_pkts_tc[tc_index] += 1;
1789         subport->stats.n_bytes_tc[tc_index] += pkt_len;
1790 }
1791
1792 static inline void
1793 rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
1794         struct rte_sched_subport *subport,
1795         uint32_t qindex,
1796         struct rte_mbuf *pkt,
1797         __rte_unused uint32_t n_pkts_cman_dropped)
1798 {
1799         uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
1800         uint32_t pkt_len = pkt->pkt_len;
1801
1802         subport->stats.n_pkts_tc_dropped[tc_index] += 1;
1803         subport->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
1804         subport->stats.n_pkts_cman_dropped[tc_index] += n_pkts_cman_dropped;
1805 }
1806
1807 static inline void
1808 rte_sched_port_update_queue_stats(struct rte_sched_subport *subport,
1809         uint32_t qindex,
1810         struct rte_mbuf *pkt)
1811 {
1812         struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1813         uint32_t pkt_len = pkt->pkt_len;
1814
1815         qe->stats.n_pkts += 1;
1816         qe->stats.n_bytes += pkt_len;
1817 }
1818
1819 static inline void
1820 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
1821         uint32_t qindex,
1822         struct rte_mbuf *pkt,
1823         __rte_unused uint32_t n_pkts_cman_dropped)
1824 {
1825         struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1826         uint32_t pkt_len = pkt->pkt_len;
1827
1828         qe->stats.n_pkts_dropped += 1;
1829         qe->stats.n_bytes_dropped += pkt_len;
1830 #ifdef RTE_SCHED_CMAN
1831         if (subport->cman_enabled)
1832                 qe->stats.n_pkts_cman_dropped += n_pkts_cman_dropped;
1833 #endif
1834 }
1835
1836 #endif /* RTE_SCHED_COLLECT_STATS */
1837
1838 #ifdef RTE_SCHED_CMAN
1839
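/*
 * Congestion management admission decision: a nonzero return value
 * means the packet must be dropped. For RED, the per-TC, per-color
 * configuration is consulted (a zero min/max threshold pair means RED
 * is disabled for that TC/color and the packet is admitted); for PIE,
 * the per-TC configuration and the per-queue PIE state decide.
 */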
1840 static inline int
1841 rte_sched_port_cman_drop(struct rte_sched_port *port,
1842         struct rte_sched_subport *subport,
1843         struct rte_mbuf *pkt,
1844         uint32_t qindex,
1845         uint16_t qlen)
1846 {
1847         if (!subport->cman_enabled)
1848                 return 0;
1849
1850         struct rte_sched_queue_extra *qe;
1851         uint32_t tc_index;
1852
1853         tc_index = rte_sched_port_pipe_tc(port, qindex);
1854         qe = subport->queue_extra + qindex;
1855
1856         /* RED */
1857         if (subport->cman == RTE_SCHED_CMAN_RED) {
1858                 struct rte_red_config *red_cfg;
1859                 struct rte_red *red;
1860                 enum rte_color color;
1861
1862                 color = rte_sched_port_pkt_read_color(pkt);
1863                 red_cfg = &subport->red_config[tc_index][color];
1864
1865                 if ((red_cfg->min_th | red_cfg->max_th) == 0)
1866                         return 0;
1867
1868                 red = &qe->red;
1869
1870                 return rte_red_enqueue(red_cfg, red, qlen, port->time);
1871         }
1872
1873         /* PIE */
1874         struct rte_pie_config *pie_cfg = &subport->pie_config[tc_index];
1875         struct rte_pie *pie = &qe->pie;
1876
1877         return rte_pie_enqueue(pie_cfg, pie, qlen, pkt->pkt_len, port->time_cpu_cycles);
1878 }
1879
1880 static inline void
1881 rte_sched_port_red_set_queue_empty_timestamp(struct rte_sched_port *port,
1882         struct rte_sched_subport *subport, uint32_t qindex)
1883 {
1884         if (subport->cman_enabled) {
1885                 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1886                 if (subport->cman == RTE_SCHED_CMAN_RED) {
1887                         struct rte_red *red = &qe->red;
1888
1889                         rte_red_mark_queue_empty(red, port->time);
1890                 }
1891         }
1892 }
1893
1894 static inline void
1895 rte_sched_port_pie_dequeue(struct rte_sched_subport *subport,
1896         uint32_t qindex, uint32_t pkt_len, uint64_t time)
{
1897         if (subport->cman_enabled && subport->cman == RTE_SCHED_CMAN_PIE) {
1898                 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1899                 struct rte_pie *pie = &qe->pie;
1900
1901                 /* Update queue length */
1902                 pie->qlen -= 1;
1903                 pie->qlen_bytes -= pkt_len;
1904
1905                 rte_pie_dequeue(pie, pkt_len, time);
1906         }
1907 }
1908
1909 #else
1910
1911 static inline int rte_sched_port_cman_drop(struct rte_sched_port *port __rte_unused,
1912         struct rte_sched_subport *subport __rte_unused,
1913         struct rte_mbuf *pkt __rte_unused,
1914         uint32_t qindex __rte_unused,
1915         uint16_t qlen __rte_unused)
1916 {
1917         return 0;
1918 }
1919
1920 #define rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex)
1921
1922 static inline void
1923 rte_sched_port_pie_dequeue(struct rte_sched_subport *subport __rte_unused,
1924         uint32_t qindex __rte_unused,
1925         uint32_t pkt_len __rte_unused,
1926         uint64_t time __rte_unused)
{
1927         /* do-nothing when RTE_SCHED_CMAN not defined */
1928 }
1929
1930 #endif /* RTE_SCHED_CMAN */
1931
1932 #ifdef RTE_SCHED_DEBUG
1933
1934 static inline void
1935 debug_check_queue_slab(struct rte_sched_subport *subport, uint32_t bmp_pos,
1936                        uint64_t bmp_slab)
1937 {
1938         uint64_t mask;
1939         uint32_t i, panic;
1940
1941         if (bmp_slab == 0)
1942                 rte_panic("Empty slab at position %u\n", bmp_pos);
1943
1944         panic = 0;
1945         for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
1946                 if (mask & bmp_slab) {
1947                         if (rte_sched_port_queue_is_empty(subport, bmp_pos + i)) {
1948                                 printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i);
1949                                 panic = 1;
1950                         }
1951                 }
1952         }
1953
1954         if (panic)
1955                 rte_panic("Empty queues in slab 0x%" PRIx64 " starting at position %u\n",
1956                         bmp_slab, bmp_pos);
1957 }
1958
1959 #endif /* RTE_SCHED_DEBUG */
1960
1961 static inline struct rte_sched_subport *
1962 rte_sched_port_subport(struct rte_sched_port *port,
1963         struct rte_mbuf *pkt)
1964 {
1965         uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
1966         uint32_t subport_id = queue_id >> (port->n_pipes_per_subport_log2 + 4);
1967
1968         return port->subports[subport_id];
1969 }
1970
1971 static inline uint32_t
1972 rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_subport *subport,
1973         struct rte_mbuf *pkt, uint32_t subport_qmask)
1974 {
1975         struct rte_sched_queue *q;
1976 #ifdef RTE_SCHED_COLLECT_STATS
1977         struct rte_sched_queue_extra *qe;
1978 #endif
1979         uint32_t qindex = rte_mbuf_sched_queue_get(pkt);
1980         uint32_t subport_queue_id = subport_qmask & qindex;
1981
1982         q = subport->queue + subport_queue_id;
1983         rte_prefetch0(q);
1984 #ifdef RTE_SCHED_COLLECT_STATS
1985         qe = subport->queue_extra + subport_queue_id;
1986         rte_prefetch0(qe);
1987 #endif
1988
1989         return subport_queue_id;
1990 }
1991
1992 static inline void
1993 rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port,
1994         struct rte_sched_subport *subport,
1995         uint32_t qindex,
1996         struct rte_mbuf **qbase)
1997 {
1998         struct rte_sched_queue *q;
1999         struct rte_mbuf **q_qw;
2000         uint16_t qsize;
2001
2002         q = subport->queue + qindex;
2003         qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2004         q_qw = qbase + (q->qw & (qsize - 1));
2005
2006         rte_prefetch0(q_qw);
2007         rte_bitmap_prefetch0(subport->bmp, qindex);
2008 }
2009
2010 static inline int
2011 rte_sched_port_enqueue_qwa(struct rte_sched_port *port,
2012         struct rte_sched_subport *subport,
2013         uint32_t qindex,
2014         struct rte_mbuf **qbase,
2015         struct rte_mbuf *pkt)
2016 {
2017         struct rte_sched_queue *q;
2018         uint16_t qsize;
2019         uint16_t qlen;
2020
2021         q = subport->queue + qindex;
2022         qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2023         qlen = q->qw - q->qr;
2024
2025         /* Drop the packet (and update drop stats) on queue full or CMAN drop */
2026         if (unlikely(rte_sched_port_cman_drop(port, subport, pkt, qindex, qlen) ||
2027                      (qlen >= qsize))) {
2028                 rte_pktmbuf_free(pkt);
2029 #ifdef RTE_SCHED_COLLECT_STATS
2030                 rte_sched_port_update_subport_stats_on_drop(port, subport,
2031                         qindex, pkt, qlen < qsize);
2032                 rte_sched_port_update_queue_stats_on_drop(subport, qindex, pkt,
2033                         qlen < qsize);
2034 #endif
2035                 return 0;
2036         }
2037
2038         /* Enqueue packet */
2039         qbase[q->qw & (qsize - 1)] = pkt;
2040         q->qw++;
2041
2042         /* Activate queue in the subport bitmap */
2043         rte_bitmap_set(subport->bmp, qindex);
2044
2045         /* Statistics */
2046 #ifdef RTE_SCHED_COLLECT_STATS
2047         rte_sched_port_update_subport_stats(port, subport, qindex, pkt);
2048         rte_sched_port_update_queue_stats(subport, qindex, pkt);
2049 #endif
2050
2051         return 1;
2052 }
2053
2054
2055 /*
2056  * The enqueue function implements a 4-level pipeline with each stage
2057  * processing two different packets. The purpose of using a pipeline
2058  * is to hide the latency of prefetching the data structures. The
2059  * naming convention is presented in the diagram below:
2060  *
2061  *   p00  _______   p10  _______   p20  _______   p30  _______
2062  * ----->|       |----->|       |----->|       |----->|       |----->
2063  *       |   0   |      |   1   |      |   2   |      |   3   |
2064  * ----->|_______|----->|_______|----->|_______|----->|_______|----->
2065  *   p01            p11            p21            p31
2066  *
2067  */
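/*
 * Stage 0 prefetches the mbufs, stage 1 prefetches the subport and the
 * queue structure holding the queue pointers, stage 2 prefetches the
 * queue write location, and stage 3 writes the packet into its queue
 * and activates the queue in the subport bitmap (see the stage labels
 * in the loop body below).
 */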
2068 int
2069 rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,
2070                        uint32_t n_pkts)
2071 {
2072         struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21,
2073                 *pkt30, *pkt31, *pkt_last;
2074         struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base,
2075                 **q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
2076         struct rte_sched_subport *subport00, *subport01, *subport10, *subport11,
2077                 *subport20, *subport21, *subport30, *subport31, *subport_last;
2078         uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
2079         uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
2080         uint32_t subport_qmask;
2081         uint32_t result, i;
2082
2083         result = 0;
2084         subport_qmask = (1 << (port->n_pipes_per_subport_log2 + 4)) - 1;
2085
2086         /*
2087          * Less than 6 input packets available, which is not enough to
2088          * feed the pipeline
2089          */
2090         if (unlikely(n_pkts < 6)) {
2091                 struct rte_sched_subport *subports[5];
2092                 struct rte_mbuf **q_base[5];
2093                 uint32_t q[5];
2094
2095                 /* Prefetch the mbuf structure of each packet */
2096                 for (i = 0; i < n_pkts; i++)
2097                         rte_prefetch0(pkts[i]);
2098
2099                 /* Prefetch the subport structure for each packet */
2100                 for (i = 0; i < n_pkts; i++)
2101                         subports[i] = rte_sched_port_subport(port, pkts[i]);
2102
2103                 /* Prefetch the queue structure for each queue */
2104                 for (i = 0; i < n_pkts; i++)
2105                         q[i] = rte_sched_port_enqueue_qptrs_prefetch0(subports[i],
2106                                         pkts[i], subport_qmask);
2107
2108                 /* Prefetch the write pointer location of each queue */
2109                 for (i = 0; i < n_pkts; i++) {
2110                         q_base[i] = rte_sched_subport_pipe_qbase(subports[i], q[i]);
2111                         rte_sched_port_enqueue_qwa_prefetch0(port, subports[i],
2112                                 q[i], q_base[i]);
2113                 }
2114
2115                 /* Write each packet to its queue */
2116                 for (i = 0; i < n_pkts; i++)
2117                         result += rte_sched_port_enqueue_qwa(port, subports[i],
2118                                                 q[i], q_base[i], pkts[i]);
2119
2120                 return result;
2121         }
2122
2123         /* Feed the first 3 stages of the pipeline (6 packets needed) */
2124         pkt20 = pkts[0];
2125         pkt21 = pkts[1];
2126         rte_prefetch0(pkt20);
2127         rte_prefetch0(pkt21);
2128
2129         pkt10 = pkts[2];
2130         pkt11 = pkts[3];
2131         rte_prefetch0(pkt10);
2132         rte_prefetch0(pkt11);
2133
2134         subport20 = rte_sched_port_subport(port, pkt20);
2135         subport21 = rte_sched_port_subport(port, pkt21);
2136         q20 = rte_sched_port_enqueue_qptrs_prefetch0(subport20,
2137                         pkt20, subport_qmask);
2138         q21 = rte_sched_port_enqueue_qptrs_prefetch0(subport21,
2139                         pkt21, subport_qmask);
2140
2141         pkt00 = pkts[4];
2142         pkt01 = pkts[5];
2143         rte_prefetch0(pkt00);
2144         rte_prefetch0(pkt01);
2145
2146         subport10 = rte_sched_port_subport(port, pkt10);
2147         subport11 = rte_sched_port_subport(port, pkt11);
2148         q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
2149                         pkt10, subport_qmask);
2150         q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
2151                         pkt11, subport_qmask);
2152
2153         q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
2154         q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
2155         rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
2156         rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);
2157
2158         /* Run the pipeline */
2159         for (i = 6; i < (n_pkts & (~1)); i += 2) {
2160                 /* Propagate stage inputs */
2161                 pkt30 = pkt20;
2162                 pkt31 = pkt21;
2163                 pkt20 = pkt10;
2164                 pkt21 = pkt11;
2165                 pkt10 = pkt00;
2166                 pkt11 = pkt01;
2167                 q30 = q20;
2168                 q31 = q21;
2169                 q20 = q10;
2170                 q21 = q11;
2171                 subport30 = subport20;
2172                 subport31 = subport21;
2173                 subport20 = subport10;
2174                 subport21 = subport11;
2175                 q30_base = q20_base;
2176                 q31_base = q21_base;
2177
2178                 /* Stage 0: Get packets in */
2179                 pkt00 = pkts[i];
2180                 pkt01 = pkts[i + 1];
2181                 rte_prefetch0(pkt00);
2182                 rte_prefetch0(pkt01);
2183
2184                 /* Stage 1: Prefetch subport and queue structure storing queue pointers */
2185                 subport10 = rte_sched_port_subport(port, pkt10);
2186                 subport11 = rte_sched_port_subport(port, pkt11);
2187                 q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
2188                                 pkt10, subport_qmask);
2189                 q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
2190                                 pkt11, subport_qmask);
2191
2192                 /* Stage 2: Prefetch queue write location */
2193                 q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
2194                 q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
2195                 rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
2196                 rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);
2197
2198                 /* Stage 3: Write packet to queue and activate queue */
2199                 r30 = rte_sched_port_enqueue_qwa(port, subport30,
2200                                 q30, q30_base, pkt30);
2201                 r31 = rte_sched_port_enqueue_qwa(port, subport31,
2202                                 q31, q31_base, pkt31);
2203                 result += r30 + r31;
2204         }
2205
2206         /*
2207          * Drain the pipeline (exactly 6 packets).
2208          * Handle the last packet in the case
2209          * of an odd number of input packets.
2210          */
2211         pkt_last = pkts[n_pkts - 1];
2212         rte_prefetch0(pkt_last);
2213
2214         subport00 = rte_sched_port_subport(port, pkt00);
2215         subport01 = rte_sched_port_subport(port, pkt01);
2216         q00 = rte_sched_port_enqueue_qptrs_prefetch0(subport00,
2217                         pkt00, subport_qmask);
2218         q01 = rte_sched_port_enqueue_qptrs_prefetch0(subport01,
2219                         pkt01, subport_qmask);
2220
2221         q10_base = rte_sched_subport_pipe_qbase(subport10, q10);
2222         q11_base = rte_sched_subport_pipe_qbase(subport11, q11);
2223         rte_sched_port_enqueue_qwa_prefetch0(port, subport10, q10, q10_base);
2224         rte_sched_port_enqueue_qwa_prefetch0(port, subport11, q11, q11_base);
2225
2226         r20 = rte_sched_port_enqueue_qwa(port, subport20,
2227                         q20, q20_base, pkt20);
2228         r21 = rte_sched_port_enqueue_qwa(port, subport21,
2229                         q21, q21_base, pkt21);
2230         result += r20 + r21;
2231
2232         subport_last = rte_sched_port_subport(port, pkt_last);
2233         q_last = rte_sched_port_enqueue_qptrs_prefetch0(subport_last,
2234                                 pkt_last, subport_qmask);
2235
2236         q00_base = rte_sched_subport_pipe_qbase(subport00, q00);
2237         q01_base = rte_sched_subport_pipe_qbase(subport01, q01);
2238         rte_sched_port_enqueue_qwa_prefetch0(port, subport00, q00, q00_base);
2239         rte_sched_port_enqueue_qwa_prefetch0(port, subport01, q01, q01_base);
2240
2241         r10 = rte_sched_port_enqueue_qwa(port, subport10, q10,
2242                         q10_base, pkt10);
2243         r11 = rte_sched_port_enqueue_qwa(port, subport11, q11,
2244                         q11_base, pkt11);
2245         result += r10 + r11;
2246
2247         q_last_base = rte_sched_subport_pipe_qbase(subport_last, q_last);
2248         rte_sched_port_enqueue_qwa_prefetch0(port, subport_last,
2249                 q_last, q_last_base);
2250
2251         r00 = rte_sched_port_enqueue_qwa(port, subport00, q00,
2252                         q00_base, pkt00);
2253         r01 = rte_sched_port_enqueue_qwa(port, subport01, q01,
2254                         q01_base, pkt01);
2255         result += r00 + r01;
2256
2257         if (n_pkts & 1) {
2258                 r_last = rte_sched_port_enqueue_qwa(port, subport_last,
2259                                         q_last, q_last_base, pkt_last);
2260                 result += r_last;
2261         }
2262
2263         return result;
2264 }
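/*
 * Illustrative run-to-completion usage (burst size hypothetical); the
 * packets are assumed to have been classified beforehand with
 * rte_sched_port_pkt_write():
 *
 *	struct rte_mbuf *burst[32];
 *	uint32_t n_rx = ...;
 *
 *	rte_sched_port_enqueue(port, burst, n_rx);
 *	int n_tx = rte_sched_port_dequeue(port, burst, 32);
 */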
2265
2266 #ifndef RTE_SCHED_SUBPORT_TC_OV
2267
2268 static inline void
2269 grinder_credits_update(struct rte_sched_port *port,
2270         struct rte_sched_subport *subport, uint32_t pos)
2271 {
2272         struct rte_sched_grinder *grinder = subport->grinder + pos;
2273         struct rte_sched_pipe *pipe = grinder->pipe;
2274         struct rte_sched_pipe_profile *params = grinder->pipe_params;
2275         struct rte_sched_subport_profile *sp = grinder->subport_params;
2276         uint64_t n_periods;
2277         uint32_t i;
2278
2279         /* Subport TB */
2280         n_periods = (port->time - subport->tb_time) / sp->tb_period;
2281         subport->tb_credits += n_periods * sp->tb_credits_per_period;
2282         subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
2283         subport->tb_time += n_periods * sp->tb_period;
2284
2285         /* Pipe TB */
2286         n_periods = (port->time - pipe->tb_time) / params->tb_period;
2287         pipe->tb_credits += n_periods * params->tb_credits_per_period;
2288         pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
2289         pipe->tb_time += n_periods * params->tb_period;
2290
2291         /* Subport TCs */
2292         if (unlikely(port->time >= subport->tc_time)) {
2293                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2294                         subport->tc_credits[i] = sp->tc_credits_per_period[i];
2295
2296                 subport->tc_time = port->time + sp->tc_period;
2297         }
2298
2299         /* Pipe TCs */
2300         if (unlikely(port->time >= pipe->tc_time)) {
2301                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2302                         pipe->tc_credits[i] = params->tc_credits_per_period[i];
2303
2304                 pipe->tc_time = port->time + params->tc_period;
2305         }
2306 }
2307
2308 #else
2309
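/*
 * Adapt the best-effort oversubscription watermark once per subport
 * tc_period: if the best-effort consumption in the elapsed period came
 * within one MTU of its maximum allowance (the BE credits per period
 * minus what the higher priority classes consumed), shrink the
 * watermark by 1/128 of its value, otherwise grow it by roughly the
 * same fraction; the result is clamped to [tc_ov_wm_min, tc_ov_wm_max].
 */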
2310 static inline uint64_t
2311 grinder_tc_ov_credits_update(struct rte_sched_port *port,
2312         struct rte_sched_subport *subport, uint32_t pos)
2313 {
2314         struct rte_sched_grinder *grinder = subport->grinder + pos;
2315         struct rte_sched_subport_profile *sp = grinder->subport_params;
2316         uint64_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2317         uint64_t tc_consumption = 0, tc_ov_consumption_max;
2318         uint64_t tc_ov_wm = subport->tc_ov_wm;
2319         uint32_t i;
2320
2321         if (subport->tc_ov == 0)
2322                 return subport->tc_ov_wm_max;
2323
2324         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
2325                 tc_ov_consumption[i] = sp->tc_credits_per_period[i] -
2326                                         subport->tc_credits[i];
2327                 tc_consumption += tc_ov_consumption[i];
2328         }
2329
2330         tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] =
2331                 sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
2332                 subport->tc_credits[RTE_SCHED_TRAFFIC_CLASS_BE];
2333
2334         tc_ov_consumption_max =
2335                 sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
2336                 tc_consumption;
2337
2338         if (tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] >
2339                 (tc_ov_consumption_max - port->mtu)) {
2340                 tc_ov_wm  -= tc_ov_wm >> 7;
2341                 if (tc_ov_wm < subport->tc_ov_wm_min)
2342                         tc_ov_wm = subport->tc_ov_wm_min;
2343
2344                 return tc_ov_wm;
2345         }
2346
2347         tc_ov_wm += (tc_ov_wm >> 7) + 1;
2348         if (tc_ov_wm > subport->tc_ov_wm_max)
2349                 tc_ov_wm = subport->tc_ov_wm_max;
2350
2351         return tc_ov_wm;
2352 }
2353
2354 static inline void
2355 grinder_credits_update(struct rte_sched_port *port,
2356         struct rte_sched_subport *subport, uint32_t pos)
2357 {
2358         struct rte_sched_grinder *grinder = subport->grinder + pos;
2359         struct rte_sched_pipe *pipe = grinder->pipe;
2360         struct rte_sched_pipe_profile *params = grinder->pipe_params;
2361         struct rte_sched_subport_profile *sp = grinder->subport_params;
2362         uint64_t n_periods;
2363         uint32_t i;
2364
2365         /* Subport TB */
2366         n_periods = (port->time - subport->tb_time) / sp->tb_period;
2367         subport->tb_credits += n_periods * sp->tb_credits_per_period;
2368         subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
2369         subport->tb_time += n_periods * sp->tb_period;
2370
2371         /* Pipe TB */
2372         n_periods = (port->time - pipe->tb_time) / params->tb_period;
2373         pipe->tb_credits += n_periods * params->tb_credits_per_period;
2374         pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
2375         pipe->tb_time += n_periods * params->tb_period;
2376
2377         /* Subport TCs */
2378         if (unlikely(port->time >= subport->tc_time)) {
2379                 subport->tc_ov_wm =
2380                         grinder_tc_ov_credits_update(port, subport, pos);
2381
2382                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2383                         subport->tc_credits[i] = sp->tc_credits_per_period[i];
2384
2385                 subport->tc_time = port->time + sp->tc_period;
2386                 subport->tc_ov_period_id++;
2387         }
2388
2389         /* Pipe TCs */
2390         if (unlikely(port->time >= pipe->tc_time)) {
2391                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2392                         pipe->tc_credits[i] = params->tc_credits_per_period[i];
2393                 pipe->tc_time = port->time + params->tc_period;
2394         }
2395
2396         /* Pipe TCs - Oversubscription */
2397         if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
2398                 pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;
2399
2400                 pipe->tc_ov_period_id = subport->tc_ov_period_id;
2401         }
2402 }
2403
2404 #endif /* RTE_SCHED_TS_CREDITS_UPDATE, RTE_SCHED_SUBPORT_TC_OV */
2405
2406
2407 #ifndef RTE_SCHED_SUBPORT_TC_OV
2408
2409 static inline int
2410 grinder_credits_check(struct rte_sched_port *port,
2411         struct rte_sched_subport *subport, uint32_t pos)
2412 {
2413         struct rte_sched_grinder *grinder = subport->grinder + pos;
2414         struct rte_sched_pipe *pipe = grinder->pipe;
2415         struct rte_mbuf *pkt = grinder->pkt;
2416         uint32_t tc_index = grinder->tc_index;
2417         uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
2418         uint64_t subport_tb_credits = subport->tb_credits;
2419         uint64_t subport_tc_credits = subport->tc_credits[tc_index];
2420         uint64_t pipe_tb_credits = pipe->tb_credits;
2421         uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
2422         int enough_credits;
2423
2424         /* Check queue credits */
2425         enough_credits = (pkt_len <= subport_tb_credits) &&
2426                 (pkt_len <= subport_tc_credits) &&
2427                 (pkt_len <= pipe_tb_credits) &&
2428                 (pkt_len <= pipe_tc_credits);
2429
2430         if (!enough_credits)
2431                 return 0;
2432
2433         /* Update port credits */
2434         subport->tb_credits -= pkt_len;
2435         subport->tc_credits[tc_index] -= pkt_len;
2436         pipe->tb_credits -= pkt_len;
2437         pipe->tc_credits[tc_index] -= pkt_len;
2438
2439         return 1;
2440 }
2441
2442 #else
2443
2444 static inline int
2445 grinder_credits_check(struct rte_sched_port *port,
2446         struct rte_sched_subport *subport, uint32_t pos)
2447 {
2448         struct rte_sched_grinder *grinder = subport->grinder + pos;
2449         struct rte_sched_pipe *pipe = grinder->pipe;
2450         struct rte_mbuf *pkt = grinder->pkt;
2451         uint32_t tc_index = grinder->tc_index;
2452         uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
2453         uint64_t subport_tb_credits = subport->tb_credits;
2454         uint64_t subport_tc_credits = subport->tc_credits[tc_index];
2455         uint64_t pipe_tb_credits = pipe->tb_credits;
2456         uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
2457         uint64_t pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2458         uint64_t pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {0};
2459         uint64_t pipe_tc_ov_credits;
2460         uint32_t i;
2461         int enough_credits;
2462
2463         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2464                 pipe_tc_ov_mask1[i] = ~0LLU;
2465
2466         pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASS_BE] = pipe->tc_ov_credits;
2467         pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASS_BE] = ~0LLU;
2468         pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
2469
2470         /* Check pipe and subport credits */
2471         enough_credits = (pkt_len <= subport_tb_credits) &&
2472                 (pkt_len <= subport_tc_credits) &&
2473                 (pkt_len <= pipe_tb_credits) &&
2474                 (pkt_len <= pipe_tc_credits) &&
2475                 (pkt_len <= pipe_tc_ov_credits);
2476
2477         if (!enough_credits)
2478                 return 0;
2479
2480         /* Update pipe and subport credits */
2481         subport->tb_credits -= pkt_len;
2482         subport->tc_credits[tc_index] -= pkt_len;
2483         pipe->tb_credits -= pkt_len;
2484         pipe->tc_credits[tc_index] -= pkt_len;
2485         pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;
2486
2487         return 1;
2488 }
2489
2490 #endif /* RTE_SCHED_SUBPORT_TC_OV */
2491
2492
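/*
 * grinder_schedule() charges the packet against the subport and pipe
 * credits, advances the port time by the frame length and updates the
 * WRR state. The WRR token update is branchless: be_tc_active is an
 * all-ones mask only for the best-effort TC, so the token increment
 * collapses to zero for the single-queue, higher priority TCs.
 */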
2493 static inline int
2494 grinder_schedule(struct rte_sched_port *port,
2495         struct rte_sched_subport *subport, uint32_t pos)
2496 {
2497         struct rte_sched_grinder *grinder = subport->grinder + pos;
2498         struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
2499         uint32_t qindex = grinder->qindex[grinder->qpos];
2500         struct rte_mbuf *pkt = grinder->pkt;
2501         uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
2502         uint32_t be_tc_active;
2503
2504         if (!grinder_credits_check(port, subport, pos))
2505                 return 0;
2506
2507         /* Advance port time */
2508         port->time += pkt_len;
2509
2510         /* Send packet */
2511         port->pkts_out[port->n_pkts_out++] = pkt;
2512         queue->qr++;
2513
2514         be_tc_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE) ? ~0x0 : 0x0;
2515         grinder->wrr_tokens[grinder->qpos] +=
2516                 (pkt_len * grinder->wrr_cost[grinder->qpos]) & be_tc_active;
2517
2518         if (queue->qr == queue->qw) {
2519                 rte_bitmap_clear(subport->bmp, qindex);
2520                 grinder->qmask &= ~(1 << grinder->qpos);
2521                 if (be_tc_active)
2522                         grinder->wrr_mask[grinder->qpos] = 0;
2523
2524                 rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex);
2525         }
2526
2527         rte_sched_port_pie_dequeue(subport, qindex, pkt_len, port->time_cpu_cycles);
2528
2529         /* Reset pipe loop detection */
2530         subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
2531         grinder->productive = 1;
2532
2533         return 1;
2534 }
2535
2536 static inline int
2537 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2538 {
2539         uint32_t i;
2540
2541         for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) {
2542                 if (subport->grinder_base_bmp_pos[i] == base_pipe)
2543                         return 1;
2544         }
2545
2546         return 0;
2547 }
2548
2549 static inline void
2550 grinder_pcache_populate(struct rte_sched_subport *subport,
2551         uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
2552 {
2553         struct rte_sched_grinder *grinder = subport->grinder + pos;
2554         uint16_t w[4];
2555
2556         grinder->pcache_w = 0;
2557         grinder->pcache_r = 0;
2558
2559         w[0] = (uint16_t) bmp_slab;
2560         w[1] = (uint16_t) (bmp_slab >> 16);
2561         w[2] = (uint16_t) (bmp_slab >> 32);
2562         w[3] = (uint16_t) (bmp_slab >> 48);
2563
2564         grinder->pcache_qmask[grinder->pcache_w] = w[0];
2565         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
2566         grinder->pcache_w += (w[0] != 0);
2567
2568         grinder->pcache_qmask[grinder->pcache_w] = w[1];
2569         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
2570         grinder->pcache_w += (w[1] != 0);
2571
2572         grinder->pcache_qmask[grinder->pcache_w] = w[2];
2573         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
2574         grinder->pcache_w += (w[2] != 0);
2575
2576         grinder->pcache_qmask[grinder->pcache_w] = w[3];
2577         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
2578         grinder->pcache_w += (w[3] != 0);
2579 }
2580
2581 static inline void
2582 grinder_tccache_populate(struct rte_sched_subport *subport,
2583         uint32_t pos, uint32_t qindex, uint16_t qmask)
2584 {
2585         struct rte_sched_grinder *grinder = subport->grinder + pos;
2586         uint8_t b, i;
2587
2588         grinder->tccache_w = 0;
2589         grinder->tccache_r = 0;
2590
2591         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
2592                 b = (uint8_t) ((qmask >> i) & 0x1);
2593                 grinder->tccache_qmask[grinder->tccache_w] = b;
2594                 grinder->tccache_qindex[grinder->tccache_w] = qindex + i;
2595                 grinder->tccache_w += (b != 0);
2596         }
2597
2598         b = (uint8_t) (qmask >> (RTE_SCHED_TRAFFIC_CLASS_BE));
2599         grinder->tccache_qmask[grinder->tccache_w] = b;
2600         grinder->tccache_qindex[grinder->tccache_w] = qindex +
2601                 RTE_SCHED_TRAFFIC_CLASS_BE;
2602         grinder->tccache_w += (b != 0);
2603 }
2604
2605 static inline int
2606 grinder_next_tc(struct rte_sched_port *port,
2607         struct rte_sched_subport *subport, uint32_t pos)
2608 {
2609         struct rte_sched_grinder *grinder = subport->grinder + pos;
2610         struct rte_mbuf **qbase;
2611         uint32_t qindex;
2612         uint16_t qsize;
2613
2614         if (grinder->tccache_r == grinder->tccache_w)
2615                 return 0;
2616
2617         qindex = grinder->tccache_qindex[grinder->tccache_r];
2618         qbase = rte_sched_subport_pipe_qbase(subport, qindex);
2619         qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2620
2621         grinder->tc_index = rte_sched_port_pipe_tc(port, qindex);
2622         grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
2623         grinder->qsize = qsize;
2624
2625         if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
2626                 grinder->queue[0] = subport->queue + qindex;
2627                 grinder->qbase[0] = qbase;
2628                 grinder->qindex[0] = qindex;
2629                 grinder->tccache_r++;
2630
2631                 return 1;
2632         }
2633
2634         grinder->queue[0] = subport->queue + qindex;
2635         grinder->queue[1] = subport->queue + qindex + 1;
2636         grinder->queue[2] = subport->queue + qindex + 2;
2637         grinder->queue[3] = subport->queue + qindex + 3;
2638
2639         grinder->qbase[0] = qbase;
2640         grinder->qbase[1] = qbase + qsize;
2641         grinder->qbase[2] = qbase + 2 * qsize;
2642         grinder->qbase[3] = qbase + 3 * qsize;
2643
2644         grinder->qindex[0] = qindex;
2645         grinder->qindex[1] = qindex + 1;
2646         grinder->qindex[2] = qindex + 2;
2647         grinder->qindex[3] = qindex + 3;
2648
2649         grinder->tccache_r++;
2650         return 1;
2651 }
2652
2653 static inline int
2654 grinder_next_pipe(struct rte_sched_port *port,
2655         struct rte_sched_subport *subport, uint32_t pos)
2656 {
2657         struct rte_sched_grinder *grinder = subport->grinder + pos;
2658         uint32_t pipe_qindex;
2659         uint16_t pipe_qmask;
2660
2661         if (grinder->pcache_r < grinder->pcache_w) {
2662                 pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
2663                 pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
2664                 grinder->pcache_r++;
2665         } else {
2666                 uint64_t bmp_slab = 0;
2667                 uint32_t bmp_pos = 0;
2668
2669                 /* Get another non-empty pipe group */
2670                 if (unlikely(rte_bitmap_scan(subport->bmp, &bmp_pos, &bmp_slab) <= 0))
2671                         return 0;
2672
2673 #ifdef RTE_SCHED_DEBUG
2674                 debug_check_queue_slab(subport, bmp_pos, bmp_slab);
2675 #endif
2676
2677                 /* Return if pipe group already in one of the other grinders */
2678                 subport->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
2679                 if (unlikely(grinder_pipe_exists(subport, bmp_pos)))
2680                         return 0;
2681
2682                 subport->grinder_base_bmp_pos[pos] = bmp_pos;
2683
2684                 /* Install new pipe group into grinder's pipe cache */
2685                 grinder_pcache_populate(subport, pos, bmp_pos, bmp_slab);
2686
2687                 pipe_qmask = grinder->pcache_qmask[0];
2688                 pipe_qindex = grinder->pcache_qindex[0];
2689                 grinder->pcache_r = 1;
2690         }
2691
2692         /* Install new pipe in the grinder */
2693         grinder->pindex = pipe_qindex >> 4;
2694         grinder->subport = subport;
2695         grinder->pipe = subport->pipe + grinder->pindex;
2696         grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
2697         grinder->productive = 0;
2698
2699         grinder_tccache_populate(subport, pos, pipe_qindex, pipe_qmask);
2700         grinder_next_tc(port, subport, pos);
2701
2702         /* Check for pipe exhaustion */
2703         if (grinder->pindex == subport->pipe_loop) {
2704                 subport->pipe_exhaustion = 1;
2705                 subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
2706         }
2707
2708         return 1;
2709 }
2710
2711
2712 static inline void
2713 grinder_wrr_load(struct rte_sched_subport *subport, uint32_t pos)
2714 {
2715         struct rte_sched_grinder *grinder = subport->grinder + pos;
2716         struct rte_sched_pipe *pipe = grinder->pipe;
2717         struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
2718         uint32_t qmask = grinder->qmask;
2719
2720         grinder->wrr_tokens[0] =
2721                 ((uint16_t) pipe->wrr_tokens[0]) << RTE_SCHED_WRR_SHIFT;
2722         grinder->wrr_tokens[1] =
2723                 ((uint16_t) pipe->wrr_tokens[1]) << RTE_SCHED_WRR_SHIFT;
2724         grinder->wrr_tokens[2] =
2725                 ((uint16_t) pipe->wrr_tokens[2]) << RTE_SCHED_WRR_SHIFT;
2726         grinder->wrr_tokens[3] =
2727                 ((uint16_t) pipe->wrr_tokens[3]) << RTE_SCHED_WRR_SHIFT;
2728
2729         grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
2730         grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
2731         grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
2732         grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;
2733
2734         grinder->wrr_cost[0] = pipe_params->wrr_cost[0];
2735         grinder->wrr_cost[1] = pipe_params->wrr_cost[1];
2736         grinder->wrr_cost[2] = pipe_params->wrr_cost[2];
2737         grinder->wrr_cost[3] = pipe_params->wrr_cost[3];
2738 }
2739
2740 static inline void
2741 grinder_wrr_store(struct rte_sched_subport *subport, uint32_t pos)
2742 {
2743         struct rte_sched_grinder *grinder = subport->grinder + pos;
2744         struct rte_sched_pipe *pipe = grinder->pipe;
2745
2746         pipe->wrr_tokens[0] =
2747                         (grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >>
2748                                 RTE_SCHED_WRR_SHIFT;
2749         pipe->wrr_tokens[1] =
2750                         (grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >>
2751                                 RTE_SCHED_WRR_SHIFT;
2752         pipe->wrr_tokens[2] =
2753                         (grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >>
2754                                 RTE_SCHED_WRR_SHIFT;
2755         pipe->wrr_tokens[3] =
2756                         (grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >>
2757                                 RTE_SCHED_WRR_SHIFT;
2758 }
2759
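/*
 * One WRR selection step: saturate the token counters of inactive
 * queues to 0xFFFF by OR-ing in the inverted mask, pick the queue with
 * the minimum token count as the next candidate, then subtract that
 * minimum from all four counters to keep them from overflowing.
 */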
2760 static inline void
2761 grinder_wrr(struct rte_sched_subport *subport, uint32_t pos)
2762 {
2763         struct rte_sched_grinder *grinder = subport->grinder + pos;
2764         uint16_t wrr_tokens_min;
2765
2766         grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
2767         grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
2768         grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
2769         grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];
2770
2771         grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
2772         wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];
2773
2774         grinder->wrr_tokens[0] -= wrr_tokens_min;
2775         grinder->wrr_tokens[1] -= wrr_tokens_min;
2776         grinder->wrr_tokens[2] -= wrr_tokens_min;
2777         grinder->wrr_tokens[3] -= wrr_tokens_min;
2778 }
2779
2780
2781 #define grinder_evict(subport, pos)
2782
2783 static inline void
2784 grinder_prefetch_pipe(struct rte_sched_subport *subport, uint32_t pos)
2785 {
2786         struct rte_sched_grinder *grinder = subport->grinder + pos;
2787
2788         rte_prefetch0(grinder->pipe);
2789         rte_prefetch0(grinder->queue[0]);
2790 }
2791
2792 static inline void
2793 grinder_prefetch_tc_queue_arrays(struct rte_sched_subport *subport, uint32_t pos)
2794 {
2795         struct rte_sched_grinder *grinder = subport->grinder + pos;
2796         uint16_t qsize, qr[RTE_SCHED_MAX_QUEUES_PER_TC];
2797
2798         qsize = grinder->qsize;
2799         grinder->qpos = 0;
2800
2801         if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
2802                 qr[0] = grinder->queue[0]->qr & (qsize - 1);
2803
2804                 rte_prefetch0(grinder->qbase[0] + qr[0]);
2805                 return;
2806         }
2807
2808         qr[0] = grinder->queue[0]->qr & (qsize - 1);
2809         qr[1] = grinder->queue[1]->qr & (qsize - 1);
2810         qr[2] = grinder->queue[2]->qr & (qsize - 1);
2811         qr[3] = grinder->queue[3]->qr & (qsize - 1);
2812
2813         rte_prefetch0(grinder->qbase[0] + qr[0]);
2814         rte_prefetch0(grinder->qbase[1] + qr[1]);
2815
2816         grinder_wrr_load(subport, pos);
2817         grinder_wrr(subport, pos);
2818
2819         rte_prefetch0(grinder->qbase[2] + qr[2]);
2820         rte_prefetch0(grinder->qbase[3] + qr[3]);
2821 }
2822
2823 static inline void
2824 grinder_prefetch_mbuf(struct rte_sched_subport *subport, uint32_t pos)
2825 {
2826         struct rte_sched_grinder *grinder = subport->grinder + pos;
2827         uint32_t qpos = grinder->qpos;
2828         struct rte_mbuf **qbase = grinder->qbase[qpos];
2829         uint16_t qsize = grinder->qsize;
2830         uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);
2831
2832         grinder->pkt = qbase[qr];
2833         rte_prefetch0(grinder->pkt);
2834
2835         if (unlikely((qr & 0x7) == 7)) {
2836                 uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);
2837
2838                 rte_prefetch0(qbase + qr_next);
2839         }
2840 }
2841
static inline uint32_t
grinder_handle(struct rte_sched_port *port,
        struct rte_sched_subport *subport, uint32_t pos)
{
        struct rte_sched_grinder *grinder = subport->grinder + pos;

        switch (grinder->state) {
        case e_GRINDER_PREFETCH_PIPE:
        {
                if (grinder_next_pipe(port, subport, pos)) {
                        grinder_prefetch_pipe(subport, pos);
                        subport->busy_grinders++;

                        grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
                }

                return 0;
        }

        case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
        {
                struct rte_sched_pipe *pipe = grinder->pipe;

                grinder->pipe_params = subport->pipe_profiles + pipe->profile;
                grinder->subport_params = port->subport_profiles +
                                                subport->profile;

                grinder_prefetch_tc_queue_arrays(subport, pos);
                grinder_credits_update(port, subport, pos);

                grinder->state = e_GRINDER_PREFETCH_MBUF;
                return 0;
        }

        case e_GRINDER_PREFETCH_MBUF:
        {
                grinder_prefetch_mbuf(subport, pos);

                grinder->state = e_GRINDER_READ_MBUF;
                return 0;
        }

        case e_GRINDER_READ_MBUF:
        {
                uint32_t wrr_active, result = 0;

                result = grinder_schedule(port, subport, pos);

                wrr_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE);

                /* Look for next packet within the same TC */
                if (result && grinder->qmask) {
                        if (wrr_active)
                                grinder_wrr(subport, pos);

                        grinder_prefetch_mbuf(subport, pos);

                        return 1;
                }

                if (wrr_active)
                        grinder_wrr_store(subport, pos);

                /* Look for another active TC within same pipe */
                if (grinder_next_tc(port, subport, pos)) {
                        grinder_prefetch_tc_queue_arrays(subport, pos);

                        grinder->state = e_GRINDER_PREFETCH_MBUF;
                        return result;
                }

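                /*
                 * Grinder made no progress on this pipe: record it so
                 * the pipe loop detection (see grinder_next_pipe()) can
                 * flag pipe exhaustion if the same unproductive pipe
                 * keeps being selected.
                 */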
                if (grinder->productive == 0 &&
                    subport->pipe_loop == RTE_SCHED_PIPE_INVALID)
                        subport->pipe_loop = grinder->pindex;

                grinder_evict(subport, pos);

                /* Look for another active pipe */
                if (grinder_next_pipe(port, subport, pos)) {
                        grinder_prefetch_pipe(subport, pos);

                        grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
                        return result;
                }

                /* No active pipe found */
                subport->busy_grinders--;

                grinder->state = e_GRINDER_PREFETCH_PIPE;
                return result;
        }

        default:
                rte_panic("Algorithmic error (invalid state)\n");
                return 0;
        }
}

static inline void
rte_sched_port_time_resync(struct rte_sched_port *port)
{
        uint64_t cycles = rte_get_tsc_cycles();
        uint64_t cycles_diff;
        uint64_t bytes_diff;
        uint32_t i;

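        /* Handle a TSC wrap-around by restarting the time reference */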
        if (cycles < port->time_cpu_cycles)
                port->time_cpu_cycles = 0;

        cycles_diff = cycles - port->time_cpu_cycles;
        /* Compute elapsed time in bytes */
        bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT,
                                           port->inv_cycles_per_byte);

        /* Advance port time: move the cycle reference forward only by
         * the cycle equivalent of the whole bytes just accounted for,
         * so the fractional remainder of cycles_diff carries over to
         * the next resync instead of being lost.
         */
        port->time_cpu_cycles +=
                (bytes_diff * port->cycles_per_byte) >> RTE_SCHED_TIME_SHIFT;
        port->time_cpu_bytes += bytes_diff;
        if (port->time < port->time_cpu_bytes)
                port->time = port->time_cpu_bytes;

        /* Reset pipe loop detection */
        for (i = 0; i < port->n_subports_per_port; i++)
                port->subports[i]->pipe_loop = RTE_SCHED_PIPE_INVALID;
}
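
/*
 * Numerical sketch (illustrative; the rate and TSC frequency below are
 * assumptions): for a port rate of 1.25e9 bytes/s (10 GbE) and a 2 GHz
 * TSC, port configuration derives cycles_per_byte = (2e9 << 8) / 1.25e9
 * = 409. A resync after cycles_diff = 2000 cycles then yields
 * bytes_diff = (2000 << 8) / 409 ~= 1251 bytes, i.e. roughly 1 us of
 * transmit budget at line rate.
 */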

static inline int
rte_sched_port_exceptions(struct rte_sched_subport *subport, int second_pass)
{
        int exceptions;

        /* Check if any exception flag is set: no busy grinder on the
         * second pass means the subport has no more active pipes to
         * serve, while pipe exhaustion means the grinders kept looping
         * back to the same unproductive pipe.
         */
        exceptions = (second_pass && subport->busy_grinders == 0) ||
                (subport->pipe_exhaustion == 1);

        /* Clear exception flags */
        subport->pipe_exhaustion = 0;

        return exceptions;
}

int
rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
        struct rte_sched_subport *subport;
        uint32_t subport_id = port->subport_id;
        uint32_t i, n_subports = 0, count;

        port->pkts_out = pkts;
        port->n_pkts_out = 0;

        rte_sched_port_time_resync(port);

        /* Take each grinder one step further: round-robin over the
         * grinders of the current subport, move to the next subport
         * when the current one cannot produce more packets, and stop
         * once n_pkts packets are scheduled or all subports have been
         * visited.
         */
        for (i = 0, count = 0; ; i++) {
                subport = port->subports[subport_id];

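                /* Mask i to cycle through this subport's grinders */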
                count += grinder_handle(port, subport,
                                i & (RTE_SCHED_PORT_N_GRINDERS - 1));

                if (count == n_pkts) {
                        subport_id++;

                        if (subport_id == port->n_subports_per_port)
                                subport_id = 0;

                        port->subport_id = subport_id;
                        break;
                }

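                /*
                 * Exception on the current subport: either no grinder
                 * is busy on the second pass or pipe exhaustion was
                 * flagged, so restart the grinder walk on the next
                 * subport.
                 */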
                if (rte_sched_port_exceptions(subport, i >= RTE_SCHED_PORT_N_GRINDERS)) {
                        i = 0;
                        subport_id++;
                        n_subports++;
                }

                if (subport_id == port->n_subports_per_port)
                        subport_id = 0;

                if (n_subports == port->n_subports_per_port) {
                        port->subport_id = subport_id;
                        break;
                }
        }

        return count;
}
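
/*
 * Usage sketch (illustrative only; the ethdev port/queue ids and the
 * burst size below are assumptions, not part of this library): a
 * typical transmit loop pairs the scheduler dequeue with an Ethernet
 * TX burst and frees whatever the NIC did not accept.
 *
 *	struct rte_mbuf *pkts[64];
 *	uint16_t n_rdy, n_tx, k;
 *
 *	n_rdy = (uint16_t)rte_sched_port_dequeue(sched_port, pkts, 64);
 *	n_tx = rte_eth_tx_burst(eth_port_id, tx_queue_id, pkts, n_rdy);
 *	for (k = n_tx; k < n_rdy; k++)
 *		rte_pktmbuf_free(pkts[k]);
 */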