/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf.h>
#include <rte_bitmap.h>
#include <rte_reciprocal.h>

#include "rte_sched.h"
#include "rte_sched_common.h"
#include "rte_approx.h"

#ifdef __INTEL_COMPILER
#pragma warning(disable:2259) /* conversion may lose significant bits */
#endif

#ifdef RTE_SCHED_VECTOR
#include <rte_vect.h>

#ifdef RTE_ARCH_X86
#define SCHED_VECTOR_SSE4
#elif defined(__ARM_NEON)
#define SCHED_VECTOR_NEON
#endif

#endif

#define RTE_SCHED_TB_RATE_CONFIG_ERR          (1e-7)
#define RTE_SCHED_WRR_SHIFT                   3
#define RTE_SCHED_MAX_QUEUES_PER_TC           RTE_SCHED_BE_QUEUES_PER_PIPE
#define RTE_SCHED_GRINDER_PCACHE_SIZE         (64 / RTE_SCHED_QUEUES_PER_PIPE)
#define RTE_SCHED_PIPE_INVALID                UINT32_MAX
#define RTE_SCHED_BMP_POS_INVALID             UINT32_MAX

/* Scaling for cycles_per_byte calculation
 * Chosen so that minimum rate is 480 bit/sec
 */
#define RTE_SCHED_TIME_SHIFT                  8
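/*
 * Back-of-the-envelope check for the shift above (a sketch, assuming a
 * 1 GHz TSC): cycles_per_byte = (tsc_hz << RTE_SCHED_TIME_SHIFT) / rate
 * must fit in 32 bits for the reciprocal division used below, so the
 * smallest usable rate is roughly (1e9 << 8) / 2^32 ~= 60 bytes/sec,
 * i.e. about 480 bit/sec.
 */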

struct rte_sched_pipe_profile {
        /* Token bucket (TB) */
        uint64_t tb_period;
        uint64_t tb_credits_per_period;
        uint64_t tb_size;

        /* Pipe traffic classes */
        uint64_t tc_period;
        uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint8_t tc_ov_weight;

        /* Pipe best-effort traffic class queues */
        uint8_t  wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
};

struct rte_sched_pipe {
        /* Token bucket (TB) */
        uint64_t tb_time; /* time of last update */
        uint64_t tb_credits;

        /* Pipe profile and flags */
        uint32_t profile;

        /* Traffic classes (TCs) */
        uint64_t tc_time; /* time of next update */
        uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

        /* Weighted Round Robin (WRR) */
        uint8_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];

        /* TC oversubscription */
        uint64_t tc_ov_credits;
        uint8_t tc_ov_period_id;
} __rte_cache_aligned;

struct rte_sched_queue {
        uint16_t qw;
        uint16_t qr;
};

struct rte_sched_queue_extra {
        struct rte_sched_queue_stats stats;
#ifdef RTE_SCHED_RED
        struct rte_red red;
#endif
};

enum grinder_state {
        e_GRINDER_PREFETCH_PIPE = 0,
        e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
        e_GRINDER_PREFETCH_MBUF,
        e_GRINDER_READ_MBUF
};

struct rte_sched_subport_profile {
        /* Token bucket (TB) */
        uint64_t tb_period;
        uint64_t tb_credits_per_period;
        uint64_t tb_size;

        uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint64_t tc_period;
};

struct rte_sched_grinder {
        /* Pipe cache */
        uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
        uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
        uint32_t pcache_w;
        uint32_t pcache_r;

        /* Current pipe */
        enum grinder_state state;
        uint32_t productive;
        uint32_t pindex;
        struct rte_sched_subport *subport;
        struct rte_sched_pipe *pipe;
        struct rte_sched_pipe_profile *pipe_params;

        /* TC cache */
        uint8_t tccache_qmask[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint32_t tccache_qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint32_t tccache_w;
        uint32_t tccache_r;

        /* Current TC */
        uint32_t tc_index;
        struct rte_sched_queue *queue[RTE_SCHED_MAX_QUEUES_PER_TC];
        struct rte_mbuf **qbase[RTE_SCHED_MAX_QUEUES_PER_TC];
        uint32_t qindex[RTE_SCHED_MAX_QUEUES_PER_TC];
        uint16_t qsize;
        uint32_t qmask;
        uint32_t qpos;
        struct rte_mbuf *pkt;

        /* WRR */
        uint16_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
        uint16_t wrr_mask[RTE_SCHED_BE_QUEUES_PER_PIPE];
        uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
};

struct rte_sched_subport {
        /* Token bucket (TB) */
        uint64_t tb_time; /* time of last update */
        uint64_t tb_period;
        uint64_t tb_credits_per_period;
        uint64_t tb_size;
        uint64_t tb_credits;

        /* Traffic classes (TCs) */
        uint64_t tc_time; /* time of next update */
        uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint64_t tc_period;

        /* TC oversubscription */
        uint64_t tc_ov_wm;
        uint64_t tc_ov_wm_min;
        uint64_t tc_ov_wm_max;
        uint8_t tc_ov_period_id;
        uint8_t tc_ov;
        uint32_t tc_ov_n;
        double tc_ov_rate;

        /* Statistics */
        struct rte_sched_subport_stats stats __rte_cache_aligned;

        /* Subport pipes */
        uint32_t n_pipes_per_subport_enabled;
        uint32_t n_pipe_profiles;
        uint32_t n_max_pipe_profiles;

        /* Pipe best-effort TC rate */
        uint64_t pipe_tc_be_rate_max;

        /* Pipe queues size */
        uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

#ifdef RTE_SCHED_RED
        struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
#endif

        /* Scheduling loop detection */
        uint32_t pipe_loop;
        uint32_t pipe_exhaustion;

        /* Bitmap */
        struct rte_bitmap *bmp;
        uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;

        /* Grinders */
        struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
        uint32_t busy_grinders;

        /* Queue base calculation */
        uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
        uint32_t qsize_sum;

        struct rte_sched_pipe *pipe;
        struct rte_sched_queue *queue;
        struct rte_sched_queue_extra *queue_extra;
        struct rte_sched_pipe_profile *pipe_profiles;
        uint8_t *bmp_array;
        struct rte_mbuf **queue_array;
        uint8_t memory[0] __rte_cache_aligned;
} __rte_cache_aligned;

struct rte_sched_port {
        /* User parameters */
        uint32_t n_subports_per_port;
        uint32_t n_pipes_per_subport;
        uint32_t n_pipes_per_subport_log2;
        uint16_t pipe_queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint8_t pipe_tc[RTE_SCHED_QUEUES_PER_PIPE];
        uint8_t tc_queue[RTE_SCHED_QUEUES_PER_PIPE];
        uint32_t n_subport_profiles;
        uint32_t n_max_subport_profiles;
        uint64_t rate;
        uint32_t mtu;
        uint32_t frame_overhead;
        int socket;

        /* Timing */
        uint64_t time_cpu_cycles;     /* Current CPU time measured in CPU cycles */
        uint64_t time_cpu_bytes;      /* Current CPU time measured in bytes */
        uint64_t time;                /* Current NIC TX time measured in bytes */
        struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */
        uint64_t cycles_per_byte;

        /* Grinders */
        struct rte_mbuf **pkts_out;
        uint32_t n_pkts_out;
        uint32_t subport_id;

        /* Large data structures */
        struct rte_sched_subport_profile *subport_profiles;
        struct rte_sched_subport *subports[0] __rte_cache_aligned;
} __rte_cache_aligned;

enum rte_sched_subport_array {
        e_RTE_SCHED_SUBPORT_ARRAY_PIPE = 0,
        e_RTE_SCHED_SUBPORT_ARRAY_QUEUE,
        e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA,
        e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES,
        e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY,
        e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY,
        e_RTE_SCHED_SUBPORT_ARRAY_TOTAL,
};

static inline uint32_t
rte_sched_subport_pipe_queues(struct rte_sched_subport *subport)
{
        return RTE_SCHED_QUEUES_PER_PIPE * subport->n_pipes_per_subport_enabled;
}

static inline struct rte_mbuf **
rte_sched_subport_pipe_qbase(struct rte_sched_subport *subport, uint32_t qindex)
{
        uint32_t pindex = qindex >> 4; /* 16 = RTE_SCHED_QUEUES_PER_PIPE */
        uint32_t qpos = qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1);

        return (subport->queue_array + pindex *
                subport->qsize_sum + subport->qsize_add[qpos]);
}
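
/*
 * Illustration (a sketch, given the current RTE_SCHED_QUEUES_PER_PIPE = 16
 * layout): for qindex = 37, pindex = 37 >> 4 = 2 and qpos = 37 & 15 = 5,
 * so the queue storage starts at queue_array + 2 * qsize_sum + qsize_add[5],
 * i.e. pipe 2 of the subport, queue 5 within that pipe.
 */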

static inline uint16_t
rte_sched_subport_pipe_qsize(struct rte_sched_port *port,
struct rte_sched_subport *subport, uint32_t qindex)
{
        uint32_t tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

        return subport->qsize[tc];
}

static inline uint32_t
rte_sched_port_queues_per_port(struct rte_sched_port *port)
{
        uint32_t n_queues = 0, i;

        for (i = 0; i < port->n_subports_per_port; i++)
                n_queues += rte_sched_subport_pipe_queues(port->subports[i]);

        return n_queues;
}

static inline uint16_t
rte_sched_port_pipe_queue(struct rte_sched_port *port, uint32_t traffic_class)
{
        uint16_t pipe_queue = port->pipe_queue[traffic_class];

        return pipe_queue;
}

static inline uint8_t
rte_sched_port_pipe_tc(struct rte_sched_port *port, uint32_t qindex)
{
        uint8_t pipe_tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

        return pipe_tc;
}

static inline uint8_t
rte_sched_port_tc_queue(struct rte_sched_port *port, uint32_t qindex)
{
        uint8_t tc_queue = port->tc_queue[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

        return tc_queue;
}
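
/*
 * The lookup tables above are filled in rte_sched_port_config(). A sketch
 * of the resulting mapping, assuming 13 traffic classes and 4 best-effort
 * queues per pipe (the current defaults):
 *
 *   queue within pipe:  0  1  2 ... 11 | 12 13 14 15
 *   pipe_tc[]:          0  1  2 ... 11 | 12 12 12 12   (BE TC owns 4 queues)
 *   tc_queue[]:         0  0  0 ...  0 |  0  1  2  3
 */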

static int
pipe_profile_check(struct rte_sched_pipe_params *params,
        uint64_t rate, uint16_t *qsize)
{
        uint32_t i;

        /* Pipe parameters */
        if (params == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter params\n", __func__);
                return -EINVAL;
        }

        /* TB rate: non-zero, not greater than port rate */
        if (params->tb_rate == 0 ||
                params->tb_rate > rate) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tb rate\n", __func__);
                return -EINVAL;
        }

        /* TB size: non-zero */
        if (params->tb_size == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tb size\n", __func__);
                return -EINVAL;
        }

        /* TC rate: non-zero if qsize non-zero, less than pipe rate */
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                if ((qsize[i] == 0 && params->tc_rate[i] != 0) ||
                        (qsize[i] != 0 && (params->tc_rate[i] == 0 ||
                        params->tc_rate[i] > params->tb_rate))) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Incorrect value for qsize or tc_rate\n", __func__);
                        return -EINVAL;
                }
        }

        if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
                qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for be traffic class rate\n", __func__);
                return -EINVAL;
        }

        /* TC period: non-zero */
        if (params->tc_period == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tc period\n", __func__);
                return -EINVAL;
        }

        /* Best effort tc oversubscription weight: non-zero */
        if (params->tc_ov_weight == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tc ov weight\n", __func__);
                return -EINVAL;
        }

        /* Queue WRR weights: non-zero */
        for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
                if (params->wrr_weights[i] == 0) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Incorrect value for wrr weight\n", __func__);
                        return -EINVAL;
                }
        }

        return 0;
}

static int
subport_profile_check(struct rte_sched_subport_profile_params *params,
        uint64_t rate)
{
        uint32_t i;

        /* Check user parameters */
        if (params == NULL) {
                RTE_LOG(ERR, SCHED, "%s: "
                "Incorrect value for parameter params\n", __func__);
                return -EINVAL;
        }

        if (params->tb_rate == 0 || params->tb_rate > rate) {
                RTE_LOG(ERR, SCHED, "%s: "
                "Incorrect value for tb rate\n", __func__);
                return -EINVAL;
        }

        if (params->tb_size == 0) {
                RTE_LOG(ERR, SCHED, "%s: "
                "Incorrect value for tb size\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                uint64_t tc_rate = params->tc_rate[i];

                if (tc_rate == 0 || (tc_rate > params->tb_rate)) {
                        RTE_LOG(ERR, SCHED, "%s: "
                        "Incorrect value for tc rate\n", __func__);
                        return -EINVAL;
                }
        }

        if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
                RTE_LOG(ERR, SCHED, "%s: "
                "Incorrect tc rate(best effort)\n", __func__);
                return -EINVAL;
        }

        if (params->tc_period == 0) {
                RTE_LOG(ERR, SCHED, "%s: "
                "Incorrect value for tc period\n", __func__);
                return -EINVAL;
        }

        return 0;
}

static int
rte_sched_port_check_params(struct rte_sched_port_params *params)
{
        uint32_t i;

        if (params == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter params\n", __func__);
                return -EINVAL;
        }

        /* socket */
        if (params->socket < 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for socket id\n", __func__);
                return -EINVAL;
        }

        /* rate */
        if (params->rate == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for rate\n", __func__);
                return -EINVAL;
        }

        /* mtu */
        if (params->mtu == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for mtu\n", __func__);
                return -EINVAL;
        }

        /* n_subports_per_port: non-zero, limited to 16 bits, power of 2 */
        if (params->n_subports_per_port == 0 ||
            params->n_subports_per_port > 1u << 16 ||
            !rte_is_power_of_2(params->n_subports_per_port)) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for number of subports\n", __func__);
                return -EINVAL;
        }

        if (params->subport_profiles == NULL ||
                params->n_subport_profiles == 0 ||
                params->n_max_subport_profiles == 0 ||
                params->n_subport_profiles > params->n_max_subport_profiles) {
                RTE_LOG(ERR, SCHED,
                "%s: Incorrect value for subport profiles\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < params->n_subport_profiles; i++) {
                struct rte_sched_subport_profile_params *p =
                                                params->subport_profiles + i;
                int status;

                status = subport_profile_check(p, params->rate);
                if (status != 0) {
                        RTE_LOG(ERR, SCHED,
                        "%s: subport profile check failed(%d)\n",
                        __func__, status);
                        return -EINVAL;
                }
        }

        /* n_pipes_per_subport: non-zero, power of 2 */
        if (params->n_pipes_per_subport == 0 ||
            !rte_is_power_of_2(params->n_pipes_per_subport)) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for maximum pipes number\n", __func__);
                return -EINVAL;
        }

        return 0;
}

static uint32_t
rte_sched_subport_get_array_base(struct rte_sched_subport_params *params,
        enum rte_sched_subport_array array)
{
        uint32_t n_pipes_per_subport = params->n_pipes_per_subport_enabled;
        uint32_t n_subport_pipe_queues =
                RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport;

        uint32_t size_pipe = n_pipes_per_subport * sizeof(struct rte_sched_pipe);
        uint32_t size_queue =
                n_subport_pipe_queues * sizeof(struct rte_sched_queue);
        uint32_t size_queue_extra
                = n_subport_pipe_queues * sizeof(struct rte_sched_queue_extra);
        uint32_t size_pipe_profiles = params->n_max_pipe_profiles *
                sizeof(struct rte_sched_pipe_profile);
        uint32_t size_bmp_array =
                rte_bitmap_get_memory_footprint(n_subport_pipe_queues);
        uint32_t size_per_pipe_queue_array, size_queue_array;

        uint32_t base, i;

        size_per_pipe_queue_array = 0;
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                if (i < RTE_SCHED_TRAFFIC_CLASS_BE)
                        size_per_pipe_queue_array +=
                                params->qsize[i] * sizeof(struct rte_mbuf *);
                else
                        size_per_pipe_queue_array += RTE_SCHED_MAX_QUEUES_PER_TC *
                                params->qsize[i] * sizeof(struct rte_mbuf *);
        }
        size_queue_array = n_pipes_per_subport * size_per_pipe_queue_array;

        base = 0;

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_pipe);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_queue);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);

        return base;
}
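
/*
 * Memory layout sketch (hypothetical numbers): with 1024 pipes enabled per
 * subport, the per-subport memory block is carved up in this fixed order,
 * each section rounded up to a cache line:
 *
 *   pipe[1024] | queue[16384] | queue_extra[16384] | pipe_profiles |
 *   bitmap array | queue (mbuf pointer) arrays
 *
 * Calling this function with e_RTE_SCHED_SUBPORT_ARRAY_TOTAL returns the
 * grand total, which is also how the footprint is sized further below.
 */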

static void
rte_sched_subport_config_qsize(struct rte_sched_subport *subport)
{
        uint32_t i;

        subport->qsize_add[0] = 0;

        /* Strict priority traffic classes */
        for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1];

        /* Best-effort traffic class */
        subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] =
                subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE] +
                subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
        subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] =
                subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] +
                subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
        subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] =
                subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] +
                subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];

        subport->qsize_sum = subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] +
                subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
}
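
/*
 * Worked example (hypothetical sizes): qsize[] = 64 for each of the 12
 * strict-priority TCs and 128 for the best-effort TC gives
 * qsize_add[] = {0, 64, 128, ..., 704, 768, 896, 1024, 1152} and
 * qsize_sum = 1280 mbuf pointers per pipe (12 * 64 + 4 * 128).
 */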

static void
rte_sched_port_log_pipe_profile(struct rte_sched_subport *subport, uint32_t i)
{
        struct rte_sched_pipe_profile *p = subport->pipe_profiles + i;

        RTE_LOG(DEBUG, SCHED, "Low level config for pipe profile %u:\n"
                "       Token bucket: period = %"PRIu64", credits per period = %"PRIu64", size = %"PRIu64"\n"
                "       Traffic classes: period = %"PRIu64",\n"
                "       credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
                ", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
                ", %"PRIu64", %"PRIu64", %"PRIu64"]\n"
                "       Best-effort traffic class oversubscription: weight = %hhu\n"
                "       WRR cost: [%hhu, %hhu, %hhu, %hhu]\n",
                i,

                /* Token bucket */
                p->tb_period,
                p->tb_credits_per_period,
                p->tb_size,

                /* Traffic classes */
                p->tc_period,
                p->tc_credits_per_period[0],
                p->tc_credits_per_period[1],
                p->tc_credits_per_period[2],
                p->tc_credits_per_period[3],
                p->tc_credits_per_period[4],
                p->tc_credits_per_period[5],
                p->tc_credits_per_period[6],
                p->tc_credits_per_period[7],
                p->tc_credits_per_period[8],
                p->tc_credits_per_period[9],
                p->tc_credits_per_period[10],
                p->tc_credits_per_period[11],
                p->tc_credits_per_period[12],

                /* Best-effort traffic class oversubscription */
                p->tc_ov_weight,

                /* WRR */
                p->wrr_cost[0], p->wrr_cost[1], p->wrr_cost[2], p->wrr_cost[3]);
}

static void
rte_sched_port_log_subport_profile(struct rte_sched_port *port, uint32_t i)
{
        struct rte_sched_subport_profile *p = port->subport_profiles + i;

        RTE_LOG(DEBUG, SCHED, "Low level config for subport profile %u:\n"
        "Token bucket: period = %"PRIu64", credits per period = %"PRIu64","
        "size = %"PRIu64"\n"
        "Traffic classes: period = %"PRIu64",\n"
        "credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
        ", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
        ", %"PRIu64", %"PRIu64", %"PRIu64"]\n",
        i,

        /* Token bucket */
        p->tb_period,
        p->tb_credits_per_period,
        p->tb_size,

        /* Traffic classes */
        p->tc_period,
        p->tc_credits_per_period[0],
        p->tc_credits_per_period[1],
        p->tc_credits_per_period[2],
        p->tc_credits_per_period[3],
        p->tc_credits_per_period[4],
        p->tc_credits_per_period[5],
        p->tc_credits_per_period[6],
        p->tc_credits_per_period[7],
        p->tc_credits_per_period[8],
        p->tc_credits_per_period[9],
        p->tc_credits_per_period[10],
        p->tc_credits_per_period[11],
        p->tc_credits_per_period[12]);
}

static inline uint64_t
rte_sched_time_ms_to_bytes(uint64_t time_ms, uint64_t rate)
{
        uint64_t time = time_ms;

        time = (time * rate) / 1000;

        return time;
}
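
/*
 * Example: for a 10 GbE port, rate = 1.25e9 bytes/sec, so a tc_period of
 * 10 ms converts to (10 * 1250000000) / 1000 = 12,500,000 bytes of credit
 * per enforcement period.
 */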

static void
rte_sched_pipe_profile_convert(struct rte_sched_subport *subport,
        struct rte_sched_pipe_params *src,
        struct rte_sched_pipe_profile *dst,
        uint64_t rate)
{
        uint32_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
        uint32_t lcd1, lcd2, lcd;
        uint32_t i;

        /* Token Bucket */
        if (src->tb_rate == rate) {
                dst->tb_credits_per_period = 1;
                dst->tb_period = 1;
        } else {
                double tb_rate = (double) src->tb_rate
                                / (double) rate;
                double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

                rte_approx_64(tb_rate, d, &dst->tb_credits_per_period,
                        &dst->tb_period);
        }

        dst->tb_size = src->tb_size;

        /* Traffic Classes */
        dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period,
                                                rate);

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                if (subport->qsize[i])
                        dst->tc_credits_per_period[i]
                                = rte_sched_time_ms_to_bytes(src->tc_period,
                                        src->tc_rate[i]);

        dst->tc_ov_weight = src->tc_ov_weight;

        /* WRR queues */
        wrr_cost[0] = src->wrr_weights[0];
        wrr_cost[1] = src->wrr_weights[1];
        wrr_cost[2] = src->wrr_weights[2];
        wrr_cost[3] = src->wrr_weights[3];

        lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
        lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
        lcd = rte_get_lcd(lcd1, lcd2);

        wrr_cost[0] = lcd / wrr_cost[0];
        wrr_cost[1] = lcd / wrr_cost[1];
        wrr_cost[2] = lcd / wrr_cost[2];
        wrr_cost[3] = lcd / wrr_cost[3];

        dst->wrr_cost[0] = (uint8_t) wrr_cost[0];
        dst->wrr_cost[1] = (uint8_t) wrr_cost[1];
        dst->wrr_cost[2] = (uint8_t) wrr_cost[2];
        dst->wrr_cost[3] = (uint8_t) wrr_cost[3];
}
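
/*
 * WRR cost sketch: for wrr_weights = {1, 2, 3, 4}, rte_get_lcd() gives
 * lcd1 = lcd(1, 2) = 2, lcd2 = lcd(3, 4) = 12 and lcd = lcd(2, 12) = 12,
 * so wrr_cost = {12, 6, 4, 3}. Each queue's cost is inversely proportional
 * to its weight, which is what makes the bandwidth served per queue
 * proportional to the configured weights.
 */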

static void
rte_sched_subport_profile_convert(struct rte_sched_subport_profile_params *src,
        struct rte_sched_subport_profile *dst,
        uint64_t rate)
{
        uint32_t i;

        /* Token Bucket */
        if (src->tb_rate == rate) {
                dst->tb_credits_per_period = 1;
                dst->tb_period = 1;
        } else {
                double tb_rate = (double) src->tb_rate
                                / (double) rate;
                double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

                rte_approx_64(tb_rate, d, &dst->tb_credits_per_period,
                        &dst->tb_period);
        }

        dst->tb_size = src->tb_size;

        /* Traffic Classes */
        dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period, rate);

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                dst->tc_credits_per_period[i]
                        = rte_sched_time_ms_to_bytes(src->tc_period,
                                src->tc_rate[i]);
}

static void
rte_sched_subport_config_pipe_profile_table(struct rte_sched_subport *subport,
        struct rte_sched_subport_params *params, uint64_t rate)
{
        uint32_t i;

        for (i = 0; i < subport->n_pipe_profiles; i++) {
                struct rte_sched_pipe_params *src = params->pipe_profiles + i;
                struct rte_sched_pipe_profile *dst = subport->pipe_profiles + i;

                rte_sched_pipe_profile_convert(subport, src, dst, rate);
                rte_sched_port_log_pipe_profile(subport, i);
        }

        subport->pipe_tc_be_rate_max = 0;
        for (i = 0; i < subport->n_pipe_profiles; i++) {
                struct rte_sched_pipe_params *src = params->pipe_profiles + i;
                uint64_t pipe_tc_be_rate = src->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];

                if (subport->pipe_tc_be_rate_max < pipe_tc_be_rate)
                        subport->pipe_tc_be_rate_max = pipe_tc_be_rate;
        }
}

static void
rte_sched_port_config_subport_profile_table(struct rte_sched_port *port,
        struct rte_sched_port_params *params,
        uint64_t rate)
{
        uint32_t i;

        for (i = 0; i < port->n_subport_profiles; i++) {
                struct rte_sched_subport_profile_params *src
                                = params->subport_profiles + i;
                struct rte_sched_subport_profile *dst
                                = port->subport_profiles + i;

                rte_sched_subport_profile_convert(src, dst, rate);
                rte_sched_port_log_subport_profile(port, i);
        }
}

static int
rte_sched_subport_check_params(struct rte_sched_subport_params *params,
        uint32_t n_max_pipes_per_subport,
        uint64_t rate)
{
        uint32_t i;

        /* Check user parameters */
        if (params == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter params\n", __func__);
                return -EINVAL;
        }

        if (params->tb_rate == 0 || params->tb_rate > rate) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tb rate\n", __func__);
                return -EINVAL;
        }

        if (params->tb_size == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tb size\n", __func__);
                return -EINVAL;
        }

        /* qsize: if non-zero, power of 2,
         * no bigger than 32K (due to 16-bit read/write pointers)
         */
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                uint16_t qsize = params->qsize[i];

                if (qsize != 0 && !rte_is_power_of_2(qsize)) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Incorrect value for qsize\n", __func__);
                        return -EINVAL;
                }
        }

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                uint64_t tc_rate = params->tc_rate[i];
                uint16_t qsize = params->qsize[i];

                if ((qsize == 0 && tc_rate != 0) ||
                        (qsize != 0 && tc_rate == 0) ||
                        (tc_rate > params->tb_rate)) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Incorrect value for tc rate\n", __func__);
                        return -EINVAL;
                }
        }

        if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
                params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect qsize or tc rate(best effort)\n", __func__);
                return -EINVAL;
        }

        if (params->tc_period == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tc period\n", __func__);
                return -EINVAL;
        }

        /* n_pipes_per_subport: non-zero, within the port limit, power of 2 */
        if (params->n_pipes_per_subport_enabled == 0 ||
                params->n_pipes_per_subport_enabled > n_max_pipes_per_subport ||
            !rte_is_power_of_2(params->n_pipes_per_subport_enabled)) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for pipes number\n", __func__);
                return -EINVAL;
        }

        /* pipe_profiles and n_pipe_profiles */
        if (params->pipe_profiles == NULL ||
            params->n_pipe_profiles == 0 ||
                params->n_max_pipe_profiles == 0 ||
                params->n_pipe_profiles > params->n_max_pipe_profiles) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for pipe profiles\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < params->n_pipe_profiles; i++) {
                struct rte_sched_pipe_params *p = params->pipe_profiles + i;
                int status;

                status = pipe_profile_check(p, rate, &params->qsize[0]);
                if (status != 0) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Pipe profile check failed(%d)\n", __func__, status);
                        return -EINVAL;
                }
        }

        return 0;
}

uint32_t
rte_sched_port_get_memory_footprint(struct rte_sched_port_params *port_params,
        struct rte_sched_subport_params **subport_params)
{
        uint32_t size0 = 0, size1 = 0, i;
        int status;

        status = rte_sched_port_check_params(port_params);
        if (status != 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Port scheduler port params check failed (%d)\n",
                        __func__, status);

                return 0;
        }

        for (i = 0; i < port_params->n_subports_per_port; i++) {
                struct rte_sched_subport_params *sp = subport_params[i];

                status = rte_sched_subport_check_params(sp,
                                port_params->n_pipes_per_subport,
                                port_params->rate);
                if (status != 0) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Port scheduler subport params check failed (%d)\n",
                                __func__, status);

                        return 0;
                }
        }

        size0 = sizeof(struct rte_sched_port);

        for (i = 0; i < port_params->n_subports_per_port; i++) {
                struct rte_sched_subport_params *sp = subport_params[i];

                size1 += rte_sched_subport_get_array_base(sp,
                                        e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
        }

        return size0 + size1;
}
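
/*
 * Usage sketch (hypothetical caller code): the footprint can be queried
 * before committing to a configuration, e.g. to validate it against a
 * memory budget. A return value of 0 means the parameters are invalid.
 *
 *   uint32_t size = rte_sched_port_get_memory_footprint(&port_params,
 *                                                       subport_params);
 *   if (size == 0 || size > budget)
 *           return -1;
 */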

struct rte_sched_port *
rte_sched_port_config(struct rte_sched_port_params *params)
{
        struct rte_sched_port *port = NULL;
        uint32_t size0, size1, size2;
        uint32_t cycles_per_byte;
        uint32_t i, j;
        int status;

        status = rte_sched_port_check_params(params);
        if (status != 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Port scheduler params check failed (%d)\n",
                        __func__, status);
                return NULL;
        }

        size0 = sizeof(struct rte_sched_port);
        size1 = params->n_subports_per_port * sizeof(struct rte_sched_subport *);
        size2 = params->n_max_subport_profiles *
                sizeof(struct rte_sched_subport_profile);

        /* Allocate memory to store the data structures */
        port = rte_zmalloc_socket("qos_params", size0 + size1,
                                 RTE_CACHE_LINE_SIZE, params->socket);
        if (port == NULL) {
                RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);

                return NULL;
        }

        /* Allocate memory to store the subport profile */
        port->subport_profiles = rte_zmalloc_socket("subport_profile", size2,
                                        RTE_CACHE_LINE_SIZE, params->socket);
        if (port->subport_profiles == NULL) {
                RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);

                rte_free(port);
                return NULL;
        }

        /* User parameters */
        port->n_subports_per_port = params->n_subports_per_port;
        port->n_subport_profiles = params->n_subport_profiles;
        port->n_max_subport_profiles = params->n_max_subport_profiles;
        port->n_pipes_per_subport = params->n_pipes_per_subport;
        port->n_pipes_per_subport_log2 =
                        __builtin_ctz(params->n_pipes_per_subport);
        port->socket = params->socket;

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                port->pipe_queue[i] = i;

        for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
                port->pipe_tc[i] = j;

                if (j < RTE_SCHED_TRAFFIC_CLASS_BE)
                        j++;
        }

        for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
                port->tc_queue[i] = j;

                if (i >= RTE_SCHED_TRAFFIC_CLASS_BE)
                        j++;
        }
        port->rate = params->rate;
        port->mtu = params->mtu + params->frame_overhead;
        port->frame_overhead = params->frame_overhead;

        /* Timing */
        port->time_cpu_cycles = rte_get_tsc_cycles();
        port->time_cpu_bytes = 0;
        port->time = 0;

        /* Subport profile table */
        rte_sched_port_config_subport_profile_table(port, params, port->rate);

        cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT)
                / params->rate;
        port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte);
        port->cycles_per_byte = cycles_per_byte;

        /* Grinders */
        port->pkts_out = NULL;
        port->n_pkts_out = 0;
        port->subport_id = 0;

        return port;
}
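
/*
 * Configuration-order note: a freshly created port has no subports or pipes
 * yet. The expected bring-up sequence is rte_sched_port_config(), then
 * rte_sched_subport_config() for each subport, then rte_sched_pipe_config()
 * for each pipe (a combined sketch follows rte_sched_pipe_config() below).
 */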

static inline void
rte_sched_subport_free(struct rte_sched_port *port,
        struct rte_sched_subport *subport)
{
        uint32_t n_subport_pipe_queues;
        uint32_t qindex;

        if (subport == NULL)
                return;

        n_subport_pipe_queues = rte_sched_subport_pipe_queues(subport);

        /* Free enqueued mbufs */
        for (qindex = 0; qindex < n_subport_pipe_queues; qindex++) {
                struct rte_mbuf **mbufs =
                        rte_sched_subport_pipe_qbase(subport, qindex);
                uint16_t qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
                if (qsize != 0) {
                        struct rte_sched_queue *queue = subport->queue + qindex;
                        uint16_t qr = queue->qr & (qsize - 1);
                        uint16_t qw = queue->qw & (qsize - 1);

                        for (; qr != qw; qr = (qr + 1) & (qsize - 1))
                                rte_pktmbuf_free(mbufs[qr]);
                }
        }

        rte_free(subport);
}

void
rte_sched_port_free(struct rte_sched_port *port)
{
        uint32_t i;

        /* Check user parameters */
        if (port == NULL)
                return;

        for (i = 0; i < port->n_subports_per_port; i++)
                rte_sched_subport_free(port, port->subports[i]);

        rte_free(port->subport_profiles);
        rte_free(port);
}

static void
rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
{
        struct rte_sched_subport *s = port->subports[i];

        RTE_LOG(DEBUG, SCHED, "Low level config for subport %u:\n"
                "       Token bucket: period = %"PRIu64", credits per period = %"PRIu64
                ", size = %"PRIu64"\n"
                "       Traffic classes: period = %"PRIu64"\n"
                "       credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
                ", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
                ", %"PRIu64", %"PRIu64", %"PRIu64"]\n"
                "       Best effort traffic class oversubscription: wm min = %"PRIu64
                ", wm max = %"PRIu64"\n",
                i,

                /* Token bucket */
                s->tb_period,
                s->tb_credits_per_period,
                s->tb_size,

                /* Traffic classes */
                s->tc_period,
                s->tc_credits_per_period[0],
                s->tc_credits_per_period[1],
                s->tc_credits_per_period[2],
                s->tc_credits_per_period[3],
                s->tc_credits_per_period[4],
                s->tc_credits_per_period[5],
                s->tc_credits_per_period[6],
                s->tc_credits_per_period[7],
                s->tc_credits_per_period[8],
                s->tc_credits_per_period[9],
                s->tc_credits_per_period[10],
                s->tc_credits_per_period[11],
                s->tc_credits_per_period[12],

                /* Best effort traffic class oversubscription */
                s->tc_ov_wm_min,
                s->tc_ov_wm_max);
}

static void
rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)
{
        uint32_t i;

        for (i = 0; i < n_subports; i++) {
                struct rte_sched_subport *subport = port->subports[i];

                rte_sched_subport_free(port, subport);
        }

        rte_free(port->subport_profiles);
        rte_free(port);
}

int
rte_sched_subport_config(struct rte_sched_port *port,
        uint32_t subport_id,
        struct rte_sched_subport_params *params)
{
        struct rte_sched_subport *s = NULL;
        uint32_t n_subports = subport_id;
        uint32_t n_subport_pipe_queues, i;
        uint32_t size0, size1, bmp_mem_size;
        int status;

        /* Check user parameters */
        if (port == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter port\n", __func__);
                return -EINVAL;
        }

        if (subport_id >= port->n_subports_per_port) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for subport id\n", __func__);

                rte_sched_free_memory(port, n_subports);
                return -EINVAL;
        }

        status = rte_sched_subport_check_params(params,
                port->n_pipes_per_subport,
                port->rate);
        if (status != 0) {
                RTE_LOG(NOTICE, SCHED,
                        "%s: Subport scheduler params check failed (%d)\n",
                        __func__, status);

                rte_sched_free_memory(port, n_subports);
                return -EINVAL;
        }

        /* Determine the amount of memory to allocate */
        size0 = sizeof(struct rte_sched_subport);
        size1 = rte_sched_subport_get_array_base(params,
                                e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);

        /* Allocate memory to store the data structures */
        s = rte_zmalloc_socket("subport_params", size0 + size1,
                RTE_CACHE_LINE_SIZE, port->socket);
        if (s == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Memory allocation fails\n", __func__);

                rte_sched_free_memory(port, n_subports);
                return -ENOMEM;
        }

        n_subports++;

        /* Port */
        port->subports[subport_id] = s;

        /* Token Bucket (TB) */
        if (params->tb_rate == port->rate) {
                s->tb_credits_per_period = 1;
                s->tb_period = 1;
        } else {
                double tb_rate = ((double) params->tb_rate) / ((double) port->rate);
                double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

                rte_approx_64(tb_rate, d, &s->tb_credits_per_period, &s->tb_period);
        }

        s->tb_size = params->tb_size;
        s->tb_time = port->time;
        s->tb_credits = s->tb_size / 2;

        /* Traffic Classes (TCs) */
        s->tc_period = rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                if (params->qsize[i])
                        s->tc_credits_per_period[i]
                                = rte_sched_time_ms_to_bytes(params->tc_period,
                                        params->tc_rate[i]);
        }
        s->tc_time = port->time + s->tc_period;
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                if (params->qsize[i])
                        s->tc_credits[i] = s->tc_credits_per_period[i];

        /* compile time checks */
        RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
        RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS &
                (RTE_SCHED_PORT_N_GRINDERS - 1));

        /* User parameters */
        s->n_pipes_per_subport_enabled = params->n_pipes_per_subport_enabled;
        memcpy(s->qsize, params->qsize, sizeof(params->qsize));
        s->n_pipe_profiles = params->n_pipe_profiles;
        s->n_max_pipe_profiles = params->n_max_pipe_profiles;

#ifdef RTE_SCHED_RED
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                uint32_t j;

                for (j = 0; j < RTE_COLORS; j++) {
                        /* if min/max are both zero, then RED is disabled */
                        if ((params->red_params[i][j].min_th |
                             params->red_params[i][j].max_th) == 0) {
                                continue;
                        }

                        if (rte_red_config_init(&s->red_config[i][j],
                                params->red_params[i][j].wq_log2,
                                params->red_params[i][j].min_th,
                                params->red_params[i][j].max_th,
                                params->red_params[i][j].maxp_inv) != 0) {
                                rte_sched_free_memory(port, n_subports);

                                RTE_LOG(NOTICE, SCHED,
                                "%s: RED configuration init fails\n", __func__);
                                return -EINVAL;
                        }
                }
        }
#endif

        /* Scheduling loop detection */
        s->pipe_loop = RTE_SCHED_PIPE_INVALID;
        s->pipe_exhaustion = 0;

        /* Grinders */
        s->busy_grinders = 0;

        /* Queue base calculation */
        rte_sched_subport_config_qsize(s);

        /* Large data structures */
        s->pipe = (struct rte_sched_pipe *)
                (s->memory + rte_sched_subport_get_array_base(params,
                e_RTE_SCHED_SUBPORT_ARRAY_PIPE));
        s->queue = (struct rte_sched_queue *)
                (s->memory + rte_sched_subport_get_array_base(params,
                e_RTE_SCHED_SUBPORT_ARRAY_QUEUE));
        s->queue_extra = (struct rte_sched_queue_extra *)
                (s->memory + rte_sched_subport_get_array_base(params,
                e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA));
        s->pipe_profiles = (struct rte_sched_pipe_profile *)
                (s->memory + rte_sched_subport_get_array_base(params,
                e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES));
        s->bmp_array = s->memory + rte_sched_subport_get_array_base(params,
                e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY);
        s->queue_array = (struct rte_mbuf **)
                (s->memory + rte_sched_subport_get_array_base(params,
                e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY));

        /* Pipe profile table */
        rte_sched_subport_config_pipe_profile_table(s, params, port->rate);

        /* Bitmap */
        n_subport_pipe_queues = rte_sched_subport_pipe_queues(s);
        bmp_mem_size = rte_bitmap_get_memory_footprint(n_subport_pipe_queues);
        s->bmp = rte_bitmap_init(n_subport_pipe_queues, s->bmp_array,
                                bmp_mem_size);
        if (s->bmp == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Subport bitmap init error\n", __func__);

                rte_sched_free_memory(port, n_subports);
                return -EINVAL;
        }

        for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
                s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;

#ifdef RTE_SCHED_SUBPORT_TC_OV
        /* TC oversubscription */
        s->tc_ov_wm_min = port->mtu;
        s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(params->tc_period,
                                                     s->pipe_tc_be_rate_max);
        s->tc_ov_wm = s->tc_ov_wm_max;
        s->tc_ov_period_id = 0;
        s->tc_ov = 0;
        s->tc_ov_n = 0;
        s->tc_ov_rate = 0;
#endif

        rte_sched_port_log_subport_config(port, subport_id);

        return 0;
}
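
/*
 * Error-path note (a reading aid, not new behavior): every failure above,
 * once the port exists, calls rte_sched_free_memory(), which frees the port
 * and all previously configured subports. A non-zero return must therefore
 * be treated as fatal for the whole port, not just for this subport.
 */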

int
rte_sched_pipe_config(struct rte_sched_port *port,
        uint32_t subport_id,
        uint32_t pipe_id,
        int32_t pipe_profile)
{
        struct rte_sched_subport *s;
        struct rte_sched_pipe *p;
        struct rte_sched_pipe_profile *params;
        uint32_t n_subports = subport_id + 1;
        uint32_t deactivate, profile, i;

        /* Check user parameters */
        profile = (uint32_t) pipe_profile;
        deactivate = (pipe_profile < 0);

        if (port == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter port\n", __func__);
                return -EINVAL;
        }

        if (subport_id >= port->n_subports_per_port) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter subport id\n", __func__);

                rte_sched_free_memory(port, n_subports);
                return -EINVAL;
        }

        s = port->subports[subport_id];
        if (pipe_id >= s->n_pipes_per_subport_enabled) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter pipe id\n", __func__);

                rte_sched_free_memory(port, n_subports);
                return -EINVAL;
        }

        if (!deactivate && profile >= s->n_pipe_profiles) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter pipe profile\n", __func__);

                rte_sched_free_memory(port, n_subports);
                return -EINVAL;
        }

        /* Handle the case when pipe already has a valid configuration */
        p = s->pipe + pipe_id;
        if (p->tb_time) {
                params = s->pipe_profiles + p->profile;

                double subport_tc_be_rate =
                        (double) s->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
                        / (double) s->tc_period;
                double pipe_tc_be_rate =
                        (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
                        / (double) params->tc_period;
                uint32_t tc_be_ov = s->tc_ov;

                /* Unplug pipe from its subport */
                s->tc_ov_n -= params->tc_ov_weight;
                s->tc_ov_rate -= pipe_tc_be_rate;
                s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;

                if (s->tc_ov != tc_be_ov) {
                        RTE_LOG(DEBUG, SCHED,
                                "Subport %u Best-effort TC oversubscription is OFF (%.4lf >= %.4lf)\n",
                                subport_id, subport_tc_be_rate, s->tc_ov_rate);
                }

                /* Reset the pipe */
                memset(p, 0, sizeof(struct rte_sched_pipe));
        }

        if (deactivate)
                return 0;

        /* Apply the new pipe configuration */
        p->profile = profile;
        params = s->pipe_profiles + p->profile;

        /* Token Bucket (TB) */
        p->tb_time = port->time;
        p->tb_credits = params->tb_size / 2;

        /* Traffic Classes (TCs) */
        p->tc_time = port->time + params->tc_period;

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                if (s->qsize[i])
                        p->tc_credits[i] = params->tc_credits_per_period[i];

        {
                /* Subport best effort tc oversubscription */
                double subport_tc_be_rate =
                        (double) s->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
                        / (double) s->tc_period;
                double pipe_tc_be_rate =
                        (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
                        / (double) params->tc_period;
                uint32_t tc_be_ov = s->tc_ov;

                s->tc_ov_n += params->tc_ov_weight;
                s->tc_ov_rate += pipe_tc_be_rate;
                s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;

                if (s->tc_ov != tc_be_ov) {
                        RTE_LOG(DEBUG, SCHED,
                                "Subport %u Best effort TC oversubscription is ON (%.4lf < %.4lf)\n",
                                subport_id, subport_tc_be_rate, s->tc_ov_rate);
                }
                p->tc_ov_period_id = s->tc_ov_period_id;
                p->tc_ov_credits = s->tc_ov_wm;
        }

        return 0;
}
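
/*
 * Bring-up sketch (hypothetical identifiers; assumes port_params and
 * subport_params are valid and pipe profile 0 exists): a minimal
 * single-subport setup tying the three calls together:
 *
 *   struct rte_sched_port *port = rte_sched_port_config(&port_params);
 *   if (port == NULL)
 *           rte_exit(EXIT_FAILURE, "port config failed\n");
 *   if (rte_sched_subport_config(port, 0, &subport_params) != 0)
 *           rte_exit(EXIT_FAILURE, "subport config failed\n");
 *   for (uint32_t pipe = 0; pipe < n_pipes; pipe++)
 *           if (rte_sched_pipe_config(port, 0, pipe, 0) != 0)
 *                   rte_exit(EXIT_FAILURE, "pipe config failed\n");
 */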
1465
1466 int
1467 rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,
1468         uint32_t subport_id,
1469         struct rte_sched_pipe_params *params,
1470         uint32_t *pipe_profile_id)
1471 {
1472         struct rte_sched_subport *s;
1473         struct rte_sched_pipe_profile *pp;
1474         uint32_t i;
1475         int status;
1476
1477         /* Port */
1478         if (port == NULL) {
1479                 RTE_LOG(ERR, SCHED,
1480                         "%s: Incorrect value for parameter port\n", __func__);
1481                 return -EINVAL;
1482         }
1483
1484         /* Subport id must not exceed the max limit */
1485         if (subport_id >= port->n_subports_per_port) {
1486                 RTE_LOG(ERR, SCHED,
1487                         "%s: Incorrect value for subport id\n", __func__);
1488                 return -EINVAL;
1489         }
1490
1491         s = port->subports[subport_id];
1492
1493         /* Number of pipe profiles must not exceed the max limit */
1494         if (s->n_pipe_profiles >= s->n_max_pipe_profiles) {
1495                 RTE_LOG(ERR, SCHED,
1496                         "%s: Number of pipe profiles exceeds the max limit\n", __func__);
1497                 return -EINVAL;
1498         }
1499
1500         /* Pipe params */
1501         status = pipe_profile_check(params, port->rate, &s->qsize[0]);
1502         if (status != 0) {
1503                 RTE_LOG(ERR, SCHED,
1504                         "%s: Pipe profile check failed(%d)\n", __func__, status);
1505                 return -EINVAL;
1506         }
1507
1508         pp = &s->pipe_profiles[s->n_pipe_profiles];
1509         rte_sched_pipe_profile_convert(s, params, pp, port->rate);
1510
1511         /* Pipe profile must not already exist */
1512         for (i = 0; i < s->n_pipe_profiles; i++)
1513                 if (memcmp(s->pipe_profiles + i, pp, sizeof(*pp)) == 0) {
1514                         RTE_LOG(ERR, SCHED,
1515                                 "%s: Pipe profile exists\n", __func__);
1516                         return -EINVAL;
1517                 }
1518
1519         /* Pipe profile commit */
1520         *pipe_profile_id = s->n_pipe_profiles;
1521         s->n_pipe_profiles++;
1522
1523         if (s->pipe_tc_be_rate_max < params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE])
1524                 s->pipe_tc_be_rate_max = params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];
1525
1526         rte_sched_port_log_pipe_profile(s, *pipe_profile_id);
1527
1528         return 0;
1529 }
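/*
 * Usage sketch (editor's illustration; the field values below are
 * assumptions, see struct rte_sched_pipe_params in rte_sched.h of
 * this DPDK version for the authoritative layout):
 *
 *      struct rte_sched_pipe_params pp = {
 *              .tb_rate = 305175,
 *              .tb_size = 1000000,
 *              .tc_rate = { 305175, 305175, ... },
 *              .tc_period = 40,
 *      };
 *      uint32_t profile_id;
 *
 *      if (rte_sched_subport_pipe_profile_add(port, subport_id,
 *                      &pp, &profile_id) != 0)
 *              rte_exit(EXIT_FAILURE, "pipe profile add failed\n");
 *
 * On success, profile_id can be passed to rte_sched_pipe_config()
 * for any pipe of this subport.
 */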
1530
1531 int
1532 rte_sched_port_subport_profile_add(struct rte_sched_port *port,
1533         struct rte_sched_subport_profile_params *params,
1534         uint32_t *subport_profile_id)
1535 {
1536         int status;
1537         uint32_t i;
1538         struct rte_sched_subport_profile *dst;
1539
1540         /* Port */
1541         if (port == NULL) {
1542                 RTE_LOG(ERR, SCHED, "%s: "
1543                 "Incorrect value for parameter port\n", __func__);
1544                 return -EINVAL;
1545         }
1546
1547         if (params == NULL) {
1548                 RTE_LOG(ERR, SCHED, "%s: "
1549                 "Incorrect value for parameter profile\n", __func__);
1550                 return -EINVAL;
1551         }
1552
1553         if (subport_profile_id == NULL) {
1554                 RTE_LOG(ERR, SCHED, "%s: "
1555                 "Incorrect value for parameter subport_profile_id\n",
1556                 __func__);
1557                 return -EINVAL;
1558         }
1559
1560         dst = port->subport_profiles + port->n_subport_profiles;
1561
1562         /* Number of subport profiles must not exceed the max limit */
1563         if (port->n_subport_profiles >= port->n_max_subport_profiles) {
1564                 RTE_LOG(ERR, SCHED, "%s: "
1565                 "Number of subport profiles exceeds the max limit\n",
1566                  __func__);
1567                 return -EINVAL;
1568         }
1569
1570         status = subport_profile_check(params, port->rate);
1571         if (status != 0) {
1572                 RTE_LOG(ERR, SCHED,
1573                 "%s: subport profile check failed(%d)\n", __func__, status);
1574                 return -EINVAL;
1575         }
1576
1577         rte_sched_subport_profile_convert(params, dst, port->rate);
1578
1579         /* Subport profile must not already exist */
1580         for (i = 0; i < port->n_subport_profiles; i++)
1581                 if (memcmp(port->subport_profiles + i,
1582                     dst, sizeof(*dst)) == 0) {
1583                         RTE_LOG(ERR, SCHED,
1584                         "%s: subport profile exists\n", __func__);
1585                         return -EINVAL;
1586                 }
1587
1588         /* Subport profile commit */
1589         *subport_profile_id = port->n_subport_profiles;
1590         port->n_subport_profiles++;
1591
1592         rte_sched_port_log_subport_profile(port, *subport_profile_id);
1593
1594         return 0;
1595 }
1596
1597 static inline uint32_t
1598 rte_sched_port_qindex(struct rte_sched_port *port,
1599         uint32_t subport,
1600         uint32_t pipe,
1601         uint32_t traffic_class,
1602         uint32_t queue)
1603 {
1604         return ((subport & (port->n_subports_per_port - 1)) <<
1605                 (port->n_pipes_per_subport_log2 + 4)) |
1606                 ((pipe &
1607                 (port->subports[subport]->n_pipes_per_subport_enabled - 1)) << 4) |
1608                 ((rte_sched_port_pipe_queue(port, traffic_class) + queue) &
1609                 (RTE_SCHED_QUEUES_PER_PIPE - 1));
1610 }
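/*
 * Queue id bit layout produced above (editor's illustration, assuming
 * the default constants of 13 traffic classes with a 4-queue BE class,
 * i.e. 16 queues per pipe): with n_pipes_per_subport_log2 = 10,
 *
 *      [subport bits] [pipe: 10 bits] [pipe queue: 4 bits]
 *
 * e.g. subport 3, pipe 5, best-effort queue 2 (the BE class starting
 * at pipe queue 12) packs to (3 << 14) | (5 << 4) | (12 + 2).
 */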
1611
1612 void
1613 rte_sched_port_pkt_write(struct rte_sched_port *port,
1614                          struct rte_mbuf *pkt,
1615                          uint32_t subport, uint32_t pipe,
1616                          uint32_t traffic_class,
1617                          uint32_t queue, enum rte_color color)
1618 {
1619         uint32_t queue_id =
1620                 rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
1621
1622         rte_mbuf_sched_set(pkt, queue_id, traffic_class, (uint8_t)color);
1623 }
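/*
 * Typical use (editor's sketch): the classifier stage tags each mbuf
 * before rte_sched_port_enqueue() is called, e.g.
 *
 *      rte_sched_port_pkt_write(port, pkt, subport, pipe,
 *              tc, queue, RTE_COLOR_GREEN);
 *
 * The hierarchy path and color travel in the mbuf sched field and are
 * decoded again on the enqueue and dequeue paths.
 */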
1624
1625 void
1626 rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
1627                                   const struct rte_mbuf *pkt,
1628                                   uint32_t *subport, uint32_t *pipe,
1629                                   uint32_t *traffic_class, uint32_t *queue)
1630 {
1631         uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
1632
1633         *subport = queue_id >> (port->n_pipes_per_subport_log2 + 4);
1634         *pipe = (queue_id >> 4) &
1635                 (port->subports[*subport]->n_pipes_per_subport_enabled - 1);
1636         *traffic_class = rte_sched_port_pipe_tc(port, queue_id);
1637         *queue = rte_sched_port_tc_queue(port, queue_id);
1638 }
1639
1640 enum rte_color
1641 rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
1642 {
1643         return (enum rte_color)rte_mbuf_sched_color_get(pkt);
1644 }
1645
1646 int
1647 rte_sched_subport_read_stats(struct rte_sched_port *port,
1648                              uint32_t subport_id,
1649                              struct rte_sched_subport_stats *stats,
1650                              uint32_t *tc_ov)
1651 {
1652         struct rte_sched_subport *s;
1653
1654         /* Check user parameters */
1655         if (port == NULL) {
1656                 RTE_LOG(ERR, SCHED,
1657                         "%s: Incorrect value for parameter port\n", __func__);
1658                 return -EINVAL;
1659         }
1660
1661         if (subport_id >= port->n_subports_per_port) {
1662                 RTE_LOG(ERR, SCHED,
1663                         "%s: Incorrect value for subport id\n", __func__);
1664                 return -EINVAL;
1665         }
1666
1667         if (stats == NULL) {
1668                 RTE_LOG(ERR, SCHED,
1669                         "%s: Incorrect value for parameter stats\n", __func__);
1670                 return -EINVAL;
1671         }
1672
1673         if (tc_ov == NULL) {
1674                 RTE_LOG(ERR, SCHED,
1675                         "%s: Incorrect value for tc_ov\n", __func__);
1676                 return -EINVAL;
1677         }
1678
1679         s = port->subports[subport_id];
1680
1681         /* Copy subport stats and clear */
1682         memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
1683         memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));
1684
1685         /* Subport TC oversubscription status */
1686         *tc_ov = s->tc_ov;
1687
1688         return 0;
1689 }
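/*
 * Note the read-and-clear semantics above: each call returns the
 * counters accumulated since the previous call. A monitoring loop
 * (editor's sketch) would therefore accumulate externally:
 *
 *      struct rte_sched_subport_stats st;
 *      uint32_t tc_ov;
 *
 *      if (rte_sched_subport_read_stats(port, sid, &st, &tc_ov) == 0)
 *              total_pkts += st.n_pkts_tc[RTE_SCHED_TRAFFIC_CLASS_BE];
 */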
1690
1691 int
1692 rte_sched_queue_read_stats(struct rte_sched_port *port,
1693         uint32_t queue_id,
1694         struct rte_sched_queue_stats *stats,
1695         uint16_t *qlen)
1696 {
1697         struct rte_sched_subport *s;
1698         struct rte_sched_queue *q;
1699         struct rte_sched_queue_extra *qe;
1700         uint32_t subport_id, subport_qshift, subport_qindex;
1701
1702         /* Check user parameters */
1703         if (port == NULL) {
1704                 RTE_LOG(ERR, SCHED,
1705                         "%s: Incorrect value for parameter port\n", __func__);
1706                 return -EINVAL;
1707         }
1708
1709         if (queue_id >= rte_sched_port_queues_per_port(port)) {
1710                 RTE_LOG(ERR, SCHED,
1711                         "%s: Incorrect value for queue id\n", __func__);
1712                 return -EINVAL;
1713         }
1714
1715         if (stats == NULL) {
1716                 RTE_LOG(ERR, SCHED,
1717                         "%s: Incorrect value for parameter stats\n", __func__);
1718                 return -EINVAL;
1719         }
1720
1721         if (qlen == NULL) {
1722                 RTE_LOG(ERR, SCHED,
1723                         "%s: Incorrect value for parameter qlen\n", __func__);
1724                 return -EINVAL;
1725         }
1726         subport_qshift = port->n_pipes_per_subport_log2 + 4;
1727         subport_id = (queue_id >> subport_qshift) & (port->n_subports_per_port - 1);
1728
1729         s = port->subports[subport_id];
1730         subport_qindex = ((1 << subport_qshift) - 1) & queue_id;
1731         q = s->queue + subport_qindex;
1732         qe = s->queue_extra + subport_qindex;
1733
1734         /* Copy queue stats and clear */
1735         memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
1736         memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats));
1737
1738         /* Queue length */
1739         *qlen = q->qw - q->qr;
1740
1741         return 0;
1742 }
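/*
 * Editor's note: the queue length math above relies on 16-bit unsigned
 * wraparound. qw and qr increase monotonically and are only masked by
 * (qsize - 1) when used as array indexes, so e.g. qw = 3, qr = 65533
 * yields (uint16_t)(3 - 65533) = 6 pending packets.
 */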
1743
1744 #ifdef RTE_SCHED_DEBUG
1745
1746 static inline int
1747 rte_sched_port_queue_is_empty(struct rte_sched_subport *subport,
1748         uint32_t qindex)
1749 {
1750         struct rte_sched_queue *queue = subport->queue + qindex;
1751
1752         return queue->qr == queue->qw;
1753 }
1754
1755 #endif /* RTE_SCHED_DEBUG */
1756
1757 #ifdef RTE_SCHED_COLLECT_STATS
1758
1759 static inline void
1760 rte_sched_port_update_subport_stats(struct rte_sched_port *port,
1761         struct rte_sched_subport *subport,
1762         uint32_t qindex,
1763         struct rte_mbuf *pkt)
1764 {
1765         uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
1766         uint32_t pkt_len = pkt->pkt_len;
1767
1768         subport->stats.n_pkts_tc[tc_index] += 1;
1769         subport->stats.n_bytes_tc[tc_index] += pkt_len;
1770 }
1771
1772 #ifdef RTE_SCHED_RED
1773 static inline void
1774 rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
1775         struct rte_sched_subport *subport,
1776         uint32_t qindex,
1777         struct rte_mbuf *pkt,
1778         uint32_t red)
1779 #else
1780 static inline void
1781 rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
1782         struct rte_sched_subport *subport,
1783         uint32_t qindex,
1784         struct rte_mbuf *pkt,
1785         __rte_unused uint32_t red)
1786 #endif
1787 {
1788         uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
1789         uint32_t pkt_len = pkt->pkt_len;
1790
1791         subport->stats.n_pkts_tc_dropped[tc_index] += 1;
1792         subport->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
1793 #ifdef RTE_SCHED_RED
1794         subport->stats.n_pkts_red_dropped[tc_index] += red;
1795 #endif
1796 }
1797
1798 static inline void
1799 rte_sched_port_update_queue_stats(struct rte_sched_subport *subport,
1800         uint32_t qindex,
1801         struct rte_mbuf *pkt)
1802 {
1803         struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1804         uint32_t pkt_len = pkt->pkt_len;
1805
1806         qe->stats.n_pkts += 1;
1807         qe->stats.n_bytes += pkt_len;
1808 }
1809
1810 #ifdef RTE_SCHED_RED
1811 static inline void
1812 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
1813         uint32_t qindex,
1814         struct rte_mbuf *pkt,
1815         uint32_t red)
1816 #else
1817 static inline void
1818 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
1819         uint32_t qindex,
1820         struct rte_mbuf *pkt,
1821         __rte_unused uint32_t red)
1822 #endif
1823 {
1824         struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1825         uint32_t pkt_len = pkt->pkt_len;
1826
1827         qe->stats.n_pkts_dropped += 1;
1828         qe->stats.n_bytes_dropped += pkt_len;
1829 #ifdef RTE_SCHED_RED
1830         qe->stats.n_pkts_red_dropped += red;
1831 #endif
1832 }
1833
1834 #endif /* RTE_SCHED_COLLECT_STATS */
1835
1836 #ifdef RTE_SCHED_RED
1837
1838 static inline int
1839 rte_sched_port_red_drop(struct rte_sched_port *port,
1840         struct rte_sched_subport *subport,
1841         struct rte_mbuf *pkt,
1842         uint32_t qindex,
1843         uint16_t qlen)
1844 {
1845         struct rte_sched_queue_extra *qe;
1846         struct rte_red_config *red_cfg;
1847         struct rte_red *red;
1848         uint32_t tc_index;
1849         enum rte_color color;
1850
1851         tc_index = rte_sched_port_pipe_tc(port, qindex);
1852         color = rte_sched_port_pkt_read_color(pkt);
1853         red_cfg = &subport->red_config[tc_index][color];
1854
1855         if ((red_cfg->min_th | red_cfg->max_th) == 0)
1856                 return 0;
1857
1858         qe = subport->queue_extra + qindex;
1859         red = &qe->red;
1860
1861         return rte_red_enqueue(red_cfg, red, qlen, port->time);
1862 }
1863
1864 static inline void
1865 rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port,
1866         struct rte_sched_subport *subport, uint32_t qindex)
1867 {
1868         struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1869         struct rte_red *red = &qe->red;
1870
1871         rte_red_mark_queue_empty(red, port->time);
1872 }
1873
1874 #else
1875
1876 static inline int rte_sched_port_red_drop(struct rte_sched_port *port __rte_unused,
1877         struct rte_sched_subport *subport __rte_unused,
1878         struct rte_mbuf *pkt __rte_unused,
1879         uint32_t qindex __rte_unused,
1880         uint16_t qlen __rte_unused)
1881 {
1882         return 0;
1883 }
1884
1885 #define rte_sched_port_set_queue_empty_timestamp(port, subport, qindex)
1886
1887 #endif /* RTE_SCHED_RED */
1888
1889 #ifdef RTE_SCHED_DEBUG
1890
1891 static inline void
1892 debug_check_queue_slab(struct rte_sched_subport *subport, uint32_t bmp_pos,
1893                        uint64_t bmp_slab)
1894 {
1895         uint64_t mask;
1896         uint32_t i, panic;
1897
1898         if (bmp_slab == 0)
1899                 rte_panic("Empty slab at position %u\n", bmp_pos);
1900
1901         panic = 0;
1902         for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
1903                 if (mask & bmp_slab) {
1904                         if (rte_sched_port_queue_is_empty(subport, bmp_pos + i)) {
1905                                 printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i);
1906                                 panic = 1;
1907                         }
1908                 }
1909         }
1910
1911         if (panic)
1912                 rte_panic("Empty queues in slab 0x%" PRIx64 " starting at position %u\n",
1913                         bmp_slab, bmp_pos);
1914 }
1915
1916 #endif /* RTE_SCHED_DEBUG */
1917
1918 static inline struct rte_sched_subport *
1919 rte_sched_port_subport(struct rte_sched_port *port,
1920         struct rte_mbuf *pkt)
1921 {
1922         uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
1923         uint32_t subport_id = queue_id >> (port->n_pipes_per_subport_log2 + 4);
1924
1925         return port->subports[subport_id];
1926 }
1927
1928 static inline uint32_t
1929 rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_subport *subport,
1930         struct rte_mbuf *pkt, uint32_t subport_qmask)
1931 {
1932         struct rte_sched_queue *q;
1933 #ifdef RTE_SCHED_COLLECT_STATS
1934         struct rte_sched_queue_extra *qe;
1935 #endif
1936         uint32_t qindex = rte_mbuf_sched_queue_get(pkt);
1937         uint32_t subport_queue_id = subport_qmask & qindex;
1938
1939         q = subport->queue + subport_queue_id;
1940         rte_prefetch0(q);
1941 #ifdef RTE_SCHED_COLLECT_STATS
1942         qe = subport->queue_extra + subport_queue_id;
1943         rte_prefetch0(qe);
1944 #endif
1945
1946         return subport_queue_id;
1947 }
1948
1949 static inline void
1950 rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port,
1951         struct rte_sched_subport *subport,
1952         uint32_t qindex,
1953         struct rte_mbuf **qbase)
1954 {
1955         struct rte_sched_queue *q;
1956         struct rte_mbuf **q_qw;
1957         uint16_t qsize;
1958
1959         q = subport->queue + qindex;
1960         qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
1961         q_qw = qbase + (q->qw & (qsize - 1));
1962
1963         rte_prefetch0(q_qw);
1964         rte_bitmap_prefetch0(subport->bmp, qindex);
1965 }
1966
1967 static inline int
1968 rte_sched_port_enqueue_qwa(struct rte_sched_port *port,
1969         struct rte_sched_subport *subport,
1970         uint32_t qindex,
1971         struct rte_mbuf **qbase,
1972         struct rte_mbuf *pkt)
1973 {
1974         struct rte_sched_queue *q;
1975         uint16_t qsize;
1976         uint16_t qlen;
1977
1978         q = subport->queue + qindex;
1979         qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
1980         qlen = q->qw - q->qr;
1981
1982         /* Drop the packet (and update drop stats) when queue is full */
1983         if (unlikely(rte_sched_port_red_drop(port, subport, pkt, qindex, qlen) ||
1984                      (qlen >= qsize))) {
1985                 rte_pktmbuf_free(pkt);
1986 #ifdef RTE_SCHED_COLLECT_STATS
1987                 rte_sched_port_update_subport_stats_on_drop(port, subport,
1988                         qindex, pkt, qlen < qsize);
1989                 rte_sched_port_update_queue_stats_on_drop(subport, qindex, pkt,
1990                         qlen < qsize);
1991 #endif
1992                 return 0;
1993         }
1994
1995         /* Enqueue packet */
1996         qbase[q->qw & (qsize - 1)] = pkt;
1997         q->qw++;
1998
1999         /* Activate queue in the subport bitmap */
2000         rte_bitmap_set(subport->bmp, qindex);
2001
2002         /* Statistics */
2003 #ifdef RTE_SCHED_COLLECT_STATS
2004         rte_sched_port_update_subport_stats(port, subport, qindex, pkt);
2005         rte_sched_port_update_queue_stats(subport, qindex, pkt);
2006 #endif
2007
2008         return 1;
2009 }
2010
2011
2012 /*
2013  * The enqueue function implements a 4-level pipeline with each stage
2014  * processing two different packets. The purpose of using a pipeline
2015  * is to hide the latency of prefetching the data structures. The
2016  * naming convention is presented in the diagram below:
2017  *
2018  *   p00  _______   p10  _______   p20  _______   p30  _______
2019  * ----->|       |----->|       |----->|       |----->|       |----->
2020  *       |   0   |      |   1   |      |   2   |      |   3   |
2021  * ----->|_______|----->|_______|----->|_______|----->|_______|----->
2022  *   p01            p11            p21            p31
2023  *
2024  */
2025 int
2026 rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,
2027                        uint32_t n_pkts)
2028 {
2029         struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21,
2030                 *pkt30, *pkt31, *pkt_last;
2031         struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base,
2032                 **q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
2033         struct rte_sched_subport *subport00, *subport01, *subport10, *subport11,
2034                 *subport20, *subport21, *subport30, *subport31, *subport_last;
2035         uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
2036         uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
2037         uint32_t subport_qmask;
2038         uint32_t result, i;
2039
2040         result = 0;
2041         subport_qmask = (1 << (port->n_pipes_per_subport_log2 + 4)) - 1;
2042
2043         /*
2044          * Fewer than 6 input packets available, which is not enough to
2045          * feed the pipeline
2046          */
2047         if (unlikely(n_pkts < 6)) {
2048                 struct rte_sched_subport *subports[5];
2049                 struct rte_mbuf **q_base[5];
2050                 uint32_t q[5];
2051
2052                 /* Prefetch the mbuf structure of each packet */
2053                 for (i = 0; i < n_pkts; i++)
2054                         rte_prefetch0(pkts[i]);
2055
2056                 /* Prefetch the subport structure for each packet */
2057                 for (i = 0; i < n_pkts; i++)
2058                         subports[i] = rte_sched_port_subport(port, pkts[i]);
2059
2060                 /* Prefetch the queue structure for each queue */
2061                 for (i = 0; i < n_pkts; i++)
2062                         q[i] = rte_sched_port_enqueue_qptrs_prefetch0(subports[i],
2063                                         pkts[i], subport_qmask);
2064
2065                 /* Prefetch the write pointer location of each queue */
2066                 for (i = 0; i < n_pkts; i++) {
2067                         q_base[i] = rte_sched_subport_pipe_qbase(subports[i], q[i]);
2068                         rte_sched_port_enqueue_qwa_prefetch0(port, subports[i],
2069                                 q[i], q_base[i]);
2070                 }
2071
2072                 /* Write each packet to its queue */
2073                 for (i = 0; i < n_pkts; i++)
2074                         result += rte_sched_port_enqueue_qwa(port, subports[i],
2075                                                 q[i], q_base[i], pkts[i]);
2076
2077                 return result;
2078         }
2079
2080         /* Feed the first 3 stages of the pipeline (6 packets needed) */
2081         pkt20 = pkts[0];
2082         pkt21 = pkts[1];
2083         rte_prefetch0(pkt20);
2084         rte_prefetch0(pkt21);
2085
2086         pkt10 = pkts[2];
2087         pkt11 = pkts[3];
2088         rte_prefetch0(pkt10);
2089         rte_prefetch0(pkt11);
2090
2091         subport20 = rte_sched_port_subport(port, pkt20);
2092         subport21 = rte_sched_port_subport(port, pkt21);
2093         q20 = rte_sched_port_enqueue_qptrs_prefetch0(subport20,
2094                         pkt20, subport_qmask);
2095         q21 = rte_sched_port_enqueue_qptrs_prefetch0(subport21,
2096                         pkt21, subport_qmask);
2097
2098         pkt00 = pkts[4];
2099         pkt01 = pkts[5];
2100         rte_prefetch0(pkt00);
2101         rte_prefetch0(pkt01);
2102
2103         subport10 = rte_sched_port_subport(port, pkt10);
2104         subport11 = rte_sched_port_subport(port, pkt11);
2105         q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
2106                         pkt10, subport_qmask);
2107         q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
2108                         pkt11, subport_qmask);
2109
2110         q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
2111         q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
2112         rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
2113         rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);
2114
2115         /* Run the pipeline */
2116         for (i = 6; i < (n_pkts & (~1)); i += 2) {
2117                 /* Propagate stage inputs */
2118                 pkt30 = pkt20;
2119                 pkt31 = pkt21;
2120                 pkt20 = pkt10;
2121                 pkt21 = pkt11;
2122                 pkt10 = pkt00;
2123                 pkt11 = pkt01;
2124                 q30 = q20;
2125                 q31 = q21;
2126                 q20 = q10;
2127                 q21 = q11;
2128                 subport30 = subport20;
2129                 subport31 = subport21;
2130                 subport20 = subport10;
2131                 subport21 = subport11;
2132                 q30_base = q20_base;
2133                 q31_base = q21_base;
2134
2135                 /* Stage 0: Get packets in */
2136                 pkt00 = pkts[i];
2137                 pkt01 = pkts[i + 1];
2138                 rte_prefetch0(pkt00);
2139                 rte_prefetch0(pkt01);
2140
2141                 /* Stage 1: Prefetch subport and queue structure storing queue pointers */
2142                 subport10 = rte_sched_port_subport(port, pkt10);
2143                 subport11 = rte_sched_port_subport(port, pkt11);
2144                 q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
2145                                 pkt10, subport_qmask);
2146                 q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
2147                                 pkt11, subport_qmask);
2148
2149                 /* Stage 2: Prefetch queue write location */
2150                 q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
2151                 q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
2152                 rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
2153                 rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);
2154
2155                 /* Stage 3: Write packet to queue and activate queue */
2156                 r30 = rte_sched_port_enqueue_qwa(port, subport30,
2157                                 q30, q30_base, pkt30);
2158                 r31 = rte_sched_port_enqueue_qwa(port, subport31,
2159                                 q31, q31_base, pkt31);
2160                 result += r30 + r31;
2161         }
2162
2163         /*
2164          * Drain the pipeline (exactly 6 packets).
2165          * Handle the last packet in the case
2166          * of an odd number of input packets.
2167          */
2168         pkt_last = pkts[n_pkts - 1];
2169         rte_prefetch0(pkt_last);
2170
2171         subport00 = rte_sched_port_subport(port, pkt00);
2172         subport01 = rte_sched_port_subport(port, pkt01);
2173         q00 = rte_sched_port_enqueue_qptrs_prefetch0(subport00,
2174                         pkt00, subport_qmask);
2175         q01 = rte_sched_port_enqueue_qptrs_prefetch0(subport01,
2176                         pkt01, subport_qmask);
2177
2178         q10_base = rte_sched_subport_pipe_qbase(subport10, q10);
2179         q11_base = rte_sched_subport_pipe_qbase(subport11, q11);
2180         rte_sched_port_enqueue_qwa_prefetch0(port, subport10, q10, q10_base);
2181         rte_sched_port_enqueue_qwa_prefetch0(port, subport11, q11, q11_base);
2182
2183         r20 = rte_sched_port_enqueue_qwa(port, subport20,
2184                         q20, q20_base, pkt20);
2185         r21 = rte_sched_port_enqueue_qwa(port, subport21,
2186                         q21, q21_base, pkt21);
2187         result += r20 + r21;
2188
2189         subport_last = rte_sched_port_subport(port, pkt_last);
2190         q_last = rte_sched_port_enqueue_qptrs_prefetch0(subport_last,
2191                                 pkt_last, subport_qmask);
2192
2193         q00_base = rte_sched_subport_pipe_qbase(subport00, q00);
2194         q01_base = rte_sched_subport_pipe_qbase(subport01, q01);
2195         rte_sched_port_enqueue_qwa_prefetch0(port, subport00, q00, q00_base);
2196         rte_sched_port_enqueue_qwa_prefetch0(port, subport01, q01, q01_base);
2197
2198         r10 = rte_sched_port_enqueue_qwa(port, subport10, q10,
2199                         q10_base, pkt10);
2200         r11 = rte_sched_port_enqueue_qwa(port, subport11, q11,
2201                         q11_base, pkt11);
2202         result += r10 + r11;
2203
2204         q_last_base = rte_sched_subport_pipe_qbase(subport_last, q_last);
2205         rte_sched_port_enqueue_qwa_prefetch0(port, subport_last,
2206                 q_last, q_last_base);
2207
2208         r00 = rte_sched_port_enqueue_qwa(port, subport00, q00,
2209                         q00_base, pkt00);
2210         r01 = rte_sched_port_enqueue_qwa(port, subport01, q01,
2211                         q01_base, pkt01);
2212         result += r00 + r01;
2213
2214         if (n_pkts & 1) {
2215                 r_last = rte_sched_port_enqueue_qwa(port, subport_last,
2216                                         q_last, q_last_base, pkt_last);
2217                 result += r_last;
2218         }
2219
2220         return result;
2221 }
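/*
 * Usage sketch (editor's illustration): a run-to-completion worker
 * pairs the two halves of the scheduler around the TX path:
 *
 *      struct rte_mbuf *burst[64];
 *      uint16_t n_rx, n_tx;
 *
 *      n_rx = rte_eth_rx_burst(eth_port, 0, burst, 64);
 *      ... classify and rte_sched_port_pkt_write() each mbuf ...
 *      rte_sched_port_enqueue(sched_port, burst, n_rx);
 *      n_tx = rte_sched_port_dequeue(sched_port, burst, 32);
 *      rte_eth_tx_burst(eth_port, 0, burst, n_tx);
 */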
2222
2223 #ifndef RTE_SCHED_SUBPORT_TC_OV
2224
2225 static inline void
2226 grinder_credits_update(struct rte_sched_port *port,
2227         struct rte_sched_subport *subport, uint32_t pos)
2228 {
2229         struct rte_sched_grinder *grinder = subport->grinder + pos;
2230         struct rte_sched_pipe *pipe = grinder->pipe;
2231         struct rte_sched_pipe_profile *params = grinder->pipe_params;
2232         uint64_t n_periods;
2233         uint32_t i;
2234
2235         /* Subport TB */
2236         n_periods = (port->time - subport->tb_time) / subport->tb_period;
2237         subport->tb_credits += n_periods * subport->tb_credits_per_period;
2238         subport->tb_credits = RTE_MIN(subport->tb_credits, subport->tb_size);
2239         subport->tb_time += n_periods * subport->tb_period;
2240
2241         /* Pipe TB */
2242         n_periods = (port->time - pipe->tb_time) / params->tb_period;
2243         pipe->tb_credits += n_periods * params->tb_credits_per_period;
2244         pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
2245         pipe->tb_time += n_periods * params->tb_period;
2246
2247         /* Subport TCs */
2248         if (unlikely(port->time >= subport->tc_time)) {
2249                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2250                         subport->tc_credits[i] = subport->tc_credits_per_period[i];
2251
2252                 subport->tc_time = port->time + subport->tc_period;
2253         }
2254
2255         /* Pipe TCs */
2256         if (unlikely(port->time >= pipe->tc_time)) {
2257                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2258                         pipe->tc_credits[i] = params->tc_credits_per_period[i];
2259
2260                 pipe->tc_time = port->time + params->tc_period;
2261         }
2262 }
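/*
 * Token bucket refill worked example (editor's illustration): with
 * tb_period = 100, tb_credits_per_period = 10 and tb_size = 1000, a
 * pipe last updated at tb_time = 0 and observed at port->time = 1050
 * gets n_periods = 10, i.e. 100 fresh credits (clamped at tb_size),
 * and tb_time advances to 1000; the 50 leftover time units are
 * carried into the next refill.
 */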
2263
2264 #else
2265
2266 static inline uint64_t
2267 grinder_tc_ov_credits_update(struct rte_sched_port *port,
2268         struct rte_sched_subport *subport)
2269 {
2270         uint64_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2271         uint64_t tc_consumption = 0, tc_ov_consumption_max;
2272         uint64_t tc_ov_wm = subport->tc_ov_wm;
2273         uint32_t i;
2274
2275         if (subport->tc_ov == 0)
2276                 return subport->tc_ov_wm_max;
2277
2278         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
2279                 tc_ov_consumption[i] =
2280                         subport->tc_credits_per_period[i] - subport->tc_credits[i];
2281                 tc_consumption += tc_ov_consumption[i];
2282         }
2283
2284         tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] =
2285                 subport->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
2286                 subport->tc_credits[RTE_SCHED_TRAFFIC_CLASS_BE];
2287
2288         tc_ov_consumption_max =
2289                 subport->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
2290                         tc_consumption;
2291
2292         if (tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] >
2293                 (tc_ov_consumption_max - port->mtu)) {
2294                 tc_ov_wm  -= tc_ov_wm >> 7;
2295                 if (tc_ov_wm < subport->tc_ov_wm_min)
2296                         tc_ov_wm = subport->tc_ov_wm_min;
2297
2298                 return tc_ov_wm;
2299         }
2300
2301         tc_ov_wm += (tc_ov_wm >> 7) + 1;
2302         if (tc_ov_wm > subport->tc_ov_wm_max)
2303                 tc_ov_wm = subport->tc_ov_wm_max;
2304
2305         return tc_ov_wm;
2306 }
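/*
 * Editor's note on the update above: the best-effort watermark adapts
 * by roughly 1/128 (~0.8%) per tc_period. When BE consumption gets
 * within one MTU of what the higher-priority classes left unused,
 * tc_ov_wm decays towards tc_ov_wm_min, throttling BE pipes;
 * otherwise it grows back towards tc_ov_wm_max. E.g. from
 * tc_ov_wm = 12800, a congested period yields 12800 - 100 = 12700,
 * an idle one 12800 + 101 = 12901.
 */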
2307
2308 static inline void
2309 grinder_credits_update(struct rte_sched_port *port,
2310         struct rte_sched_subport *subport, uint32_t pos)
2311 {
2312         struct rte_sched_grinder *grinder = subport->grinder + pos;
2313         struct rte_sched_pipe *pipe = grinder->pipe;
2314         struct rte_sched_pipe_profile *params = grinder->pipe_params;
2315         uint64_t n_periods;
2316         uint32_t i;
2317
2318         /* Subport TB */
2319         n_periods = (port->time - subport->tb_time) / subport->tb_period;
2320         subport->tb_credits += n_periods * subport->tb_credits_per_period;
2321         subport->tb_credits = RTE_MIN(subport->tb_credits, subport->tb_size);
2322         subport->tb_time += n_periods * subport->tb_period;
2323
2324         /* Pipe TB */
2325         n_periods = (port->time - pipe->tb_time) / params->tb_period;
2326         pipe->tb_credits += n_periods * params->tb_credits_per_period;
2327         pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
2328         pipe->tb_time += n_periods * params->tb_period;
2329
2330         /* Subport TCs */
2331         if (unlikely(port->time >= subport->tc_time)) {
2332                 subport->tc_ov_wm = grinder_tc_ov_credits_update(port, subport);
2333
2334                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2335                         subport->tc_credits[i] = subport->tc_credits_per_period[i];
2336
2337                 subport->tc_time = port->time + subport->tc_period;
2338                 subport->tc_ov_period_id++;
2339         }
2340
2341         /* Pipe TCs */
2342         if (unlikely(port->time >= pipe->tc_time)) {
2343                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2344                         pipe->tc_credits[i] = params->tc_credits_per_period[i];
2345                 pipe->tc_time = port->time + params->tc_period;
2346         }
2347
2348         /* Pipe TCs - Oversubscription */
2349         if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
2350                 pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;
2351
2352                 pipe->tc_ov_period_id = subport->tc_ov_period_id;
2353         }
2354 }
2355
2356 #endif /* RTE_SCHED_SUBPORT_TC_OV */
2357
2358
2359 #ifndef RTE_SCHED_SUBPORT_TC_OV
2360
2361 static inline int
2362 grinder_credits_check(struct rte_sched_port *port,
2363         struct rte_sched_subport *subport, uint32_t pos)
2364 {
2365         struct rte_sched_grinder *grinder = subport->grinder + pos;
2366         struct rte_sched_pipe *pipe = grinder->pipe;
2367         struct rte_mbuf *pkt = grinder->pkt;
2368         uint32_t tc_index = grinder->tc_index;
2369         uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
2370         uint64_t subport_tb_credits = subport->tb_credits;
2371         uint64_t subport_tc_credits = subport->tc_credits[tc_index];
2372         uint64_t pipe_tb_credits = pipe->tb_credits;
2373         uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
2374         int enough_credits;
2375
2376         /* Check queue credits */
2377         enough_credits = (pkt_len <= subport_tb_credits) &&
2378                 (pkt_len <= subport_tc_credits) &&
2379                 (pkt_len <= pipe_tb_credits) &&
2380                 (pkt_len <= pipe_tc_credits);
2381
2382         if (!enough_credits)
2383                 return 0;
2384
2385         /* Update port credits */
2386         subport->tb_credits -= pkt_len;
2387         subport->tc_credits[tc_index] -= pkt_len;
2388         pipe->tb_credits -= pkt_len;
2389         pipe->tc_credits[tc_index] -= pkt_len;
2390
2391         return 1;
2392 }
2393
2394 #else
2395
2396 static inline int
2397 grinder_credits_check(struct rte_sched_port *port,
2398         struct rte_sched_subport *subport, uint32_t pos)
2399 {
2400         struct rte_sched_grinder *grinder = subport->grinder + pos;
2401         struct rte_sched_pipe *pipe = grinder->pipe;
2402         struct rte_mbuf *pkt = grinder->pkt;
2403         uint32_t tc_index = grinder->tc_index;
2404         uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
2405         uint64_t subport_tb_credits = subport->tb_credits;
2406         uint64_t subport_tc_credits = subport->tc_credits[tc_index];
2407         uint64_t pipe_tb_credits = pipe->tb_credits;
2408         uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
2409         uint64_t pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2410         uint64_t pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {0};
2411         uint64_t pipe_tc_ov_credits;
2412         uint32_t i;
2413         int enough_credits;
2414
2415         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2416                 pipe_tc_ov_mask1[i] = ~0LLU;
2417
2418         pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASS_BE] = pipe->tc_ov_credits;
2419         pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASS_BE] = ~0LLU;
2420         pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
2421
2422         /* Check pipe and subport credits */
2423         enough_credits = (pkt_len <= subport_tb_credits) &&
2424                 (pkt_len <= subport_tc_credits) &&
2425                 (pkt_len <= pipe_tb_credits) &&
2426                 (pkt_len <= pipe_tc_credits) &&
2427                 (pkt_len <= pipe_tc_ov_credits);
2428
2429         if (!enough_credits)
2430                 return 0;
2431
2432         /* Update pipe and subport credits */
2433         subport->tb_credits -= pkt_len;
2434         subport->tc_credits[tc_index] -= pkt_len;
2435         pipe->tb_credits -= pkt_len;
2436         pipe->tc_credits[tc_index] -= pkt_len;
2437         pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;
2438
2439         return 1;
2440 }
2441
2442 #endif /* RTE_SCHED_SUBPORT_TC_OV */
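/*
 * Editor's note: the two mask arrays above apply the best-effort
 * oversubscription cap without branches. For tc_index != BE, mask1 is
 * all-ones so pipe_tc_ov_credits never limits the packet, and mask2
 * is zero so no ov credits are consumed; for the BE class, mask1
 * holds the real tc_ov_credits budget and mask2 enables the
 * deduction.
 */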
2443
2444
2445 static inline int
2446 grinder_schedule(struct rte_sched_port *port,
2447         struct rte_sched_subport *subport, uint32_t pos)
2448 {
2449         struct rte_sched_grinder *grinder = subport->grinder + pos;
2450         struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
2451         struct rte_mbuf *pkt = grinder->pkt;
2452         uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
2453         uint32_t be_tc_active;
2454
2455         if (!grinder_credits_check(port, subport, pos))
2456                 return 0;
2457
2458         /* Advance port time */
2459         port->time += pkt_len;
2460
2461         /* Send packet */
2462         port->pkts_out[port->n_pkts_out++] = pkt;
2463         queue->qr++;
2464
2465         be_tc_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE) ? ~0x0 : 0x0;
2466         grinder->wrr_tokens[grinder->qpos] +=
2467                 (pkt_len * grinder->wrr_cost[grinder->qpos]) & be_tc_active;
2468
2469         if (queue->qr == queue->qw) {
2470                 uint32_t qindex = grinder->qindex[grinder->qpos];
2471
2472                 rte_bitmap_clear(subport->bmp, qindex);
2473                 grinder->qmask &= ~(1 << grinder->qpos);
2474                 if (be_tc_active)
2475                         grinder->wrr_mask[grinder->qpos] = 0;
2476                 rte_sched_port_set_queue_empty_timestamp(port, subport, qindex);
2477         }
2478
2479         /* Reset pipe loop detection */
2480         subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
2481         grinder->productive = 1;
2482
2483         return 1;
2484 }
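/*
 * Editor's note: be_tc_active above is the same branchless idiom. It
 * is ~0 only for the best-effort TC, so the WRR token charge
 * (pkt_len * wrr_cost) is masked to zero for the strict-priority
 * classes, which do not participate in WRR.
 */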
2485
2486 #ifdef SCHED_VECTOR_SSE4
2487
2488 static inline int
2489 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2490 {
2491         __m128i index = _mm_set1_epi32(base_pipe);
2492         __m128i pipes = _mm_load_si128((__m128i *)subport->grinder_base_bmp_pos);
2493         __m128i res = _mm_cmpeq_epi32(pipes, index);
2494
2495         pipes = _mm_load_si128((__m128i *)(subport->grinder_base_bmp_pos + 4));
2496         pipes = _mm_cmpeq_epi32(pipes, index);
2497         res = _mm_or_si128(res, pipes);
2498
2499         if (_mm_testz_si128(res, res))
2500                 return 0;
2501
2502         return 1;
2503 }
2504
2505 #elif defined(SCHED_VECTOR_NEON)
2506
2507 static inline int
2508 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2509 {
2510         uint32x4_t index, pipes;
2511         uint32_t *pos = (uint32_t *)subport->grinder_base_bmp_pos;
2512
2513         index = vmovq_n_u32(base_pipe);
2514         pipes = vld1q_u32(pos);
2515         if (!vminvq_u32(veorq_u32(pipes, index)))
2516                 return 1;
2517
2518         pipes = vld1q_u32(pos + 4);
2519         if (!vminvq_u32(veorq_u32(pipes, index)))
2520                 return 1;
2521
2522         return 0;
2523 }
2524
2525 #else
2526
2527 static inline int
2528 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2529 {
2530         uint32_t i;
2531
2532         for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) {
2533                 if (subport->grinder_base_bmp_pos[i] == base_pipe)
2534                         return 1;
2535         }
2536
2537         return 0;
2538 }
2539
2540 #endif /* SCHED_VECTOR_SSE4, SCHED_VECTOR_NEON */
2541
2542 static inline void
2543 grinder_pcache_populate(struct rte_sched_subport *subport,
2544         uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
2545 {
2546         struct rte_sched_grinder *grinder = subport->grinder + pos;
2547         uint16_t w[4];
2548
2549         grinder->pcache_w = 0;
2550         grinder->pcache_r = 0;
2551
2552         w[0] = (uint16_t) bmp_slab;
2553         w[1] = (uint16_t) (bmp_slab >> 16);
2554         w[2] = (uint16_t) (bmp_slab >> 32);
2555         w[3] = (uint16_t) (bmp_slab >> 48);
2556
2557         grinder->pcache_qmask[grinder->pcache_w] = w[0];
2558         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
2559         grinder->pcache_w += (w[0] != 0);
2560
2561         grinder->pcache_qmask[grinder->pcache_w] = w[1];
2562         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
2563         grinder->pcache_w += (w[1] != 0);
2564
2565         grinder->pcache_qmask[grinder->pcache_w] = w[2];
2566         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
2567         grinder->pcache_w += (w[2] != 0);
2568
2569         grinder->pcache_qmask[grinder->pcache_w] = w[3];
2570         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
2571         grinder->pcache_w += (w[3] != 0);
2572 }
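/*
 * Worked example (editor's illustration): bmp_slab =
 * 0x0000000100008000 splits into w = {0x8000, 0x0000, 0x0001, 0x0000}
 * and produces two pcache entries: qmask 0x8000 at qindex bmp_pos and
 * qmask 0x0001 at qindex bmp_pos + 32; the all-zero words are skipped
 * by the conditional pcache_w increments.
 */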
2573
2574 static inline void
2575 grinder_tccache_populate(struct rte_sched_subport *subport,
2576         uint32_t pos, uint32_t qindex, uint16_t qmask)
2577 {
2578         struct rte_sched_grinder *grinder = subport->grinder + pos;
2579         uint8_t b, i;
2580
2581         grinder->tccache_w = 0;
2582         grinder->tccache_r = 0;
2583
2584         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
2585                 b = (uint8_t) ((qmask >> i) & 0x1);
2586                 grinder->tccache_qmask[grinder->tccache_w] = b;
2587                 grinder->tccache_qindex[grinder->tccache_w] = qindex + i;
2588                 grinder->tccache_w += (b != 0);
2589         }
2590
2591         b = (uint8_t) (qmask >> (RTE_SCHED_TRAFFIC_CLASS_BE));
2592         grinder->tccache_qmask[grinder->tccache_w] = b;
2593         grinder->tccache_qindex[grinder->tccache_w] = qindex +
2594                 RTE_SCHED_TRAFFIC_CLASS_BE;
2595         grinder->tccache_w += (b != 0);
2596 }
2597
2598 static inline int
2599 grinder_next_tc(struct rte_sched_port *port,
2600         struct rte_sched_subport *subport, uint32_t pos)
2601 {
2602         struct rte_sched_grinder *grinder = subport->grinder + pos;
2603         struct rte_mbuf **qbase;
2604         uint32_t qindex;
2605         uint16_t qsize;
2606
2607         if (grinder->tccache_r == grinder->tccache_w)
2608                 return 0;
2609
2610         qindex = grinder->tccache_qindex[grinder->tccache_r];
2611         qbase = rte_sched_subport_pipe_qbase(subport, qindex);
2612         qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2613
2614         grinder->tc_index = rte_sched_port_pipe_tc(port, qindex);
2615         grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
2616         grinder->qsize = qsize;
2617
2618         if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
2619                 grinder->queue[0] = subport->queue + qindex;
2620                 grinder->qbase[0] = qbase;
2621                 grinder->qindex[0] = qindex;
2622                 grinder->tccache_r++;
2623
2624                 return 1;
2625         }
2626
2627         grinder->queue[0] = subport->queue + qindex;
2628         grinder->queue[1] = subport->queue + qindex + 1;
2629         grinder->queue[2] = subport->queue + qindex + 2;
2630         grinder->queue[3] = subport->queue + qindex + 3;
2631
2632         grinder->qbase[0] = qbase;
2633         grinder->qbase[1] = qbase + qsize;
2634         grinder->qbase[2] = qbase + 2 * qsize;
2635         grinder->qbase[3] = qbase + 3 * qsize;
2636
2637         grinder->qindex[0] = qindex;
2638         grinder->qindex[1] = qindex + 1;
2639         grinder->qindex[2] = qindex + 2;
2640         grinder->qindex[3] = qindex + 3;
2641
2642         grinder->tccache_r++;
2643         return 1;
2644 }
2645
2646 static inline int
2647 grinder_next_pipe(struct rte_sched_port *port,
2648         struct rte_sched_subport *subport, uint32_t pos)
2649 {
2650         struct rte_sched_grinder *grinder = subport->grinder + pos;
2651         uint32_t pipe_qindex;
2652         uint16_t pipe_qmask;
2653
2654         if (grinder->pcache_r < grinder->pcache_w) {
2655                 pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
2656                 pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
2657                 grinder->pcache_r++;
2658         } else {
2659                 uint64_t bmp_slab = 0;
2660                 uint32_t bmp_pos = 0;
2661
2662                 /* Get another non-empty pipe group */
2663                 if (unlikely(rte_bitmap_scan(subport->bmp, &bmp_pos, &bmp_slab) <= 0))
2664                         return 0;
2665
2666 #ifdef RTE_SCHED_DEBUG
2667                 debug_check_queue_slab(subport, bmp_pos, bmp_slab);
2668 #endif
2669
2670                 /* Return if pipe group already in one of the other grinders */
2671                 subport->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
2672                 if (unlikely(grinder_pipe_exists(subport, bmp_pos)))
2673                         return 0;
2674
2675                 subport->grinder_base_bmp_pos[pos] = bmp_pos;
2676
2677                 /* Install new pipe group into grinder's pipe cache */
2678                 grinder_pcache_populate(subport, pos, bmp_pos, bmp_slab);
2679
2680                 pipe_qmask = grinder->pcache_qmask[0];
2681                 pipe_qindex = grinder->pcache_qindex[0];
2682                 grinder->pcache_r = 1;
2683         }
2684
2685         /* Install new pipe in the grinder */
2686         grinder->pindex = pipe_qindex >> 4;
2687         grinder->subport = subport;
2688         grinder->pipe = subport->pipe + grinder->pindex;
2689         grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
2690         grinder->productive = 0;
2691
2692         grinder_tccache_populate(subport, pos, pipe_qindex, pipe_qmask);
2693         grinder_next_tc(port, subport, pos);
2694
2695         /* Check for pipe exhaustion */
2696         if (grinder->pindex == subport->pipe_loop) {
2697                 subport->pipe_exhaustion = 1;
2698                 subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
2699         }
2700
2701         return 1;
2702 }
2703
2704
2705 static inline void
2706 grinder_wrr_load(struct rte_sched_subport *subport, uint32_t pos)
2707 {
2708         struct rte_sched_grinder *grinder = subport->grinder + pos;
2709         struct rte_sched_pipe *pipe = grinder->pipe;
2710         struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
2711         uint32_t qmask = grinder->qmask;
2712
2713         grinder->wrr_tokens[0] =
2714                 ((uint16_t) pipe->wrr_tokens[0]) << RTE_SCHED_WRR_SHIFT;
2715         grinder->wrr_tokens[1] =
2716                 ((uint16_t) pipe->wrr_tokens[1]) << RTE_SCHED_WRR_SHIFT;
2717         grinder->wrr_tokens[2] =
2718                 ((uint16_t) pipe->wrr_tokens[2]) << RTE_SCHED_WRR_SHIFT;
2719         grinder->wrr_tokens[3] =
2720                 ((uint16_t) pipe->wrr_tokens[3]) << RTE_SCHED_WRR_SHIFT;
2721
2722         grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
2723         grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
2724         grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
2725         grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;
2726
2727         grinder->wrr_cost[0] = pipe_params->wrr_cost[0];
2728         grinder->wrr_cost[1] = pipe_params->wrr_cost[1];
2729         grinder->wrr_cost[2] = pipe_params->wrr_cost[2];
2730         grinder->wrr_cost[3] = pipe_params->wrr_cost[3];
2731 }
2732
2733 static inline void
2734 grinder_wrr_store(struct rte_sched_subport *subport, uint32_t pos)
2735 {
2736         struct rte_sched_grinder *grinder = subport->grinder + pos;
2737         struct rte_sched_pipe *pipe = grinder->pipe;
2738
2739         pipe->wrr_tokens[0] =
2740                         (grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >>
2741                                 RTE_SCHED_WRR_SHIFT;
2742         pipe->wrr_tokens[1] =
2743                         (grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >>
2744                                 RTE_SCHED_WRR_SHIFT;
2745         pipe->wrr_tokens[2] =
2746                         (grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >>
2747                                 RTE_SCHED_WRR_SHIFT;
2748         pipe->wrr_tokens[3] =
2749                         (grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >>
2750                                 RTE_SCHED_WRR_SHIFT;
2751 }
2752
2753 static inline void
2754 grinder_wrr(struct rte_sched_subport *subport, uint32_t pos)
2755 {
2756         struct rte_sched_grinder *grinder = subport->grinder + pos;
2757         uint16_t wrr_tokens_min;
2758
2759         grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
2760         grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
2761         grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
2762         grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];
2763
2764         grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
2765         wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];
2766
2767         grinder->wrr_tokens[0] -= wrr_tokens_min;
2768         grinder->wrr_tokens[1] -= wrr_tokens_min;
2769         grinder->wrr_tokens[2] -= wrr_tokens_min;
2770         grinder->wrr_tokens[3] -= wrr_tokens_min;
2771 }
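/*
 * Worked example (editor's illustration): with all four BE queues
 * active and wrr_tokens = {5, 3, 9, 7}, rte_min_pos_4_u16() selects
 * qpos = 1 and 3 is subtracted from every lane, leaving {2, 0, 6, 4}.
 * Inactive queues are first forced to 0xFFFF by the mask OR, so they
 * can never win the minimum.
 */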
2772
2773
2774 #define grinder_evict(subport, pos)
2775
2776 static inline void
2777 grinder_prefetch_pipe(struct rte_sched_subport *subport, uint32_t pos)
2778 {
2779         struct rte_sched_grinder *grinder = subport->grinder + pos;
2780
2781         rte_prefetch0(grinder->pipe);
2782         rte_prefetch0(grinder->queue[0]);
2783 }
2784
2785 static inline void
2786 grinder_prefetch_tc_queue_arrays(struct rte_sched_subport *subport, uint32_t pos)
2787 {
2788         struct rte_sched_grinder *grinder = subport->grinder + pos;
2789         uint16_t qsize, qr[RTE_SCHED_MAX_QUEUES_PER_TC];
2790
2791         qsize = grinder->qsize;
2792         grinder->qpos = 0;
2793
2794         if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
2795                 qr[0] = grinder->queue[0]->qr & (qsize - 1);
2796
2797                 rte_prefetch0(grinder->qbase[0] + qr[0]);
2798                 return;
2799         }
2800
2801         qr[0] = grinder->queue[0]->qr & (qsize - 1);
2802         qr[1] = grinder->queue[1]->qr & (qsize - 1);
2803         qr[2] = grinder->queue[2]->qr & (qsize - 1);
2804         qr[3] = grinder->queue[3]->qr & (qsize - 1);
2805
2806         rte_prefetch0(grinder->qbase[0] + qr[0]);
2807         rte_prefetch0(grinder->qbase[1] + qr[1]);
2808
2809         grinder_wrr_load(subport, pos);
2810         grinder_wrr(subport, pos);
2811
2812         rte_prefetch0(grinder->qbase[2] + qr[2]);
2813         rte_prefetch0(grinder->qbase[3] + qr[3]);
2814 }
2815
2816 static inline void
2817 grinder_prefetch_mbuf(struct rte_sched_subport *subport, uint32_t pos)
2818 {
2819         struct rte_sched_grinder *grinder = subport->grinder + pos;
2820         uint32_t qpos = grinder->qpos;
2821         struct rte_mbuf **qbase = grinder->qbase[qpos];
2822         uint16_t qsize = grinder->qsize;
2823         uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);
2824
2825         grinder->pkt = qbase[qr];
2826         rte_prefetch0(grinder->pkt);
2827
2828         if (unlikely((qr & 0x7) == 7)) {
2829                 uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);
2830
2831                 rte_prefetch0(qbase + qr_next);
2832         }
2833 }
2834
static inline uint32_t
grinder_handle(struct rte_sched_port *port,
        struct rte_sched_subport *subport, uint32_t pos)
{
        struct rte_sched_grinder *grinder = subport->grinder + pos;

        switch (grinder->state) {
        case e_GRINDER_PREFETCH_PIPE:
        {
                if (grinder_next_pipe(port, subport, pos)) {
                        grinder_prefetch_pipe(subport, pos);
                        subport->busy_grinders++;

                        grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
                }

                return 0;
        }

        case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
        {
                struct rte_sched_pipe *pipe = grinder->pipe;

                grinder->pipe_params = subport->pipe_profiles + pipe->profile;
                grinder_prefetch_tc_queue_arrays(subport, pos);
                grinder_credits_update(port, subport, pos);

                grinder->state = e_GRINDER_PREFETCH_MBUF;
                return 0;
        }

        case e_GRINDER_PREFETCH_MBUF:
        {
                grinder_prefetch_mbuf(subport, pos);

                grinder->state = e_GRINDER_READ_MBUF;
                return 0;
        }

        case e_GRINDER_READ_MBUF:
        {
                uint32_t wrr_active, result = 0;

                result = grinder_schedule(port, subport, pos);

                /* WRR applies only to the best-effort traffic class */
                wrr_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE);

                /* Look for next packet within the same TC */
                if (result && grinder->qmask) {
                        if (wrr_active)
                                grinder_wrr(subport, pos);

                        grinder_prefetch_mbuf(subport, pos);

                        return 1;
                }

                if (wrr_active)
                        grinder_wrr_store(subport, pos);

                /* Look for another active TC within same pipe */
                if (grinder_next_tc(port, subport, pos)) {
                        grinder_prefetch_tc_queue_arrays(subport, pos);

                        grinder->state = e_GRINDER_PREFETCH_MBUF;
                        return result;
                }

                /* An unproductive pipe is a pipe-loop detection candidate */
                if (grinder->productive == 0 &&
                    subport->pipe_loop == RTE_SCHED_PIPE_INVALID)
                        subport->pipe_loop = grinder->pindex;

                grinder_evict(subport, pos);

                /* Look for another active pipe */
                if (grinder_next_pipe(port, subport, pos)) {
                        grinder_prefetch_pipe(subport, pos);

                        grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
                        return result;
                }

                /* No active pipe found */
                subport->busy_grinders--;

                grinder->state = e_GRINDER_PREFETCH_PIPE;
                return result;
        }

        default:
                rte_panic("Algorithmic error (invalid state)\n");
                return 0;
        }
}

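/*
 * The grinder state machine above pipelines its memory accesses:
 * PREFETCH_PIPE -> PREFETCH_TC_QUEUE_ARRAYS -> PREFETCH_MBUF -> READ_MBUF,
 * with READ_MBUF looping back to an earlier state once the current queue,
 * TC or pipe is exhausted. A hypothetical debug helper (not part of this
 * file) for tracing those transitions could look like this:
 */
#if 0
static const char *
grinder_state_name(enum grinder_state state)
{
        switch (state) {
        case e_GRINDER_PREFETCH_PIPE:
                return "PREFETCH_PIPE";
        case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
                return "PREFETCH_TC_QUEUE_ARRAYS";
        case e_GRINDER_PREFETCH_MBUF:
                return "PREFETCH_MBUF";
        case e_GRINDER_READ_MBUF:
                return "READ_MBUF";
        default:
                return "INVALID";
        }
}
#endif
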
static inline void
rte_sched_port_time_resync(struct rte_sched_port *port)
{
        uint64_t cycles = rte_get_tsc_cycles();
        uint64_t cycles_diff;
        uint64_t bytes_diff;
        uint32_t i;

        /* Handle TSC wrap-around by restarting the cycle reference */
        if (cycles < port->time_cpu_cycles)
                port->time_cpu_cycles = 0;

        cycles_diff = cycles - port->time_cpu_cycles;

        /* Compute elapsed time in bytes */
        bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT,
                                           port->inv_cycles_per_byte);

        /* Advance port time */
        port->time_cpu_cycles +=
                (bytes_diff * port->cycles_per_byte) >> RTE_SCHED_TIME_SHIFT;
        port->time_cpu_bytes += bytes_diff;
        if (port->time < port->time_cpu_bytes)
                port->time = port->time_cpu_bytes;

        /* Reset pipe loop detection */
        for (i = 0; i < port->n_subports_per_port; i++)
                port->subports[i]->pipe_loop = RTE_SCHED_PIPE_INVALID;
}

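/*
 * A note on the conversion above: port->time advances in byte units, and
 * the reciprocal divide replaces a hardware division on the fast path.
 * In plain-division form (assuming cycles_per_byte was derived from the
 * port rate at configuration time, presumably as roughly
 * (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT) / rate), the computation is:
 *
 *     bytes_diff = (cycles_diff << RTE_SCHED_TIME_SHIFT) / cycles_per_byte;
 *
 * The fixed-point shift keeps sub-byte resolution, so very low port rates
 * remain representable without the quotient collapsing to zero.
 */
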
static inline int
rte_sched_port_exceptions(struct rte_sched_subport *subport, int second_pass)
{
        int exceptions;

        /* Check if any exception flag is set */
        exceptions = (second_pass && subport->busy_grinders == 0) ||
                (subport->pipe_exhaustion == 1);

        /* Clear exception flags */
        subport->pipe_exhaustion = 0;

        return exceptions;
}

int
rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
        struct rte_sched_subport *subport;
        uint32_t subport_id = port->subport_id;
        uint32_t i, n_subports = 0, count;

        port->pkts_out = pkts;
        port->n_pkts_out = 0;

        rte_sched_port_time_resync(port);

        /* Take each queue in the grinder one step further */
        for (i = 0, count = 0; ; i++) {
                subport = port->subports[subport_id];

                count += grinder_handle(port, subport,
                                i & (RTE_SCHED_PORT_N_GRINDERS - 1));

                /* Stop once the requested burst is complete */
                if (count == n_pkts) {
                        subport_id++;

                        if (subport_id == port->n_subports_per_port)
                                subport_id = 0;

                        port->subport_id = subport_id;
                        break;
                }

                /* On exception (all grinders idle after a full pass, or
                 * pipe exhaustion), move on to the next subport.
                 */
                if (rte_sched_port_exceptions(subport, i >= RTE_SCHED_PORT_N_GRINDERS)) {
                        i = 0;
                        subport_id++;
                        n_subports++;
                }

                if (subport_id == port->n_subports_per_port)
                        subport_id = 0;

                /* Stop after all subports have been visited */
                if (n_subports == port->n_subports_per_port) {
                        port->subport_id = subport_id;
                        break;
                }
        }

        return count;
}
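
/*
 * Illustrative usage sketch (not part of this file): a transmit loop that
 * drains the scheduler into a NIC queue. The function and parameter names
 * (app_tx_drain, tx_port_id, tx_queue_id, BURST) are hypothetical, and
 * rte_eth_tx_burst() would additionally require <rte_ethdev.h>.
 */
#if 0
#define BURST 32

static void
app_tx_drain(struct rte_sched_port *port, uint16_t tx_port_id,
        uint16_t tx_queue_id)
{
        struct rte_mbuf *pkts[BURST];
        uint32_t n_rdy;
        uint16_t n_sent;

        /* Pick up to BURST packets, chosen by the hierarchical scheduler */
        n_rdy = rte_sched_port_dequeue(port, pkts, BURST);
        if (n_rdy == 0)
                return;

        /* Hand them to the NIC; free any packets the TX queue refuses */
        n_sent = rte_eth_tx_burst(tx_port_id, tx_queue_id, pkts, n_rdy);
        while (n_sent < n_rdy)
                rte_pktmbuf_free(pkts[n_sent++]);
}
#endif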