fix spelling in comments and doxygen
dpdk.git: lib/sched/rte_sched.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <string.h>
7
8 #include <rte_common.h>
9 #include <rte_log.h>
10 #include <rte_memory.h>
11 #include <rte_malloc.h>
12 #include <rte_cycles.h>
13 #include <rte_prefetch.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_mbuf.h>
16 #include <rte_bitmap.h>
17 #include <rte_reciprocal.h>
18
19 #include "rte_sched.h"
20 #include "rte_sched_common.h"
21 #include "rte_approx.h"
22
23 #ifdef __INTEL_COMPILER
24 #pragma warning(disable:2259) /* conversion may lose significant bits */
25 #endif
26
27 #ifdef RTE_SCHED_VECTOR
28 #include <rte_vect.h>
29
30 #ifdef RTE_ARCH_X86
31 #define SCHED_VECTOR_SSE4
32 #elif defined(__ARM_NEON)
33 #define SCHED_VECTOR_NEON
34 #endif
35
36 #endif
37
38 #define RTE_SCHED_TB_RATE_CONFIG_ERR          (1e-7)
39 #define RTE_SCHED_WRR_SHIFT                   3
40 #define RTE_SCHED_MAX_QUEUES_PER_TC           RTE_SCHED_BE_QUEUES_PER_PIPE
41 #define RTE_SCHED_GRINDER_PCACHE_SIZE         (64 / RTE_SCHED_QUEUES_PER_PIPE)
42 #define RTE_SCHED_PIPE_INVALID                UINT32_MAX
43 #define RTE_SCHED_BMP_POS_INVALID             UINT32_MAX
44
45 /* Scaling for cycles_per_byte calculation
46  * Chosen so that minimum rate is 480 bit/sec
47  */
48 #define RTE_SCHED_TIME_SHIFT                  8
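/*
 * Illustrative arithmetic (example values, not defaults): with a 2.5 GHz
 * TSC and a port rate of 1,250,000,000 bytes/sec (10 GbE),
 * rte_sched_port_config() computes
 *     cycles_per_byte = (2,500,000,000 << 8) / 1,250,000,000 = 512
 * i.e. 2 cycles/byte in fixed point with 8 fractional bits. The shift
 * keeps rates below 1 byte/cycle representable, down to ~480 bit/sec.
 */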
49
50 struct rte_sched_pipe_profile {
51         /* Token bucket (TB) */
52         uint64_t tb_period;
53         uint64_t tb_credits_per_period;
54         uint64_t tb_size;
55
56         /* Pipe traffic classes */
57         uint64_t tc_period;
58         uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
59         uint8_t tc_ov_weight;
60
61         /* Pipe best-effort traffic class queues */
62         uint8_t  wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
63 };
64
65 struct rte_sched_pipe {
66         /* Token bucket (TB) */
67         uint64_t tb_time; /* time of last update */
68         uint64_t tb_credits;
69
70         /* Pipe profile and flags */
71         uint32_t profile;
72
73         /* Traffic classes (TCs) */
74         uint64_t tc_time; /* time of next update */
75         uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
76
77         /* Weighted Round Robin (WRR) */
78         uint8_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
79
80         /* TC oversubscription */
81         uint64_t tc_ov_credits;
82         uint8_t tc_ov_period_id;
83 } __rte_cache_aligned;
84
85 struct rte_sched_queue {
86         uint16_t qw; /* Queue write pointer */
87         uint16_t qr; /* Queue read pointer */
88 };
89
90 struct rte_sched_queue_extra {
91         struct rte_sched_queue_stats stats;
92 #ifdef RTE_SCHED_CMAN
93         RTE_STD_C11
94         union {
95                 struct rte_red red;
96                 struct rte_pie pie;
97         };
98 #endif
99 };
100
101 enum grinder_state {
102         e_GRINDER_PREFETCH_PIPE = 0,
103         e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
104         e_GRINDER_PREFETCH_MBUF,
105         e_GRINDER_READ_MBUF
106 };
107
108 struct rte_sched_subport_profile {
109         /* Token bucket (TB) */
110         uint64_t tb_period;
111         uint64_t tb_credits_per_period;
112         uint64_t tb_size;
113
114         uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
115         uint64_t tc_period;
116 };
117
118 struct rte_sched_grinder {
119         /* Pipe cache */
120         uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
121         uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
122         uint32_t pcache_w;
123         uint32_t pcache_r;
124
125         /* Current pipe */
126         enum grinder_state state;
127         uint32_t productive;
128         uint32_t pindex;
129         struct rte_sched_subport *subport;
130         struct rte_sched_subport_profile *subport_params;
131         struct rte_sched_pipe *pipe;
132         struct rte_sched_pipe_profile *pipe_params;
133
134         /* TC cache */
135         uint8_t tccache_qmask[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
136         uint32_t tccache_qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
137         uint32_t tccache_w;
138         uint32_t tccache_r;
139
140         /* Current TC */
141         uint32_t tc_index;
142         struct rte_sched_queue *queue[RTE_SCHED_MAX_QUEUES_PER_TC];
143         struct rte_mbuf **qbase[RTE_SCHED_MAX_QUEUES_PER_TC];
144         uint32_t qindex[RTE_SCHED_MAX_QUEUES_PER_TC];
145         uint16_t qsize;
146         uint32_t qmask;
147         uint32_t qpos;
148         struct rte_mbuf *pkt;
149
150         /* WRR */
151         uint16_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
152         uint16_t wrr_mask[RTE_SCHED_BE_QUEUES_PER_PIPE];
153         uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
154 };
155
156 struct rte_sched_subport {
157         /* Token bucket (TB) */
158         uint64_t tb_time; /* time of last update */
159         uint64_t tb_credits;
160
161         /* Traffic classes (TCs) */
162         uint64_t tc_time; /* time of next update */
163         uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
164
165         /* TC oversubscription */
166         uint64_t tc_ov_wm;
167         uint64_t tc_ov_wm_min;
168         uint64_t tc_ov_wm_max;
169         uint8_t tc_ov_period_id;
170         uint8_t tc_ov;
171         uint32_t tc_ov_n;
172         double tc_ov_rate;
173
174         /* Statistics */
175         struct rte_sched_subport_stats stats __rte_cache_aligned;
176
177         /* subport profile */
178         uint32_t profile;
179         /* Subport pipes */
180         uint32_t n_pipes_per_subport_enabled;
181         uint32_t n_pipe_profiles;
182         uint32_t n_max_pipe_profiles;
183
184         /* Pipe best-effort TC rate */
185         uint64_t pipe_tc_be_rate_max;
186
187         /* Pipe queues size */
188         uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
189
190 #ifdef RTE_SCHED_CMAN
191         bool cman_enabled;
192         enum rte_sched_cman_mode cman;
193
194         RTE_STD_C11
195         union {
196                 struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
197                 struct rte_pie_config pie_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
198         };
199 #endif
200
201         /* Scheduling loop detection */
202         uint32_t pipe_loop;
203         uint32_t pipe_exhaustion;
204
205         /* Bitmap */
206         struct rte_bitmap *bmp;
207         uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;
208
209         /* Grinders */
210         struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
211         uint32_t busy_grinders;
212
213         /* Queue base calculation */
214         uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
215         uint32_t qsize_sum;
216
217         struct rte_sched_pipe *pipe;
218         struct rte_sched_queue *queue;
219         struct rte_sched_queue_extra *queue_extra;
220         struct rte_sched_pipe_profile *pipe_profiles;
221         uint8_t *bmp_array;
222         struct rte_mbuf **queue_array;
223         uint8_t memory[0] __rte_cache_aligned;
224 } __rte_cache_aligned;
225
226 struct rte_sched_port {
227         /* User parameters */
228         uint32_t n_subports_per_port;
229         uint32_t n_pipes_per_subport;
230         uint32_t n_pipes_per_subport_log2;
231         uint16_t pipe_queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
232         uint8_t pipe_tc[RTE_SCHED_QUEUES_PER_PIPE];
233         uint8_t tc_queue[RTE_SCHED_QUEUES_PER_PIPE];
234         uint32_t n_subport_profiles;
235         uint32_t n_max_subport_profiles;
236         uint64_t rate;
237         uint32_t mtu;
238         uint32_t frame_overhead;
239         int socket;
240
241         /* Timing */
242         uint64_t time_cpu_cycles;     /* Current CPU time measured in CPU cycles */
243         uint64_t time_cpu_bytes;      /* Current CPU time measured in bytes */
244         uint64_t time;                /* Current NIC TX time measured in bytes */
245         struct rte_reciprocal inv_cycles_per_byte; /* Reciprocal of cycles_per_byte, for fast division */
246         uint64_t cycles_per_byte;
247
248         /* Grinders */
249         struct rte_mbuf **pkts_out;
250         uint32_t n_pkts_out;
251         uint32_t subport_id;
252
253         /* Large data structures */
254         struct rte_sched_subport_profile *subport_profiles;
255         struct rte_sched_subport *subports[0] __rte_cache_aligned;
256 } __rte_cache_aligned;
257
258 enum rte_sched_subport_array {
259         e_RTE_SCHED_SUBPORT_ARRAY_PIPE = 0,
260         e_RTE_SCHED_SUBPORT_ARRAY_QUEUE,
261         e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA,
262         e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES,
263         e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY,
264         e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY,
265         e_RTE_SCHED_SUBPORT_ARRAY_TOTAL,
266 };
267
268 static inline uint32_t
269 rte_sched_subport_pipe_queues(struct rte_sched_subport *subport)
270 {
271         return RTE_SCHED_QUEUES_PER_PIPE * subport->n_pipes_per_subport_enabled;
272 }
273
274 static inline struct rte_mbuf **
275 rte_sched_subport_pipe_qbase(struct rte_sched_subport *subport, uint32_t qindex)
276 {
277         uint32_t pindex = qindex >> 4; /* 16 (RTE_SCHED_QUEUES_PER_PIPE) queues per pipe */
278         uint32_t qpos = qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1);
279
280         return (subport->queue_array + pindex *
281                 subport->qsize_sum + subport->qsize_add[qpos]);
282 }
283
284 static inline uint16_t
285 rte_sched_subport_pipe_qsize(struct rte_sched_port *port,
286 struct rte_sched_subport *subport, uint32_t qindex)
287 {
288         uint32_t tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];
289
290         return subport->qsize[tc];
291 }
292
293 static inline uint32_t
294 rte_sched_port_queues_per_port(struct rte_sched_port *port)
295 {
296         uint32_t n_queues = 0, i;
297
298         for (i = 0; i < port->n_subports_per_port; i++)
299                 n_queues += rte_sched_subport_pipe_queues(port->subports[i]);
300
301         return n_queues;
302 }
303
304 static inline uint16_t
305 rte_sched_port_pipe_queue(struct rte_sched_port *port, uint32_t traffic_class)
306 {
307         uint16_t pipe_queue = port->pipe_queue[traffic_class];
308
309         return pipe_queue;
310 }
311
312 static inline uint8_t
313 rte_sched_port_pipe_tc(struct rte_sched_port *port, uint32_t qindex)
314 {
315         uint8_t pipe_tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];
316
317         return pipe_tc;
318 }
319
320 static inline uint8_t
321 rte_sched_port_tc_queue(struct rte_sched_port *port, uint32_t qindex)
322 {
323         uint8_t tc_queue = port->tc_queue[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];
324
325         return tc_queue;
326 }
327
328 static int
329 pipe_profile_check(struct rte_sched_pipe_params *params,
330         uint64_t rate, uint16_t *qsize)
331 {
332         uint32_t i;
333
334         /* Pipe parameters */
335         if (params == NULL) {
336                 RTE_LOG(ERR, SCHED,
337                         "%s: Incorrect value for parameter params\n", __func__);
338                 return -EINVAL;
339         }
340
341         /* TB rate: non-zero, not greater than port rate */
342         if (params->tb_rate == 0 ||
343                 params->tb_rate > rate) {
344                 RTE_LOG(ERR, SCHED,
345                         "%s: Incorrect value for tb rate\n", __func__);
346                 return -EINVAL;
347         }
348
349         /* TB size: non-zero */
350         if (params->tb_size == 0) {
351                 RTE_LOG(ERR, SCHED,
352                         "%s: Incorrect value for tb size\n", __func__);
353                 return -EINVAL;
354         }
355
356         /* TC rate: non-zero if qsize non-zero, less than pipe rate */
357         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
358                 if ((qsize[i] == 0 && params->tc_rate[i] != 0) ||
359                         (qsize[i] != 0 && (params->tc_rate[i] == 0 ||
360                         params->tc_rate[i] > params->tb_rate))) {
361                         RTE_LOG(ERR, SCHED,
362                                 "%s: Incorrect value for qsize or tc_rate\n", __func__);
363                         return -EINVAL;
364                 }
365         }
366
367         if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
368                 qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
369                 RTE_LOG(ERR, SCHED,
370                         "%s: Incorrect value for be traffic class rate\n", __func__);
371                 return -EINVAL;
372         }
373
374         /* TC period: non-zero */
375         if (params->tc_period == 0) {
376                 RTE_LOG(ERR, SCHED,
377                         "%s: Incorrect value for tc period\n", __func__);
378                 return -EINVAL;
379         }
380
381         /* Best-effort TC oversubscription weight: non-zero */
382         if (params->tc_ov_weight == 0) {
383                 RTE_LOG(ERR, SCHED,
384                         "%s: Incorrect value for tc ov weight\n", __func__);
385                 return -EINVAL;
386         }
387
388         /* Queue WRR weights: non-zero */
389         for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
390                 if (params->wrr_weights[i] == 0) {
391                         RTE_LOG(ERR, SCHED,
392                                 "%s: Incorrect value for wrr weight\n", __func__);
393                         return -EINVAL;
394                 }
395         }
396
397         return 0;
398 }
399
400 static int
401 subport_profile_check(struct rte_sched_subport_profile_params *params,
402         uint64_t rate)
403 {
404         uint32_t i;
405
406         /* Check user parameters */
407         if (params == NULL) {
408                 RTE_LOG(ERR, SCHED, "%s: "
409                 "Incorrect value for parameter params\n", __func__);
410                 return -EINVAL;
411         }
412
413         if (params->tb_rate == 0 || params->tb_rate > rate) {
414                 RTE_LOG(ERR, SCHED, "%s: "
415                 "Incorrect value for tb rate\n", __func__);
416                 return -EINVAL;
417         }
418
419         if (params->tb_size == 0) {
420                 RTE_LOG(ERR, SCHED, "%s: "
421                 "Incorrect value for tb size\n", __func__);
422                 return -EINVAL;
423         }
424
425         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
426                 uint64_t tc_rate = params->tc_rate[i];
427
428                 if (tc_rate == 0 || (tc_rate > params->tb_rate)) {
429                         RTE_LOG(ERR, SCHED, "%s: "
430                         "Incorrect value for tc rate\n", __func__);
431                         return -EINVAL;
432                 }
433         }
434
435         if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
436                 RTE_LOG(ERR, SCHED, "%s: "
437                 "Incorrect tc rate(best effort)\n", __func__);
438                 return -EINVAL;
439         }
440
441         if (params->tc_period == 0) {
442                 RTE_LOG(ERR, SCHED, "%s: "
443                 "Incorrect value for tc period\n", __func__);
444                 return -EINVAL;
445         }
446
447         return 0;
448 }
449
450 static int
451 rte_sched_port_check_params(struct rte_sched_port_params *params)
452 {
453         uint32_t i;
454
455         if (params == NULL) {
456                 RTE_LOG(ERR, SCHED,
457                         "%s: Incorrect value for parameter params\n", __func__);
458                 return -EINVAL;
459         }
460
461         /* socket */
462         if (params->socket < 0) {
463                 RTE_LOG(ERR, SCHED,
464                         "%s: Incorrect value for socket id\n", __func__);
465                 return -EINVAL;
466         }
467
468         /* rate */
469         if (params->rate == 0) {
470                 RTE_LOG(ERR, SCHED,
471                         "%s: Incorrect value for rate\n", __func__);
472                 return -EINVAL;
473         }
474
475         /* mtu */
476         if (params->mtu == 0) {
477                 RTE_LOG(ERR, SCHED,
478                         "%s: Incorrect value for mtu\n", __func__);
479                 return -EINVAL;
480         }
481
482         /* n_subports_per_port: non-zero, limited to 16 bits, power of 2 */
483         if (params->n_subports_per_port == 0 ||
484             params->n_subports_per_port > 1u << 16 ||
485             !rte_is_power_of_2(params->n_subports_per_port)) {
486                 RTE_LOG(ERR, SCHED,
487                         "%s: Incorrect value for number of subports\n", __func__);
488                 return -EINVAL;
489         }
490
491         if (params->subport_profiles == NULL ||
492                 params->n_subport_profiles == 0 ||
493                 params->n_max_subport_profiles == 0 ||
494                 params->n_subport_profiles > params->n_max_subport_profiles) {
495                 RTE_LOG(ERR, SCHED,
496                 "%s: Incorrect value for subport profiles\n", __func__);
497                 return -EINVAL;
498         }
499
500         for (i = 0; i < params->n_subport_profiles; i++) {
501                 struct rte_sched_subport_profile_params *p =
502                                                 params->subport_profiles + i;
503                 int status;
504
505                 status = subport_profile_check(p, params->rate);
506                 if (status != 0) {
507                         RTE_LOG(ERR, SCHED,
508                         "%s: subport profile check failed(%d)\n",
509                         __func__, status);
510                         return -EINVAL;
511                 }
512         }
513
514         /* n_pipes_per_subport: non-zero, power of 2 */
515         if (params->n_pipes_per_subport == 0 ||
516             !rte_is_power_of_2(params->n_pipes_per_subport)) {
517                 RTE_LOG(ERR, SCHED,
518                         "%s: Incorrect value for maximum pipes number\n", __func__);
519                 return -EINVAL;
520         }
521
522         return 0;
523 }
524
525 static uint32_t
526 rte_sched_subport_get_array_base(struct rte_sched_subport_params *params,
527         enum rte_sched_subport_array array)
528 {
529         uint32_t n_pipes_per_subport = params->n_pipes_per_subport_enabled;
530         uint32_t n_subport_pipe_queues =
531                 RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport;
532
533         uint32_t size_pipe = n_pipes_per_subport * sizeof(struct rte_sched_pipe);
534         uint32_t size_queue =
535                 n_subport_pipe_queues * sizeof(struct rte_sched_queue);
536         uint32_t size_queue_extra
537                 = n_subport_pipe_queues * sizeof(struct rte_sched_queue_extra);
538         uint32_t size_pipe_profiles = params->n_max_pipe_profiles *
539                 sizeof(struct rte_sched_pipe_profile);
540         uint32_t size_bmp_array =
541                 rte_bitmap_get_memory_footprint(n_subport_pipe_queues);
542         uint32_t size_per_pipe_queue_array, size_queue_array;
543
544         uint32_t base, i;
545
546         size_per_pipe_queue_array = 0;
547         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
548                 if (i < RTE_SCHED_TRAFFIC_CLASS_BE)
549                         size_per_pipe_queue_array +=
550                                 params->qsize[i] * sizeof(struct rte_mbuf *);
551                 else
552                         size_per_pipe_queue_array += RTE_SCHED_MAX_QUEUES_PER_TC *
553                                 params->qsize[i] * sizeof(struct rte_mbuf *);
554         }
555         size_queue_array = n_pipes_per_subport * size_per_pipe_queue_array;
556
557         base = 0;
558
559         if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE)
560                 return base;
561         base += RTE_CACHE_LINE_ROUNDUP(size_pipe);
562
563         if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE)
564                 return base;
565         base += RTE_CACHE_LINE_ROUNDUP(size_queue);
566
567         if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA)
568                 return base;
569         base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);
570
571         if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES)
572                 return base;
573         base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);
574
575         if (array == e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY)
576                 return base;
577         base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);
578
579         if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY)
580                 return base;
581         base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);
582
583         return base;
584 }
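
/*
 * Note: calling rte_sched_subport_get_array_base() with
 * e_RTE_SCHED_SUBPORT_ARRAY_TOTAL matches none of the early returns, so
 * it falls through all six arrays and yields the cache-line-rounded
 * total footprint; this is how rte_sched_port_get_memory_footprint()
 * and rte_sched_subport_config() below size the allocation.
 */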
585
586 static void
587 rte_sched_subport_config_qsize(struct rte_sched_subport *subport)
588 {
589         uint32_t i;
590
591         subport->qsize_add[0] = 0;
592
593         /* Strict-priority traffic classes */
594         for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
595                 subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1];
596
597         /* Best-effort traffic class */
598         subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] =
599                 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE] +
600                 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
601         subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] =
602                 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] +
603                 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
604         subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] =
605                 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] +
606                 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
607
608         subport->qsize_sum = subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] +
609                 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
610 }
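
/*
 * Worked example (illustrative qsize values only): with 13 traffic
 * classes and qsize[] = {64, 64, ..., 64, 1024}, the loop above yields
 * qsize_add[0..12] = {0, 64, 128, ..., 768} for the 12 strict-priority
 * queues, then qsize_add[13..15] = {1792, 2816, 3840} for the remaining
 * best-effort queues, and qsize_sum = 4864 mbuf slots per pipe.
 */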
611
612 static void
613 rte_sched_port_log_pipe_profile(struct rte_sched_subport *subport, uint32_t i)
614 {
615         struct rte_sched_pipe_profile *p = subport->pipe_profiles + i;
616
617         RTE_LOG(DEBUG, SCHED, "Low level config for pipe profile %u:\n"
618                 "       Token bucket: period = %"PRIu64", credits per period = %"PRIu64", size = %"PRIu64"\n"
619                 "       Traffic classes: period = %"PRIu64",\n"
620                 "       credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
621                 ", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
622                 ", %"PRIu64", %"PRIu64", %"PRIu64"]\n"
623                 "       Best-effort traffic class oversubscription: weight = %hhu\n"
624                 "       WRR cost: [%hhu, %hhu, %hhu, %hhu]\n",
625                 i,
626
627                 /* Token bucket */
628                 p->tb_period,
629                 p->tb_credits_per_period,
630                 p->tb_size,
631
632                 /* Traffic classes */
633                 p->tc_period,
634                 p->tc_credits_per_period[0],
635                 p->tc_credits_per_period[1],
636                 p->tc_credits_per_period[2],
637                 p->tc_credits_per_period[3],
638                 p->tc_credits_per_period[4],
639                 p->tc_credits_per_period[5],
640                 p->tc_credits_per_period[6],
641                 p->tc_credits_per_period[7],
642                 p->tc_credits_per_period[8],
643                 p->tc_credits_per_period[9],
644                 p->tc_credits_per_period[10],
645                 p->tc_credits_per_period[11],
646                 p->tc_credits_per_period[12],
647
648                 /* Best-effort traffic class oversubscription */
649                 p->tc_ov_weight,
650
651                 /* WRR */
652                 p->wrr_cost[0], p->wrr_cost[1], p->wrr_cost[2], p->wrr_cost[3]);
653 }
654
655 static void
656 rte_sched_port_log_subport_profile(struct rte_sched_port *port, uint32_t i)
657 {
658         struct rte_sched_subport_profile *p = port->subport_profiles + i;
659
660         RTE_LOG(DEBUG, SCHED, "Low level config for subport profile %u:\n"
661         "Token bucket: period = %"PRIu64", credits per period = %"PRIu64","
662         "size = %"PRIu64"\n"
663         "Traffic classes: period = %"PRIu64",\n"
664         "credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
665         " %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
666         " %"PRIu64", %"PRIu64", %"PRIu64"]\n",
667         i,
668
669         /* Token bucket */
670         p->tb_period,
671         p->tb_credits_per_period,
672         p->tb_size,
673
674         /* Traffic classes */
675         p->tc_period,
676         p->tc_credits_per_period[0],
677         p->tc_credits_per_period[1],
678         p->tc_credits_per_period[2],
679         p->tc_credits_per_period[3],
680         p->tc_credits_per_period[4],
681         p->tc_credits_per_period[5],
682         p->tc_credits_per_period[6],
683         p->tc_credits_per_period[7],
684         p->tc_credits_per_period[8],
685         p->tc_credits_per_period[9],
686         p->tc_credits_per_period[10],
687         p->tc_credits_per_period[11],
688         p->tc_credits_per_period[12]);
689 }
690
691 static inline uint64_t
692 rte_sched_time_ms_to_bytes(uint64_t time_ms, uint64_t rate)
693 {
694         uint64_t time = time_ms;
695
696         time = (time * rate) / 1000;
697
698         return time;
699 }
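
/*
 * Example: a tc_period of 10 ms at a rate of 1,250,000,000 bytes/sec
 * converts to (10 * 1250000000) / 1000 = 12,500,000 bytes, i.e. the
 * number of byte slots on the wire per traffic-class update period.
 */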
700
701 static void
702 rte_sched_pipe_profile_convert(struct rte_sched_subport *subport,
703         struct rte_sched_pipe_params *src,
704         struct rte_sched_pipe_profile *dst,
705         uint64_t rate)
706 {
707         uint32_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
708         uint32_t lcd1, lcd2, lcd;
709         uint32_t i;
710
711         /* Token Bucket */
712         if (src->tb_rate == rate) {
713                 dst->tb_credits_per_period = 1;
714                 dst->tb_period = 1;
715         } else {
716                 double tb_rate = (double) src->tb_rate
717                                 / (double) rate;
718                 double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
719
720                 rte_approx_64(tb_rate, d, &dst->tb_credits_per_period,
721                         &dst->tb_period);
722         }
723
724         dst->tb_size = src->tb_size;
725
726         /* Traffic Classes */
727         dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period,
728                                                 rate);
729
730         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
731                 if (subport->qsize[i])
732                         dst->tc_credits_per_period[i]
733                                 = rte_sched_time_ms_to_bytes(src->tc_period,
734                                         src->tc_rate[i]);
735
736         dst->tc_ov_weight = src->tc_ov_weight;
737
738         /* WRR queues */
739         wrr_cost[0] = src->wrr_weights[0];
740         wrr_cost[1] = src->wrr_weights[1];
741         wrr_cost[2] = src->wrr_weights[2];
742         wrr_cost[3] = src->wrr_weights[3];
743
744         lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
745         lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
746         lcd = rte_get_lcd(lcd1, lcd2);
747
748         wrr_cost[0] = lcd / wrr_cost[0];
749         wrr_cost[1] = lcd / wrr_cost[1];
750         wrr_cost[2] = lcd / wrr_cost[2];
751         wrr_cost[3] = lcd / wrr_cost[3];
752
753         dst->wrr_cost[0] = (uint8_t) wrr_cost[0];
754         dst->wrr_cost[1] = (uint8_t) wrr_cost[1];
755         dst->wrr_cost[2] = (uint8_t) wrr_cost[2];
756         dst->wrr_cost[3] = (uint8_t) wrr_cost[3];
757 }
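
/*
 * WRR cost example (assuming rte_get_lcd() returns the least common
 * multiple, as its use above implies): wrr_weights = {1, 2, 3, 4} gives
 * lcd1 = 2, lcd2 = 12, lcd = 12, hence wrr_cost = {12, 6, 4, 3}. A
 * higher weight thus maps to a lower per-byte token cost, so that queue
 * drains proportionally faster.
 */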
758
759 static void
760 rte_sched_subport_profile_convert(struct rte_sched_subport_profile_params *src,
761         struct rte_sched_subport_profile *dst,
762         uint64_t rate)
763 {
764         uint32_t i;
765
766         /* Token Bucket */
767         if (src->tb_rate == rate) {
768                 dst->tb_credits_per_period = 1;
769                 dst->tb_period = 1;
770         } else {
771                 double tb_rate = (double) src->tb_rate
772                                 / (double) rate;
773                 double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
774
775                 rte_approx_64(tb_rate, d, &dst->tb_credits_per_period,
776                         &dst->tb_period);
777         }
778
779         dst->tb_size = src->tb_size;
780
781         /* Traffic Classes */
782         dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period, rate);
783
784         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
785                 dst->tc_credits_per_period[i]
786                         = rte_sched_time_ms_to_bytes(src->tc_period,
787                                 src->tc_rate[i]);
788 }
789
790 static void
791 rte_sched_subport_config_pipe_profile_table(struct rte_sched_subport *subport,
792         struct rte_sched_subport_params *params, uint64_t rate)
793 {
794         uint32_t i;
795
796         for (i = 0; i < subport->n_pipe_profiles; i++) {
797                 struct rte_sched_pipe_params *src = params->pipe_profiles + i;
798                 struct rte_sched_pipe_profile *dst = subport->pipe_profiles + i;
799
800                 rte_sched_pipe_profile_convert(subport, src, dst, rate);
801                 rte_sched_port_log_pipe_profile(subport, i);
802         }
803
804         subport->pipe_tc_be_rate_max = 0;
805         for (i = 0; i < subport->n_pipe_profiles; i++) {
806                 struct rte_sched_pipe_params *src = params->pipe_profiles + i;
807                 uint64_t pipe_tc_be_rate = src->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];
808
809                 if (subport->pipe_tc_be_rate_max < pipe_tc_be_rate)
810                         subport->pipe_tc_be_rate_max = pipe_tc_be_rate;
811         }
812 }
813
814 static void
815 rte_sched_port_config_subport_profile_table(struct rte_sched_port *port,
816         struct rte_sched_port_params *params,
817         uint64_t rate)
818 {
819         uint32_t i;
820
821         for (i = 0; i < port->n_subport_profiles; i++) {
822                 struct rte_sched_subport_profile_params *src
823                                 = params->subport_profiles + i;
824                 struct rte_sched_subport_profile *dst
825                                 = port->subport_profiles + i;
826
827                 rte_sched_subport_profile_convert(src, dst, rate);
828                 rte_sched_port_log_subport_profile(port, i);
829         }
830 }
831
832 static int
833 rte_sched_subport_check_params(struct rte_sched_subport_params *params,
834         uint32_t n_max_pipes_per_subport,
835         uint64_t rate)
836 {
837         uint32_t i;
838
839         /* Check user parameters */
840         if (params == NULL) {
841                 RTE_LOG(ERR, SCHED,
842                         "%s: Incorrect value for parameter params\n", __func__);
843                 return -EINVAL;
844         }
845
846         /* qsize: if non-zero, power of 2,
847          * no bigger than 32K (due to 16-bit read/write pointers)
848          */
849         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
850                 uint16_t qsize = params->qsize[i];
851
852                 if (qsize != 0 && !rte_is_power_of_2(qsize)) {
853                         RTE_LOG(ERR, SCHED,
854                                 "%s: Incorrect value for qsize\n", __func__);
855                         return -EINVAL;
856                 }
857         }
858
859         if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
860                 RTE_LOG(ERR, SCHED, "%s: Incorrect qsize\n", __func__);
861                 return -EINVAL;
862         }
863
864         /* n_pipes_per_subport: non-zero, power of 2 */
865         if (params->n_pipes_per_subport_enabled == 0 ||
866                 params->n_pipes_per_subport_enabled > n_max_pipes_per_subport ||
867             !rte_is_power_of_2(params->n_pipes_per_subport_enabled)) {
868                 RTE_LOG(ERR, SCHED,
869                         "%s: Incorrect value for pipes number\n", __func__);
870                 return -EINVAL;
871         }
872
873         /* pipe_profiles and n_pipe_profiles */
874         if (params->pipe_profiles == NULL ||
875             params->n_pipe_profiles == 0 ||
876                 params->n_max_pipe_profiles == 0 ||
877                 params->n_pipe_profiles > params->n_max_pipe_profiles) {
878                 RTE_LOG(ERR, SCHED,
879                         "%s: Incorrect value for pipe profiles\n", __func__);
880                 return -EINVAL;
881         }
882
883         for (i = 0; i < params->n_pipe_profiles; i++) {
884                 struct rte_sched_pipe_params *p = params->pipe_profiles + i;
885                 int status;
886
887                 status = pipe_profile_check(p, rate, &params->qsize[0]);
888                 if (status != 0) {
889                         RTE_LOG(ERR, SCHED,
890                                 "%s: Pipe profile check failed(%d)\n", __func__, status);
891                         return -EINVAL;
892                 }
893         }
894
895         return 0;
896 }
897
898 uint32_t
899 rte_sched_port_get_memory_footprint(struct rte_sched_port_params *port_params,
900         struct rte_sched_subport_params **subport_params)
901 {
902         uint32_t size0 = 0, size1 = 0, i;
903         int status;
904
905         status = rte_sched_port_check_params(port_params);
906         if (status != 0) {
907                 RTE_LOG(ERR, SCHED,
908                         "%s: Port scheduler port params check failed (%d)\n",
909                         __func__, status);
910
911                 return 0;
912         }
913
914         for (i = 0; i < port_params->n_subports_per_port; i++) {
915                 struct rte_sched_subport_params *sp = subport_params[i];
916
917                 status = rte_sched_subport_check_params(sp,
918                                 port_params->n_pipes_per_subport,
919                                 port_params->rate);
920                 if (status != 0) {
921                         RTE_LOG(ERR, SCHED,
922                                 "%s: Port scheduler subport params check failed (%d)\n",
923                                 __func__, status);
924
925                         return 0;
926                 }
927         }
928
929         size0 = sizeof(struct rte_sched_port);
930
931         for (i = 0; i < port_params->n_subports_per_port; i++) {
932                 struct rte_sched_subport_params *sp = subport_params[i];
933
934                 size1 += rte_sched_subport_get_array_base(sp,
935                                         e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
936         }
937
938         return size0 + size1;
939 }
940
941 struct rte_sched_port *
942 rte_sched_port_config(struct rte_sched_port_params *params)
943 {
944         struct rte_sched_port *port = NULL;
945         uint32_t size0, size1, size2;
946         uint32_t cycles_per_byte;
947         uint32_t i, j;
948         int status;
949
950         status = rte_sched_port_check_params(params);
951         if (status != 0) {
952                 RTE_LOG(ERR, SCHED,
953                         "%s: Port scheduler params check failed (%d)\n",
954                         __func__, status);
955                 return NULL;
956         }
957
958         size0 = sizeof(struct rte_sched_port);
959         size1 = params->n_subports_per_port * sizeof(struct rte_sched_subport *);
960         size2 = params->n_max_subport_profiles *
961                 sizeof(struct rte_sched_subport_profile);
962
963         /* Allocate memory to store the data structures */
964         port = rte_zmalloc_socket("qos_params", size0 + size1,
965                                  RTE_CACHE_LINE_SIZE, params->socket);
966         if (port == NULL) {
967                 RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);
968
969                 return NULL;
970         }
971
972         /* Allocate memory to store the subport profile */
973         port->subport_profiles = rte_zmalloc_socket("subport_profile", size2,
974                                         RTE_CACHE_LINE_SIZE, params->socket);
975         if (port->subport_profiles == NULL) {
976                 RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);
977                 rte_free(port);
978                 return NULL;
979         }
980
981         /* User parameters */
982         port->n_subports_per_port = params->n_subports_per_port;
983         port->n_subport_profiles = params->n_subport_profiles;
984         port->n_max_subport_profiles = params->n_max_subport_profiles;
985         port->n_pipes_per_subport = params->n_pipes_per_subport;
986         port->n_pipes_per_subport_log2 =
987                         __builtin_ctz(params->n_pipes_per_subport);
988         port->socket = params->socket;
989
990         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
991                 port->pipe_queue[i] = i;
992
993         for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
994                 port->pipe_tc[i] = j;
995
996                 if (j < RTE_SCHED_TRAFFIC_CLASS_BE)
997                         j++;
998         }
999
1000         for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
1001                 port->tc_queue[i] = j;
1002
1003                 if (i >= RTE_SCHED_TRAFFIC_CLASS_BE)
1004                         j++;
1005         }
1006         port->rate = params->rate;
1007         port->mtu = params->mtu + params->frame_overhead;
1008         port->frame_overhead = params->frame_overhead;
1009
1010         /* Timing */
1011         port->time_cpu_cycles = rte_get_tsc_cycles();
1012         port->time_cpu_bytes = 0;
1013         port->time = 0;
1014
1015         /* Subport profile table */
1016         rte_sched_port_config_subport_profile_table(port, params, port->rate);
1017
1018         cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT)
1019                 / params->rate;
1020         port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte);
1021         port->cycles_per_byte = cycles_per_byte;
1022
1023         /* Grinders */
1024         port->pkts_out = NULL;
1025         port->n_pkts_out = 0;
1026         port->subport_id = 0;
1027
1028         return port;
1029 }
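
/*
 * Minimal usage sketch (hypothetical values, error handling elided; the
 * subport and pipe parameter tables are assumed to be filled in by the
 * caller):
 *
 *	struct rte_sched_port_params pp = {
 *		.socket = 0,
 *		.rate = 1250000000,	// 10 GbE payload rate, bytes/sec
 *		.mtu = 1522,
 *		.frame_overhead = 24,	// preamble + IFG + FCS
 *		.n_subports_per_port = 1,
 *		.n_pipes_per_subport = 4096,
 *		.subport_profiles = subport_profile_table,
 *		.n_subport_profiles = 1,
 *		.n_max_subport_profiles = 1,
 *	};
 *	struct rte_sched_port *p = rte_sched_port_config(&pp);
 *
 *	rte_sched_subport_config(p, 0, &subport_params, 0);
 *	rte_sched_pipe_config(p, 0, 0, 0);
 */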
1030
1031 static inline void
1032 rte_sched_subport_free(struct rte_sched_port *port,
1033         struct rte_sched_subport *subport)
1034 {
1035         uint32_t n_subport_pipe_queues;
1036         uint32_t qindex;
1037
1038         if (subport == NULL)
1039                 return;
1040
1041         n_subport_pipe_queues = rte_sched_subport_pipe_queues(subport);
1042
1043         /* Free enqueued mbufs */
1044         for (qindex = 0; qindex < n_subport_pipe_queues; qindex++) {
1045                 struct rte_mbuf **mbufs =
1046                         rte_sched_subport_pipe_qbase(subport, qindex);
1047                 uint16_t qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
1048                 if (qsize != 0) {
1049                         struct rte_sched_queue *queue = subport->queue + qindex;
1050                         uint16_t qr = queue->qr & (qsize - 1);
1051                         uint16_t qw = queue->qw & (qsize - 1);
1052
1053                         for (; qr != qw; qr = (qr + 1) & (qsize - 1))
1054                                 rte_pktmbuf_free(mbufs[qr]);
1055                 }
1056         }
1057
1058         rte_free(subport);
1059 }
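
/*
 * The drain loop above relies on qr/qw being free-running 16-bit indexes
 * masked by (qsize - 1): e.g. with qsize = 8, qr = 14 and qw = 3 mask to
 * 6 and 3, so the loop frees slots 6, 7, 0, 1, 2 and stops at qw.
 */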
1060
1061 void
1062 rte_sched_port_free(struct rte_sched_port *port)
1063 {
1064         uint32_t i;
1065
1066         /* Check user parameters */
1067         if (port == NULL)
1068                 return;
1069
1070         for (i = 0; i < port->n_subports_per_port; i++)
1071                 rte_sched_subport_free(port, port->subports[i]);
1072
1073         rte_free(port->subport_profiles);
1074         rte_free(port);
1075 }
1076
1077 static void
1078 rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)
1079 {
1080         uint32_t i;
1081
1082         for (i = 0; i < n_subports; i++) {
1083                 struct rte_sched_subport *subport = port->subports[i];
1084
1085                 rte_sched_subport_free(port, subport);
1086         }
1087
1088         rte_free(port->subport_profiles);
1089         rte_free(port);
1090 }
1091
1092 #ifdef RTE_SCHED_CMAN
1093 static int
1094 rte_sched_red_config(struct rte_sched_port *port,
1095         struct rte_sched_subport *s,
1096         struct rte_sched_subport_params *params,
1097         uint32_t n_subports)
1098 {
1099         uint32_t i;
1100
1101         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
1102
1103                 uint32_t j;
1104
1105                 for (j = 0; j < RTE_COLORS; j++) {
1106                         /* if min/max are both zero, then RED is disabled */
1107                         if ((params->cman_params->red_params[i][j].min_th |
1108                                  params->cman_params->red_params[i][j].max_th) == 0) {
1109                                 continue;
1110                         }
1111
1112                         if (rte_red_config_init(&s->red_config[i][j],
1113                                 params->cman_params->red_params[i][j].wq_log2,
1114                                 params->cman_params->red_params[i][j].min_th,
1115                                 params->cman_params->red_params[i][j].max_th,
1116                                 params->cman_params->red_params[i][j].maxp_inv) != 0) {
1117                                 rte_sched_free_memory(port, n_subports);
1118
1119                                 RTE_LOG(NOTICE, SCHED,
1120                                 "%s: RED configuration init fails\n", __func__);
1121                                 return -EINVAL;
1122                         }
1123                 }
1124         }
1125         s->cman = RTE_SCHED_CMAN_RED;
1126         return 0;
1127 }
1128
1129 static int
1130 rte_sched_pie_config(struct rte_sched_port *port,
1131         struct rte_sched_subport *s,
1132         struct rte_sched_subport_params *params,
1133         uint32_t n_subports)
1134 {
1135         uint32_t i;
1136
1137         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
1138                 if (params->cman_params->pie_params[i].tailq_th > params->qsize[i]) {
1139                         RTE_LOG(NOTICE, SCHED,
1140                         "%s: PIE tailq threshold incorrect\n", __func__);
1141                         return -EINVAL;
1142                 }
1143
1144                 if (rte_pie_config_init(&s->pie_config[i],
1145                         params->cman_params->pie_params[i].qdelay_ref,
1146                         params->cman_params->pie_params[i].dp_update_interval,
1147                         params->cman_params->pie_params[i].max_burst,
1148                         params->cman_params->pie_params[i].tailq_th) != 0) {
1149                         rte_sched_free_memory(port, n_subports);
1150
1151                         RTE_LOG(NOTICE, SCHED,
1152                         "%s: PIE configuration init fails\n", __func__);
1153                         return -EINVAL;
1154                 }
1155         }
1156         s->cman = RTE_SCHED_CMAN_PIE;
1157         return 0;
1158 }
1159
1160 static int
1161 rte_sched_cman_config(struct rte_sched_port *port,
1162         struct rte_sched_subport *s,
1163         struct rte_sched_subport_params *params,
1164         uint32_t n_subports)
1165 {
1166         if (params->cman_params->cman_mode == RTE_SCHED_CMAN_RED)
1167                 return rte_sched_red_config(port, s, params, n_subports);
1168
1169         else if (params->cman_params->cman_mode == RTE_SCHED_CMAN_PIE)
1170                 return rte_sched_pie_config(port, s, params, n_subports);
1171
1172         return -EINVAL;
1173 }
1174 #endif
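
/*
 * Configuration sketch for the CMAN path above (hypothetical threshold
 * values; field names are taken from their use in rte_sched_red_config()
 * and rte_sched_cman_config()):
 *
 *	static struct rte_sched_cman_params cman = {
 *		.cman_mode = RTE_SCHED_CMAN_RED,
 *		.red_params = {
 *			// zero min_th/max_th leaves RED off for that entry
 *			[0][RTE_COLOR_GREEN] = {.min_th = 48, .max_th = 64,
 *				.maxp_inv = 10, .wq_log2 = 9},
 *		},
 *	};
 *	subport_params.cman_params = &cman;	// NULL disables CMAN
 */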
1175
1176 int
1177 rte_sched_subport_config(struct rte_sched_port *port,
1178         uint32_t subport_id,
1179         struct rte_sched_subport_params *params,
1180         uint32_t subport_profile_id)
1181 {
1182         struct rte_sched_subport *s = NULL;
1183         uint32_t n_subports = subport_id;
1184         struct rte_sched_subport_profile *profile;
1185         uint32_t n_subport_pipe_queues, i;
1186         uint32_t size0, size1, bmp_mem_size;
1187         int status;
1188         int ret;
1189
1190         /* Check user parameters */
1191         if (port == NULL) {
1192                 RTE_LOG(ERR, SCHED,
1193                         "%s: Incorrect value for parameter port\n", __func__);
1194                 return -EINVAL;
1195         }
1196
1197         if (subport_id >= port->n_subports_per_port) {
1198                 RTE_LOG(ERR, SCHED,
1199                         "%s: Incorrect value for subport id\n", __func__);
1200                 ret = -EINVAL;
1201                 goto out;
1202         }
1203
1204         if (subport_profile_id >= port->n_max_subport_profiles) {
1205                 RTE_LOG(ERR, SCHED, "%s: "
1206                         "Number of subport profile exceeds the max limit\n",
1207                         __func__);
1208                 ret = -EINVAL;
1209                 goto out;
1210         }
1211
1212         /* Memory is allocated only on the first invocation of the API for a
1213          * given subport. Subsequent invocations on the same subport only
1214          * update the subport bandwidth parameters.
1215          */
1216         if (port->subports[subport_id] == NULL) {
1217
1218                 status = rte_sched_subport_check_params(params,
1219                         port->n_pipes_per_subport,
1220                         port->rate);
1221                 if (status != 0) {
1222                         RTE_LOG(NOTICE, SCHED,
1223                                 "%s: Port scheduler params check failed (%d)\n",
1224                                 __func__, status);
1225                         ret = -EINVAL;
1226                         goto out;
1227                 }
1228
1229                 /* Determine the amount of memory to allocate */
1230                 size0 = sizeof(struct rte_sched_subport);
1231                 size1 = rte_sched_subport_get_array_base(params,
1232                                         e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
1233
1234                 /* Allocate memory to store the data structures */
1235                 s = rte_zmalloc_socket("subport_params", size0 + size1,
1236                         RTE_CACHE_LINE_SIZE, port->socket);
1237                 if (s == NULL) {
1238                         RTE_LOG(ERR, SCHED,
1239                                 "%s: Memory allocation fails\n", __func__);
1240                         ret = -ENOMEM;
1241                         goto out;
1242                 }
1243
1244                 n_subports++;
1245
1246                 subport_profile_id = 0;
1247
1248                 /* Port */
1249                 port->subports[subport_id] = s;
1250
1251                 s->tb_time = port->time;
1252
1253                 /* compile time checks */
1254                 RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
1255                 RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS &
1256                         (RTE_SCHED_PORT_N_GRINDERS - 1));
1257
1258                 /* User parameters */
1259                 s->n_pipes_per_subport_enabled =
1260                                 params->n_pipes_per_subport_enabled;
1261                 memcpy(s->qsize, params->qsize, sizeof(params->qsize));
1262                 s->n_pipe_profiles = params->n_pipe_profiles;
1263                 s->n_max_pipe_profiles = params->n_max_pipe_profiles;
1264
1265 #ifdef RTE_SCHED_CMAN
1266                 if (params->cman_params != NULL) {
1267                         s->cman_enabled = true;
1268                         status = rte_sched_cman_config(port, s, params, n_subports);
1269                         if (status) {
1270                                 RTE_LOG(NOTICE, SCHED,
1271                                         "%s: CMAN configuration fails\n", __func__);
1272                                 return status;
1273                         }
1274                 } else {
1275                         s->cman_enabled = false;
1276                 }
1277 #endif
1278
1279                 /* Scheduling loop detection */
1280                 s->pipe_loop = RTE_SCHED_PIPE_INVALID;
1281                 s->pipe_exhaustion = 0;
1282
1283                 /* Grinders */
1284                 s->busy_grinders = 0;
1285
1286                 /* Queue base calculation */
1287                 rte_sched_subport_config_qsize(s);
1288
1289                 /* Large data structures */
1290                 s->pipe = (struct rte_sched_pipe *)
1291                         (s->memory + rte_sched_subport_get_array_base(params,
1292                         e_RTE_SCHED_SUBPORT_ARRAY_PIPE));
1293                 s->queue = (struct rte_sched_queue *)
1294                         (s->memory + rte_sched_subport_get_array_base(params,
1295                         e_RTE_SCHED_SUBPORT_ARRAY_QUEUE));
1296                 s->queue_extra = (struct rte_sched_queue_extra *)
1297                         (s->memory + rte_sched_subport_get_array_base(params,
1298                         e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA));
1299                 s->pipe_profiles = (struct rte_sched_pipe_profile *)
1300                         (s->memory + rte_sched_subport_get_array_base(params,
1301                         e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES));
1302                 s->bmp_array =  s->memory + rte_sched_subport_get_array_base(
1303                                 params, e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY);
1304                 s->queue_array = (struct rte_mbuf **)
1305                         (s->memory + rte_sched_subport_get_array_base(params,
1306                         e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY));
1307
1308                 /* Pipe profile table */
1309                 rte_sched_subport_config_pipe_profile_table(s, params,
1310                                                             port->rate);
1311
1312                 /* Bitmap */
1313                 n_subport_pipe_queues = rte_sched_subport_pipe_queues(s);
1314                 bmp_mem_size = rte_bitmap_get_memory_footprint(
1315                                                 n_subport_pipe_queues);
1316                 s->bmp = rte_bitmap_init(n_subport_pipe_queues, s->bmp_array,
1317                                         bmp_mem_size);
1318                 if (s->bmp == NULL) {
1319                         RTE_LOG(ERR, SCHED,
1320                                 "%s: Subport bitmap init error\n", __func__);
1321                         ret = -EINVAL;
1322                         goto out;
1323                 }
1324
1325                 for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
1326                         s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
1327
1328 #ifdef RTE_SCHED_SUBPORT_TC_OV
1329                 /* TC oversubscription */
1330                 s->tc_ov_wm_min = port->mtu;
1331                 s->tc_ov_period_id = 0;
1332                 s->tc_ov = 0;
1333                 s->tc_ov_n = 0;
1334                 s->tc_ov_rate = 0;
1335 #endif
1336         }
1337
1338         {
1339                 /* Update subport parameters from the subport profile table */
1340                 profile = port->subport_profiles + subport_profile_id;
1341
1342                 s = port->subports[subport_id];
1343
1344                 s->tb_credits = profile->tb_size / 2;
1345
1346                 s->tc_time = port->time + profile->tc_period;
1347
1348                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
1349                         if (s->qsize[i])
1350                                 s->tc_credits[i] =
1351                                         profile->tc_credits_per_period[i];
1352                         else
1353                                 profile->tc_credits_per_period[i] = 0;
1354
1355 #ifdef RTE_SCHED_SUBPORT_TC_OV
1356                 s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period,
1357                                                         s->pipe_tc_be_rate_max);
1358                 s->tc_ov_wm = s->tc_ov_wm_max;
1359 #endif
1360                 s->profile = subport_profile_id;
1361
1362         }
1363
1364         rte_sched_port_log_subport_profile(port, subport_profile_id);
1365
1366         return 0;
1367
1368 out:
1369         rte_sched_free_memory(port, n_subports);
1370
1371         return ret;
1372 }
1373
1374 int
1375 rte_sched_pipe_config(struct rte_sched_port *port,
1376         uint32_t subport_id,
1377         uint32_t pipe_id,
1378         int32_t pipe_profile)
1379 {
1380         struct rte_sched_subport *s;
1381         struct rte_sched_subport_profile *sp;
1382         struct rte_sched_pipe *p;
1383         struct rte_sched_pipe_profile *params;
1384         uint32_t n_subports = subport_id + 1;
1385         uint32_t deactivate, profile, i;
1386         int ret;
1387
1388         /* Check user parameters */
1389         profile = (uint32_t) pipe_profile;
1390         deactivate = (pipe_profile < 0);
1391
1392         if (port == NULL) {
1393                 RTE_LOG(ERR, SCHED,
1394                         "%s: Incorrect value for parameter port\n", __func__);
1395                 return -EINVAL;
1396         }
1397
1398         if (subport_id >= port->n_subports_per_port) {
1399                 RTE_LOG(ERR, SCHED,
1400                         "%s: Incorrect value for parameter subport id\n", __func__);
1401                 ret = -EINVAL;
1402                 goto out;
1403         }
1404
1405         s = port->subports[subport_id];
1406         if (pipe_id >= s->n_pipes_per_subport_enabled) {
1407                 RTE_LOG(ERR, SCHED,
1408                         "%s: Incorrect value for parameter pipe id\n", __func__);
1409                 ret = -EINVAL;
1410                 goto out;
1411         }
1412
1413         if (!deactivate && profile >= s->n_pipe_profiles) {
1414                 RTE_LOG(ERR, SCHED,
1415                         "%s: Incorrect value for parameter pipe profile\n", __func__);
1416                 ret = -EINVAL;
1417                 goto out;
1418         }
1419
1420         sp = port->subport_profiles + s->profile;
1421         /* Handle the case when pipe already has a valid configuration */
1422         p = s->pipe + pipe_id;
1423         if (p->tb_time) {
1424                 params = s->pipe_profiles + p->profile;
1425
1426                 double subport_tc_be_rate =
1427                 (double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1428                         / (double) sp->tc_period;
1429                 double pipe_tc_be_rate =
1430                         (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1431                         / (double) params->tc_period;
1432                 uint32_t tc_be_ov = s->tc_ov;
1433
1434                 /* Unplug pipe from its subport */
1435                 s->tc_ov_n -= params->tc_ov_weight;
1436                 s->tc_ov_rate -= pipe_tc_be_rate;
1437                 s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
1438
1439                 if (s->tc_ov != tc_be_ov) {
1440                         RTE_LOG(DEBUG, SCHED,
1441                                 "Subport %u Best-effort TC oversubscription is OFF (%.4lf >= %.4lf)\n",
1442                                 subport_id, subport_tc_be_rate, s->tc_ov_rate);
1443                 }
1444
1445                 /* Reset the pipe */
1446                 memset(p, 0, sizeof(struct rte_sched_pipe));
1447         }
1448
1449         if (deactivate)
1450                 return 0;
1451
1452         /* Apply the new pipe configuration */
1453         p->profile = profile;
1454         params = s->pipe_profiles + p->profile;
1455
1456         /* Token Bucket (TB) */
1457         p->tb_time = port->time;
1458         p->tb_credits = params->tb_size / 2;
1459
1460         /* Traffic Classes (TCs) */
1461         p->tc_time = port->time + params->tc_period;
1462
1463         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
1464                 if (s->qsize[i])
1465                         p->tc_credits[i] = params->tc_credits_per_period[i];
1466
1467         {
1468                 /* Subport best effort tc oversubscription */
1469                 double subport_tc_be_rate =
1470                         (double) sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] /
1471                         (double) sp->tc_period;
1472                 double pipe_tc_be_rate =
1473                         (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1474                         / (double) params->tc_period;
1475                 uint32_t tc_be_ov = s->tc_ov;
1476
1477                 s->tc_ov_n += params->tc_ov_weight;
1478                 s->tc_ov_rate += pipe_tc_be_rate;
1479                 s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
1480
1481                 if (s->tc_ov != tc_be_ov) {
1482                         RTE_LOG(DEBUG, SCHED,
1483                                 "Subport %u Best-effort TC oversubscription is ON (%.4lf < %.4lf)\n",
1484                                 subport_id, subport_tc_be_rate, s->tc_ov_rate);
1485                 }
1486                 p->tc_ov_period_id = s->tc_ov_period_id;
1487                 p->tc_ov_credits = s->tc_ov_wm;
1488         }
1489
1490         return 0;
1491
1492 out:
1493         rte_sched_free_memory(port, n_subports);
1494
1495         return ret;
1496 }
1497
1498 int
1499 rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,
1500         uint32_t subport_id,
1501         struct rte_sched_pipe_params *params,
1502         uint32_t *pipe_profile_id)
1503 {
1504         struct rte_sched_subport *s;
1505         struct rte_sched_pipe_profile *pp;
1506         uint32_t i;
1507         int status;
1508
1509         /* Port */
1510         if (port == NULL) {
1511                 RTE_LOG(ERR, SCHED,
1512                         "%s: Incorrect value for parameter port\n", __func__);
1513                 return -EINVAL;
1514         }
1515
1516         /* Subport id must not exceed the max limit */
1517         if (subport_id >= port->n_subports_per_port) {
1518                 RTE_LOG(ERR, SCHED,
1519                         "%s: Incorrect value for subport id\n", __func__);
1520                 return -EINVAL;
1521         }
1522
1523         s = port->subports[subport_id];
1524
1525         /* Number of pipe profiles must not exceed the max limit */
1526         if (s->n_pipe_profiles >= s->n_max_pipe_profiles) {
1527                 RTE_LOG(ERR, SCHED,
1528                         "%s: Number of pipe profiles exceeds the max limit\n", __func__);
1529                 return -EINVAL;
1530         }
1531
1532         /* Pipe params */
1533         status = pipe_profile_check(params, port->rate, &s->qsize[0]);
1534         if (status != 0) {
1535                 RTE_LOG(ERR, SCHED,
1536                         "%s: Pipe profile check failed (%d)\n", __func__, status);
1537                 return -EINVAL;
1538         }
1539
1540         pp = &s->pipe_profiles[s->n_pipe_profiles];
1541         rte_sched_pipe_profile_convert(s, params, pp, port->rate);
1542
1543         /* Pipe profile must not already exist */
1544         for (i = 0; i < s->n_pipe_profiles; i++)
1545                 if (memcmp(s->pipe_profiles + i, pp, sizeof(*pp)) == 0) {
1546                         RTE_LOG(ERR, SCHED,
1547                                 "%s: Pipe profile exists\n", __func__);
1548                         return -EINVAL;
1549                 }
1550
1551         /* Pipe profile commit */
1552         *pipe_profile_id = s->n_pipe_profiles;
1553         s->n_pipe_profiles++;
1554
1555         if (s->pipe_tc_be_rate_max < params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE])
1556                 s->pipe_tc_be_rate_max = params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];
1557
1558         rte_sched_port_log_pipe_profile(s, *pipe_profile_id);
1559
1560         return 0;
1561 }
1562
1563 int
1564 rte_sched_port_subport_profile_add(struct rte_sched_port *port,
1565         struct rte_sched_subport_profile_params *params,
1566         uint32_t *subport_profile_id)
1567 {
1568         int status;
1569         uint32_t i;
1570         struct rte_sched_subport_profile *dst;
1571
1572         /* Port */
1573         if (port == NULL) {
1574                 RTE_LOG(ERR, SCHED,
1575                         "%s: Incorrect value for parameter port\n", __func__);
1576                 return -EINVAL;
1577         }
1578
1579         if (params == NULL) {
1580                 RTE_LOG(ERR, SCHED,
1581                         "%s: Incorrect value for parameter profile\n", __func__);
1582                 return -EINVAL;
1583         }
1584
1585         if (subport_profile_id == NULL) {
1586                 RTE_LOG(ERR, SCHED,
1587                         "%s: Incorrect value for parameter subport_profile_id\n",
1588                         __func__);
1589                 return -EINVAL;
1590         }
1591
1592         dst = port->subport_profiles + port->n_subport_profiles;
1593
1594         /* Number of subport profiles must not exceed the max limit */
1595         if (port->n_subport_profiles >= port->n_max_subport_profiles) {
1596                 RTE_LOG(ERR, SCHED,
1597                         "%s: Number of subport profiles exceeds the max limit\n",
1598                         __func__);
1599                 return -EINVAL;
1600         }
1601
1602         status = subport_profile_check(params, port->rate);
1603         if (status != 0) {
1604                 RTE_LOG(ERR, SCHED,
1605                         "%s: Subport profile check failed (%d)\n", __func__, status);
1606                 return -EINVAL;
1607         }
1608
1609         rte_sched_subport_profile_convert(params, dst, port->rate);
1610
1611         /* Subport profile must not already exist */
1612         for (i = 0; i < port->n_subport_profiles; i++)
1613                 if (memcmp(port->subport_profiles + i,
1614                     dst, sizeof(*dst)) == 0) {
1615                         RTE_LOG(ERR, SCHED,
1616                                 "%s: Subport profile exists\n", __func__);
1617                         return -EINVAL;
1618                 }
1619
1620         /* Subport profile commit */
1621         *subport_profile_id = port->n_subport_profiles;
1622         port->n_subport_profiles++;
1623
1624         rte_sched_port_log_subport_profile(port, *subport_profile_id);
1625
1626         return 0;
1627 }
1628
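/*
 * Build the port-level queue index for a (subport, pipe, traffic class,
 * queue) tuple. Bit layout, LSB first: 4 bits select one of the
 * RTE_SCHED_QUEUES_PER_PIPE = 16 queues inside the pipe, the next
 * n_pipes_per_subport_log2 bits select the pipe, and the remaining bits
 * select the subport (both counts are enforced to be powers of two at
 * configuration time, so the masks below truncate correctly).
 *
 * Illustrative example (hypothetical values): with 4096 pipes per subport
 * (n_pipes_per_subport_log2 = 12) and the default mapping where the
 * best-effort class starts at queue 12, subport 2, pipe 5, traffic class
 * RTE_SCHED_TRAFFIC_CLASS_BE, queue 3 yields
 * (2 << 16) | (5 << 4) | (12 + 3) = 0x2005F.
 */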
1629 static inline uint32_t
1630 rte_sched_port_qindex(struct rte_sched_port *port,
1631         uint32_t subport,
1632         uint32_t pipe,
1633         uint32_t traffic_class,
1634         uint32_t queue)
1635 {
1636         return ((subport & (port->n_subports_per_port - 1)) <<
1637                 (port->n_pipes_per_subport_log2 + 4)) |
1638                 ((pipe &
1639                 (port->subports[subport]->n_pipes_per_subport_enabled - 1)) << 4) |
1640                 ((rte_sched_port_pipe_queue(port, traffic_class) + queue) &
1641                 (RTE_SCHED_QUEUES_PER_PIPE - 1));
1642 }
1643
1644 void
1645 rte_sched_port_pkt_write(struct rte_sched_port *port,
1646                          struct rte_mbuf *pkt,
1647                          uint32_t subport, uint32_t pipe,
1648                          uint32_t traffic_class,
1649                          uint32_t queue, enum rte_color color)
1650 {
1651         uint32_t queue_id =
1652                 rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
1653
1654         rte_mbuf_sched_set(pkt, queue_id, traffic_class, (uint8_t)color);
1655 }
1656
1657 void
1658 rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
1659                                   const struct rte_mbuf *pkt,
1660                                   uint32_t *subport, uint32_t *pipe,
1661                                   uint32_t *traffic_class, uint32_t *queue)
1662 {
1663         uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
1664
1665         *subport = queue_id >> (port->n_pipes_per_subport_log2 + 4);
1666         *pipe = (queue_id >> 4) &
1667                 (port->subports[*subport]->n_pipes_per_subport_enabled - 1);
1668         *traffic_class = rte_sched_port_pipe_tc(port, queue_id);
1669         *queue = rte_sched_port_tc_queue(port, queue_id);
1670 }
1671
1672 enum rte_color
1673 rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
1674 {
1675         return (enum rte_color)rte_mbuf_sched_color_get(pkt);
1676 }
1677
1678 int
1679 rte_sched_subport_read_stats(struct rte_sched_port *port,
1680                              uint32_t subport_id,
1681                              struct rte_sched_subport_stats *stats,
1682                              uint32_t *tc_ov)
1683 {
1684         struct rte_sched_subport *s;
1685
1686         /* Check user parameters */
1687         if (port == NULL) {
1688                 RTE_LOG(ERR, SCHED,
1689                         "%s: Incorrect value for parameter port\n", __func__);
1690                 return -EINVAL;
1691         }
1692
1693         if (subport_id >= port->n_subports_per_port) {
1694                 RTE_LOG(ERR, SCHED,
1695                         "%s: Incorrect value for subport id\n", __func__);
1696                 return -EINVAL;
1697         }
1698
1699         if (stats == NULL) {
1700                 RTE_LOG(ERR, SCHED,
1701                         "%s: Incorrect value for parameter stats\n", __func__);
1702                 return -EINVAL;
1703         }
1704
1705         if (tc_ov == NULL) {
1706                 RTE_LOG(ERR, SCHED,
1707                         "%s: Incorrect value for tc_ov\n", __func__);
1708                 return -EINVAL;
1709         }
1710
1711         s = port->subports[subport_id];
1712
1713         /* Copy subport stats and clear */
1714         memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
1715         memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));
1716
1717         /* Subport TC oversubscription status */
1718         *tc_ov = s->tc_ov;
1719
1720         return 0;
1721 }
1722
1723 int
1724 rte_sched_queue_read_stats(struct rte_sched_port *port,
1725         uint32_t queue_id,
1726         struct rte_sched_queue_stats *stats,
1727         uint16_t *qlen)
1728 {
1729         struct rte_sched_subport *s;
1730         struct rte_sched_queue *q;
1731         struct rte_sched_queue_extra *qe;
1732         uint32_t subport_id, subport_qmask, subport_qindex;
1733
1734         /* Check user parameters */
1735         if (port == NULL) {
1736                 RTE_LOG(ERR, SCHED,
1737                         "%s: Incorrect value for parameter port\n", __func__);
1738                 return -EINVAL;
1739         }
1740
1741         if (queue_id >= rte_sched_port_queues_per_port(port)) {
1742                 RTE_LOG(ERR, SCHED,
1743                         "%s: Incorrect value for queue id\n", __func__);
1744                 return -EINVAL;
1745         }
1746
1747         if (stats == NULL) {
1748                 RTE_LOG(ERR, SCHED,
1749                         "%s: Incorrect value for parameter stats\n", __func__);
1750                 return -EINVAL;
1751         }
1752
1753         if (qlen == NULL) {
1754                 RTE_LOG(ERR, SCHED,
1755                         "%s: Incorrect value for parameter qlen\n", __func__);
1756                 return -EINVAL;
1757         }
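
        /*
         * Despite its name, subport_qmask holds the number of queue index
         * bits per subport (pipe bits plus 4 traffic class/queue bits); it
         * is used below both as a shift count and to build the actual mask.
         */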
1758         subport_qmask = port->n_pipes_per_subport_log2 + 4;
1759         subport_id = (queue_id >> subport_qmask) & (port->n_subports_per_port - 1);
1760
1761         s = port->subports[subport_id];
1762         subport_qindex = ((1 << subport_qmask) - 1) & queue_id;
1763         q = s->queue + subport_qindex;
1764         qe = s->queue_extra + subport_qindex;
1765
1766         /* Copy queue stats and clear */
1767         memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
1768         memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats));
1769
1770         /* Queue length */
1771         *qlen = q->qw - q->qr;
1772
1773         return 0;
1774 }
1775
1776 #ifdef RTE_SCHED_DEBUG
1777
1778 static inline int
1779 rte_sched_port_queue_is_empty(struct rte_sched_subport *subport,
1780         uint32_t qindex)
1781 {
1782         struct rte_sched_queue *queue = subport->queue + qindex;
1783
1784         return queue->qr == queue->qw;
1785 }
1786
1787 #endif /* RTE_SCHED_DEBUG */
1788
1789 #ifdef RTE_SCHED_COLLECT_STATS
1790
1791 static inline void
1792 rte_sched_port_update_subport_stats(struct rte_sched_port *port,
1793         struct rte_sched_subport *subport,
1794         uint32_t qindex,
1795         struct rte_mbuf *pkt)
1796 {
1797         uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
1798         uint32_t pkt_len = pkt->pkt_len;
1799
1800         subport->stats.n_pkts_tc[tc_index] += 1;
1801         subport->stats.n_bytes_tc[tc_index] += pkt_len;
1802 }
1803
1804 static inline void
1805 rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
1806         struct rte_sched_subport *subport,
1807         uint32_t qindex,
1808         struct rte_mbuf *pkt,
1809         __rte_unused uint32_t n_pkts_cman_dropped)
1810 {
1811         uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
1812         uint32_t pkt_len = pkt->pkt_len;
1813
1814         subport->stats.n_pkts_tc_dropped[tc_index] += 1;
1815         subport->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
1816         subport->stats.n_pkts_cman_dropped[tc_index] += n_pkts_cman_dropped;
1817 }
1818
1819 static inline void
1820 rte_sched_port_update_queue_stats(struct rte_sched_subport *subport,
1821         uint32_t qindex,
1822         struct rte_mbuf *pkt)
1823 {
1824         struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1825         uint32_t pkt_len = pkt->pkt_len;
1826
1827         qe->stats.n_pkts += 1;
1828         qe->stats.n_bytes += pkt_len;
1829 }
1830
1831 static inline void
1832 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
1833         uint32_t qindex,
1834         struct rte_mbuf *pkt,
1835         __rte_unused uint32_t n_pkts_cman_dropped)
1836 {
1837         struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1838         uint32_t pkt_len = pkt->pkt_len;
1839
1840         qe->stats.n_pkts_dropped += 1;
1841         qe->stats.n_bytes_dropped += pkt_len;
1842 #ifdef RTE_SCHED_CMAN
1843         if (subport->cman_enabled)
1844                 qe->stats.n_pkts_cman_dropped += n_pkts_cman_dropped;
1845 #endif
1846 }
1847
1848 #endif /* RTE_SCHED_COLLECT_STATS */
1849
1850 #ifdef RTE_SCHED_CMAN
1851
1852 static inline int
1853 rte_sched_port_cman_drop(struct rte_sched_port *port,
1854         struct rte_sched_subport *subport,
1855         struct rte_mbuf *pkt,
1856         uint32_t qindex,
1857         uint16_t qlen)
1858 {
1859         if (!subport->cman_enabled)
1860                 return 0;
1861
1862         struct rte_sched_queue_extra *qe;
1863         uint32_t tc_index;
1864
1865         tc_index = rte_sched_port_pipe_tc(port, qindex);
1866         qe = subport->queue_extra + qindex;
1867
1868         /* RED */
1869         if (subport->cman == RTE_SCHED_CMAN_RED) {
1870                 struct rte_red_config *red_cfg;
1871                 struct rte_red *red;
1872                 enum rte_color color;
1873
1874                 color = rte_sched_port_pkt_read_color(pkt);
1875                 red_cfg = &subport->red_config[tc_index][color];
1876
1877                 if ((red_cfg->min_th | red_cfg->max_th) == 0)
1878                         return 0;
1879
1880                 red = &qe->red;
1881
1882                 return rte_red_enqueue(red_cfg, red, qlen, port->time);
1883         }
1884
1885         /* PIE */
1886         struct rte_pie_config *pie_cfg = &subport->pie_config[tc_index];
1887         struct rte_pie *pie = &qe->pie;
1888
1889         return rte_pie_enqueue(pie_cfg, pie, qlen, pkt->pkt_len, port->time_cpu_cycles);
1890 }
1891
1892 static inline void
1893 rte_sched_port_red_set_queue_empty_timestamp(struct rte_sched_port *port,
1894         struct rte_sched_subport *subport, uint32_t qindex)
1895 {
1896         if (subport->cman_enabled) {
1897                 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1898                 if (subport->cman == RTE_SCHED_CMAN_RED) {
1899                         struct rte_red *red = &qe->red;
1900
1901                         rte_red_mark_queue_empty(red, port->time);
1902                 }
1903         }
1904 }
1905
1906 static inline void
1907 rte_sched_port_pie_dequeue(struct rte_sched_subport *subport,
1908         uint32_t qindex, uint32_t pkt_len, uint64_t time) {
1909         if (subport->cman_enabled && subport->cman == RTE_SCHED_CMAN_PIE) {
1910                 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1911                 struct rte_pie *pie = &qe->pie;
1912
1913                 /* Update queue length */
1914                 pie->qlen -= 1;
1915                 pie->qlen_bytes -= pkt_len;
1916
1917                 rte_pie_dequeue(pie, pkt_len, time);
1918         }
1919 }
1920
1921 #else
1922
1923 static inline int rte_sched_port_cman_drop(struct rte_sched_port *port __rte_unused,
1924         struct rte_sched_subport *subport __rte_unused,
1925         struct rte_mbuf *pkt __rte_unused,
1926         uint32_t qindex __rte_unused,
1927         uint16_t qlen __rte_unused)
1928 {
1929         return 0;
1930 }
1931
1932 #define rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex)
1933
1934 static inline void
1935 rte_sched_port_pie_dequeue(struct rte_sched_subport *subport __rte_unused,
1936         uint32_t qindex __rte_unused,
1937         uint32_t pkt_len __rte_unused,
1938         uint64_t time __rte_unused) {
1939         /* Do nothing when RTE_SCHED_CMAN is not defined */
1940 }
1941
1942 #endif /* RTE_SCHED_CMAN */
1943
1944 #ifdef RTE_SCHED_DEBUG
1945
1946 static inline void
1947 debug_check_queue_slab(struct rte_sched_subport *subport, uint32_t bmp_pos,
1948                        uint64_t bmp_slab)
1949 {
1950         uint64_t mask;
1951         uint32_t i, panic;
1952
1953         if (bmp_slab == 0)
1954                 rte_panic("Empty slab at position %u\n", bmp_pos);
1955
1956         panic = 0;
1957         for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
1958                 if (mask & bmp_slab) {
1959                         if (rte_sched_port_queue_is_empty(subport, bmp_pos + i)) {
1960                                 printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i);
1961                                 panic = 1;
1962                         }
1963                 }
1964         }
1965
1966         if (panic)
1967                 rte_panic("Empty queues in slab 0x%" PRIx64 " starting at position %u\n",
1968                         bmp_slab, bmp_pos);
1969 }
1970
1971 #endif /* RTE_SCHED_DEBUG */
1972
1973 static inline struct rte_sched_subport *
1974 rte_sched_port_subport(struct rte_sched_port *port,
1975         struct rte_mbuf *pkt)
1976 {
1977         uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
1978         uint32_t subport_id = queue_id >> (port->n_pipes_per_subport_log2 + 4);
1979
1980         return port->subports[subport_id];
1981 }
1982
1983 static inline uint32_t
1984 rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_subport *subport,
1985         struct rte_mbuf *pkt, uint32_t subport_qmask)
1986 {
1987         struct rte_sched_queue *q;
1988 #ifdef RTE_SCHED_COLLECT_STATS
1989         struct rte_sched_queue_extra *qe;
1990 #endif
1991         uint32_t qindex = rte_mbuf_sched_queue_get(pkt);
1992         uint32_t subport_queue_id = subport_qmask & qindex;
1993
1994         q = subport->queue + subport_queue_id;
1995         rte_prefetch0(q);
1996 #ifdef RTE_SCHED_COLLECT_STATS
1997         qe = subport->queue_extra + subport_queue_id;
1998         rte_prefetch0(qe);
1999 #endif
2000
2001         return subport_queue_id;
2002 }
2003
2004 static inline void
2005 rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port,
2006         struct rte_sched_subport *subport,
2007         uint32_t qindex,
2008         struct rte_mbuf **qbase)
2009 {
2010         struct rte_sched_queue *q;
2011         struct rte_mbuf **q_qw;
2012         uint16_t qsize;
2013
2014         q = subport->queue + qindex;
2015         qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2016         q_qw = qbase + (q->qw & (qsize - 1));
2017
2018         rte_prefetch0(q_qw);
2019         rte_bitmap_prefetch0(subport->bmp, qindex);
2020 }
2021
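/*
 * Write one packet to its queue ("qwa" presumably stands for queue write
 * access): the packet is dropped when the congestion management algorithm
 * requests it or when the queue is full; otherwise it is stored at the
 * write pointer and the queue is marked active in the subport bitmap.
 * Returns 1 when the packet is enqueued, 0 when it is dropped.
 */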
2022 static inline int
2023 rte_sched_port_enqueue_qwa(struct rte_sched_port *port,
2024         struct rte_sched_subport *subport,
2025         uint32_t qindex,
2026         struct rte_mbuf **qbase,
2027         struct rte_mbuf *pkt)
2028 {
2029         struct rte_sched_queue *q;
2030         uint16_t qsize;
2031         uint16_t qlen;
2032
2033         q = subport->queue + qindex;
2034         qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2035         qlen = q->qw - q->qr;
2036
2037         /* Drop the packet (and update drop stats) when queue is full */
2038         if (unlikely(rte_sched_port_cman_drop(port, subport, pkt, qindex, qlen) ||
2039                      (qlen >= qsize))) {
2040                 rte_pktmbuf_free(pkt);
2041 #ifdef RTE_SCHED_COLLECT_STATS
2042                 rte_sched_port_update_subport_stats_on_drop(port, subport,
2043                         qindex, pkt, qlen < qsize);
2044                 rte_sched_port_update_queue_stats_on_drop(subport, qindex, pkt,
2045                         qlen < qsize);
2046 #endif
2047                 return 0;
2048         }
2049
2050         /* Enqueue packet */
2051         qbase[q->qw & (qsize - 1)] = pkt;
2052         q->qw++;
2053
2054         /* Activate queue in the subport bitmap */
2055         rte_bitmap_set(subport->bmp, qindex);
2056
2057         /* Statistics */
2058 #ifdef RTE_SCHED_COLLECT_STATS
2059         rte_sched_port_update_subport_stats(port, subport, qindex, pkt);
2060         rte_sched_port_update_queue_stats(subport, qindex, pkt);
2061 #endif
2062
2063         return 1;
2064 }
2065
2066
2067 /*
2068  * The enqueue function implements a 4-level pipeline with each stage
2069  * processing two different packets. The purpose of using a pipeline
2070  * is to hide the latency of prefetching the data structures. The
2071  * naming convention is presented in the diagram below:
2072  *
2073  *   p00  _______   p10  _______   p20  _______   p30  _______
2074  * ----->|       |----->|       |----->|       |----->|       |----->
2075  *       |   0   |      |   1   |      |   2   |      |   3   |
2076  * ----->|_______|----->|_______|----->|_______|----->|_______|----->
2077  *   p01            p11            p21            p31
2078  *
2079  */
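/*
 * In the pXY names above and in the code below, X is the pipeline stage
 * (0..3) currently handling the packet and Y selects one of the two
 * packets processed per stage in each iteration.
 */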
2080 int
2081 rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,
2082                        uint32_t n_pkts)
2083 {
2084         struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21,
2085                 *pkt30, *pkt31, *pkt_last;
2086         struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base,
2087                 **q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
2088         struct rte_sched_subport *subport00, *subport01, *subport10, *subport11,
2089                 *subport20, *subport21, *subport30, *subport31, *subport_last;
2090         uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
2091         uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
2092         uint32_t subport_qmask;
2093         uint32_t result, i;
2094
2095         result = 0;
2096         subport_qmask = (1 << (port->n_pipes_per_subport_log2 + 4)) - 1;
2097
2098         /*
2099          * Fewer than 6 input packets available, which is not enough to
2100          * feed the pipeline
2101          */
2102         if (unlikely(n_pkts < 6)) {
2103                 struct rte_sched_subport *subports[5];
2104                 struct rte_mbuf **q_base[5];
2105                 uint32_t q[5];
2106
2107                 /* Prefetch the mbuf structure of each packet */
2108                 for (i = 0; i < n_pkts; i++)
2109                         rte_prefetch0(pkts[i]);
2110
2111                 /* Prefetch the subport structure for each packet */
2112                 for (i = 0; i < n_pkts; i++)
2113                         subports[i] = rte_sched_port_subport(port, pkts[i]);
2114
2115                 /* Prefetch the queue structure for each queue */
2116                 for (i = 0; i < n_pkts; i++)
2117                         q[i] = rte_sched_port_enqueue_qptrs_prefetch0(subports[i],
2118                                         pkts[i], subport_qmask);
2119
2120                 /* Prefetch the write pointer location of each queue */
2121                 for (i = 0; i < n_pkts; i++) {
2122                         q_base[i] = rte_sched_subport_pipe_qbase(subports[i], q[i]);
2123                         rte_sched_port_enqueue_qwa_prefetch0(port, subports[i],
2124                                 q[i], q_base[i]);
2125                 }
2126
2127                 /* Write each packet to its queue */
2128                 for (i = 0; i < n_pkts; i++)
2129                         result += rte_sched_port_enqueue_qwa(port, subports[i],
2130                                                 q[i], q_base[i], pkts[i]);
2131
2132                 return result;
2133         }
2134
2135         /* Feed the first 3 stages of the pipeline (6 packets needed) */
2136         pkt20 = pkts[0];
2137         pkt21 = pkts[1];
2138         rte_prefetch0(pkt20);
2139         rte_prefetch0(pkt21);
2140
2141         pkt10 = pkts[2];
2142         pkt11 = pkts[3];
2143         rte_prefetch0(pkt10);
2144         rte_prefetch0(pkt11);
2145
2146         subport20 = rte_sched_port_subport(port, pkt20);
2147         subport21 = rte_sched_port_subport(port, pkt21);
2148         q20 = rte_sched_port_enqueue_qptrs_prefetch0(subport20,
2149                         pkt20, subport_qmask);
2150         q21 = rte_sched_port_enqueue_qptrs_prefetch0(subport21,
2151                         pkt21, subport_qmask);
2152
2153         pkt00 = pkts[4];
2154         pkt01 = pkts[5];
2155         rte_prefetch0(pkt00);
2156         rte_prefetch0(pkt01);
2157
2158         subport10 = rte_sched_port_subport(port, pkt10);
2159         subport11 = rte_sched_port_subport(port, pkt11);
2160         q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
2161                         pkt10, subport_qmask);
2162         q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
2163                         pkt11, subport_qmask);
2164
2165         q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
2166         q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
2167         rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
2168         rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);
2169
2170         /* Run the pipeline */
2171         for (i = 6; i < (n_pkts & (~1)); i += 2) {
2172                 /* Propagate stage inputs */
2173                 pkt30 = pkt20;
2174                 pkt31 = pkt21;
2175                 pkt20 = pkt10;
2176                 pkt21 = pkt11;
2177                 pkt10 = pkt00;
2178                 pkt11 = pkt01;
2179                 q30 = q20;
2180                 q31 = q21;
2181                 q20 = q10;
2182                 q21 = q11;
2183                 subport30 = subport20;
2184                 subport31 = subport21;
2185                 subport20 = subport10;
2186                 subport21 = subport11;
2187                 q30_base = q20_base;
2188                 q31_base = q21_base;
2189
2190                 /* Stage 0: Get packets in */
2191                 pkt00 = pkts[i];
2192                 pkt01 = pkts[i + 1];
2193                 rte_prefetch0(pkt00);
2194                 rte_prefetch0(pkt01);
2195
2196                 /* Stage 1: Prefetch the subport and queue structures storing the queue pointers */
2197                 subport10 = rte_sched_port_subport(port, pkt10);
2198                 subport11 = rte_sched_port_subport(port, pkt11);
2199                 q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
2200                                 pkt10, subport_qmask);
2201                 q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
2202                                 pkt11, subport_qmask);
2203
2204                 /* Stage 2: Prefetch queue write location */
2205                 q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
2206                 q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
2207                 rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
2208                 rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);
2209
2210                 /* Stage 3: Write packet to queue and activate queue */
2211                 r30 = rte_sched_port_enqueue_qwa(port, subport30,
2212                                 q30, q30_base, pkt30);
2213                 r31 = rte_sched_port_enqueue_qwa(port, subport31,
2214                                 q31, q31_base, pkt31);
2215                 result += r30 + r31;
2216         }
2217
2218         /*
2219          * Drain the pipeline (exactly 6 packets).
2220          * Handle the last packet in the case
2221          * of an odd number of input packets.
2222          */
2223         pkt_last = pkts[n_pkts - 1];
2224         rte_prefetch0(pkt_last);
2225
2226         subport00 = rte_sched_port_subport(port, pkt00);
2227         subport01 = rte_sched_port_subport(port, pkt01);
2228         q00 = rte_sched_port_enqueue_qptrs_prefetch0(subport00,
2229                         pkt00, subport_qmask);
2230         q01 = rte_sched_port_enqueue_qptrs_prefetch0(subport01,
2231                         pkt01, subport_qmask);
2232
2233         q10_base = rte_sched_subport_pipe_qbase(subport10, q10);
2234         q11_base = rte_sched_subport_pipe_qbase(subport11, q11);
2235         rte_sched_port_enqueue_qwa_prefetch0(port, subport10, q10, q10_base);
2236         rte_sched_port_enqueue_qwa_prefetch0(port, subport11, q11, q11_base);
2237
2238         r20 = rte_sched_port_enqueue_qwa(port, subport20,
2239                         q20, q20_base, pkt20);
2240         r21 = rte_sched_port_enqueue_qwa(port, subport21,
2241                         q21, q21_base, pkt21);
2242         result += r20 + r21;
2243
2244         subport_last = rte_sched_port_subport(port, pkt_last);
2245         q_last = rte_sched_port_enqueue_qptrs_prefetch0(subport_last,
2246                                 pkt_last, subport_qmask);
2247
2248         q00_base = rte_sched_subport_pipe_qbase(subport00, q00);
2249         q01_base = rte_sched_subport_pipe_qbase(subport01, q01);
2250         rte_sched_port_enqueue_qwa_prefetch0(port, subport00, q00, q00_base);
2251         rte_sched_port_enqueue_qwa_prefetch0(port, subport01, q01, q01_base);
2252
2253         r10 = rte_sched_port_enqueue_qwa(port, subport10, q10,
2254                         q10_base, pkt10);
2255         r11 = rte_sched_port_enqueue_qwa(port, subport11, q11,
2256                         q11_base, pkt11);
2257         result += r10 + r11;
2258
2259         q_last_base = rte_sched_subport_pipe_qbase(subport_last, q_last);
2260         rte_sched_port_enqueue_qwa_prefetch0(port, subport_last,
2261                 q_last, q_last_base);
2262
2263         r00 = rte_sched_port_enqueue_qwa(port, subport00, q00,
2264                         q00_base, pkt00);
2265         r01 = rte_sched_port_enqueue_qwa(port, subport01, q01,
2266                         q01_base, pkt01);
2267         result += r00 + r01;
2268
2269         if (n_pkts & 1) {
2270                 r_last = rte_sched_port_enqueue_qwa(port, subport_last,
2271                                         q_last, q_last_base, pkt_last);
2272                 result += r_last;
2273         }
2274
2275         return result;
2276 }
2277
2278 #ifndef RTE_SCHED_SUBPORT_TC_OV
2279
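/*
 * Refill the subport and pipe token buckets in whole tb_period units
 * (credits += elapsed_periods * tb_credits_per_period, capped at tb_size)
 * and reset the per traffic class credit counters to
 * tc_credits_per_period once the corresponding tc_period has elapsed.
 */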
2280 static inline void
2281 grinder_credits_update(struct rte_sched_port *port,
2282         struct rte_sched_subport *subport, uint32_t pos)
2283 {
2284         struct rte_sched_grinder *grinder = subport->grinder + pos;
2285         struct rte_sched_pipe *pipe = grinder->pipe;
2286         struct rte_sched_pipe_profile *params = grinder->pipe_params;
2287         struct rte_sched_subport_profile *sp = grinder->subport_params;
2288         uint64_t n_periods;
2289         uint32_t i;
2290
2291         /* Subport TB */
2292         n_periods = (port->time - subport->tb_time) / sp->tb_period;
2293         subport->tb_credits += n_periods * sp->tb_credits_per_period;
2294         subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
2295         subport->tb_time += n_periods * sp->tb_period;
2296
2297         /* Pipe TB */
2298         n_periods = (port->time - pipe->tb_time) / params->tb_period;
2299         pipe->tb_credits += n_periods * params->tb_credits_per_period;
2300         pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
2301         pipe->tb_time += n_periods * params->tb_period;
2302
2303         /* Subport TCs */
2304         if (unlikely(port->time >= subport->tc_time)) {
2305                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2306                         subport->tc_credits[i] = sp->tc_credits_per_period[i];
2307
2308                 subport->tc_time = port->time + sp->tc_period;
2309         }
2310
2311         /* Pipe TCs */
2312         if (unlikely(port->time >= pipe->tc_time)) {
2313                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2314                         pipe->tc_credits[i] = params->tc_credits_per_period[i];
2315
2316                 pipe->tc_time = port->time + params->tc_period;
2317         }
2318 }
2319
2320 #else
2321
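/*
 * Adapt the best-effort oversubscription watermark once per subport
 * tc_period. Roughly: when the best-effort class consumed almost all of
 * the credits left over by the higher priority classes (within one MTU),
 * the watermark decays by 1/128 of its value, clamped to tc_ov_wm_min;
 * otherwise it grows by 1/128 of its value plus one credit, clamped to
 * tc_ov_wm_max. While oversubscription is inactive, the watermark is
 * pinned at tc_ov_wm_max.
 */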
2322 static inline uint64_t
2323 grinder_tc_ov_credits_update(struct rte_sched_port *port,
2324         struct rte_sched_subport *subport, uint32_t pos)
2325 {
2326         struct rte_sched_grinder *grinder = subport->grinder + pos;
2327         struct rte_sched_subport_profile *sp = grinder->subport_params;
2328         uint64_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2329         uint64_t tc_consumption = 0, tc_ov_consumption_max;
2330         uint64_t tc_ov_wm = subport->tc_ov_wm;
2331         uint32_t i;
2332
2333         if (subport->tc_ov == 0)
2334                 return subport->tc_ov_wm_max;
2335
2336         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
2337                 tc_ov_consumption[i] = sp->tc_credits_per_period[i] -
2338                                         subport->tc_credits[i];
2339                 tc_consumption += tc_ov_consumption[i];
2340         }
2341
2342         tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] =
2343                 sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
2344                 subport->tc_credits[RTE_SCHED_TRAFFIC_CLASS_BE];
2345
2346         tc_ov_consumption_max =
2347                 sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
2348                 tc_consumption;
2349
2350         if (tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] >
2351                 (tc_ov_consumption_max - port->mtu)) {
2352                 tc_ov_wm -= tc_ov_wm >> 7;
2353                 if (tc_ov_wm < subport->tc_ov_wm_min)
2354                         tc_ov_wm = subport->tc_ov_wm_min;
2355
2356                 return tc_ov_wm;
2357         }
2358
2359         tc_ov_wm += (tc_ov_wm >> 7) + 1;
2360         if (tc_ov_wm > subport->tc_ov_wm_max)
2361                 tc_ov_wm = subport->tc_ov_wm_max;
2362
2363         return tc_ov_wm;
2364 }
2365
2366 static inline void
2367 grinder_credits_update(struct rte_sched_port *port,
2368         struct rte_sched_subport *subport, uint32_t pos)
2369 {
2370         struct rte_sched_grinder *grinder = subport->grinder + pos;
2371         struct rte_sched_pipe *pipe = grinder->pipe;
2372         struct rte_sched_pipe_profile *params = grinder->pipe_params;
2373         struct rte_sched_subport_profile *sp = grinder->subport_params;
2374         uint64_t n_periods;
2375         uint32_t i;
2376
2377         /* Subport TB */
2378         n_periods = (port->time - subport->tb_time) / sp->tb_period;
2379         subport->tb_credits += n_periods * sp->tb_credits_per_period;
2380         subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
2381         subport->tb_time += n_periods * sp->tb_period;
2382
2383         /* Pipe TB */
2384         n_periods = (port->time - pipe->tb_time) / params->tb_period;
2385         pipe->tb_credits += n_periods * params->tb_credits_per_period;
2386         pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
2387         pipe->tb_time += n_periods * params->tb_period;
2388
2389         /* Subport TCs */
2390         if (unlikely(port->time >= subport->tc_time)) {
2391                 subport->tc_ov_wm =
2392                         grinder_tc_ov_credits_update(port, subport, pos);
2393
2394                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2395                         subport->tc_credits[i] = sp->tc_credits_per_period[i];
2396
2397                 subport->tc_time = port->time + sp->tc_period;
2398                 subport->tc_ov_period_id++;
2399         }
2400
2401         /* Pipe TCs */
2402         if (unlikely(port->time >= pipe->tc_time)) {
2403                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2404                         pipe->tc_credits[i] = params->tc_credits_per_period[i];
2405                 pipe->tc_time = port->time + params->tc_period;
2406         }
2407
2408         /* Pipe TCs - Oversubscription */
2409         if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
2410                 pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;
2411
2412                 pipe->tc_ov_period_id = subport->tc_ov_period_id;
2413         }
2414 }
2415
2416 #endif /* RTE_SCHED_SUBPORT_TC_OV */
2417
2418
2419 #ifndef RTE_SCHED_SUBPORT_TC_OV
2420
2421 static inline int
2422 grinder_credits_check(struct rte_sched_port *port,
2423         struct rte_sched_subport *subport, uint32_t pos)
2424 {
2425         struct rte_sched_grinder *grinder = subport->grinder + pos;
2426         struct rte_sched_pipe *pipe = grinder->pipe;
2427         struct rte_mbuf *pkt = grinder->pkt;
2428         uint32_t tc_index = grinder->tc_index;
2429         uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
2430         uint64_t subport_tb_credits = subport->tb_credits;
2431         uint64_t subport_tc_credits = subport->tc_credits[tc_index];
2432         uint64_t pipe_tb_credits = pipe->tb_credits;
2433         uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
2434         int enough_credits;
2435
2436         /* Check pipe and subport credits */
2437         enough_credits = (pkt_len <= subport_tb_credits) &&
2438                 (pkt_len <= subport_tc_credits) &&
2439                 (pkt_len <= pipe_tb_credits) &&
2440                 (pkt_len <= pipe_tc_credits);
2441
2442         if (!enough_credits)
2443                 return 0;
2444
2445         /* Update pipe and subport credits */
2446         subport->tb_credits -= pkt_len;
2447         subport->tc_credits[tc_index] -= pkt_len;
2448         pipe->tb_credits -= pkt_len;
2449         pipe->tc_credits[tc_index] -= pkt_len;
2450
2451         return 1;
2452 }
2453
2454 #else
2455
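/*
 * Same as above, plus branchless handling of the best-effort
 * oversubscription credits: pipe_tc_ov_mask1[] holds all-ones for the
 * high priority traffic classes (so the pipe_tc_ov_credits check always
 * passes for them) and the actual tc_ov_credits for the best-effort
 * class, while pipe_tc_ov_mask2[] is non-zero only for the best-effort
 * class, so only best-effort packets consume oversubscription credits.
 */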
2456 static inline int
2457 grinder_credits_check(struct rte_sched_port *port,
2458         struct rte_sched_subport *subport, uint32_t pos)
2459 {
2460         struct rte_sched_grinder *grinder = subport->grinder + pos;
2461         struct rte_sched_pipe *pipe = grinder->pipe;
2462         struct rte_mbuf *pkt = grinder->pkt;
2463         uint32_t tc_index = grinder->tc_index;
2464         uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
2465         uint64_t subport_tb_credits = subport->tb_credits;
2466         uint64_t subport_tc_credits = subport->tc_credits[tc_index];
2467         uint64_t pipe_tb_credits = pipe->tb_credits;
2468         uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
2469         uint64_t pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2470         uint64_t pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {0};
2471         uint64_t pipe_tc_ov_credits;
2472         uint32_t i;
2473         int enough_credits;
2474
2475         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2476                 pipe_tc_ov_mask1[i] = ~0LLU;
2477
2478         pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASS_BE] = pipe->tc_ov_credits;
2479         pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASS_BE] = ~0LLU;
2480         pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
2481
2482         /* Check pipe and subport credits */
2483         enough_credits = (pkt_len <= subport_tb_credits) &&
2484                 (pkt_len <= subport_tc_credits) &&
2485                 (pkt_len <= pipe_tb_credits) &&
2486                 (pkt_len <= pipe_tc_credits) &&
2487                 (pkt_len <= pipe_tc_ov_credits);
2488
2489         if (!enough_credits)
2490                 return 0;
2491
2492         /* Update pipe and subport credits */
2493         subport->tb_credits -= pkt_len;
2494         subport->tc_credits[tc_index] -= pkt_len;
2495         pipe->tb_credits -= pkt_len;
2496         pipe->tc_credits[tc_index] -= pkt_len;
2497         pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;
2498
2499         return 1;
2500 }
2501
2502 #endif /* RTE_SCHED_SUBPORT_TC_OV */
2503
2504
2505 static inline int
2506 grinder_schedule(struct rte_sched_port *port,
2507         struct rte_sched_subport *subport, uint32_t pos)
2508 {
2509         struct rte_sched_grinder *grinder = subport->grinder + pos;
2510         struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
2511         uint32_t qindex = grinder->qindex[grinder->qpos];
2512         struct rte_mbuf *pkt = grinder->pkt;
2513         uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
2514         uint32_t be_tc_active;
2515
2516         if (!grinder_credits_check(port, subport, pos))
2517                 return 0;
2518
2519         /* Advance port time */
2520         port->time += pkt_len;
2521
2522         /* Send packet */
2523         port->pkts_out[port->n_pkts_out++] = pkt;
2524         queue->qr++;
2525
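        /*
         * be_tc_active is an all-ones mask when the current traffic class is
         * best-effort and all-zeroes otherwise, so the WRR token charge below
         * only applies to best-effort queues.
         */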
2526         be_tc_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE) ? ~0x0 : 0x0;
2527         grinder->wrr_tokens[grinder->qpos] +=
2528                 (pkt_len * grinder->wrr_cost[grinder->qpos]) & be_tc_active;
2529
2530         if (queue->qr == queue->qw) {
2531                 rte_bitmap_clear(subport->bmp, qindex);
2532                 grinder->qmask &= ~(1 << grinder->qpos);
2533                 if (be_tc_active)
2534                         grinder->wrr_mask[grinder->qpos] = 0;
2535
2536                 rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex);
2537         }
2538
2539         rte_sched_port_pie_dequeue(subport, qindex, pkt_len, port->time_cpu_cycles);
2540
2541         /* Reset pipe loop detection */
2542         subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
2543         grinder->productive = 1;
2544
2545         return 1;
2546 }
2547
2548 #ifdef SCHED_VECTOR_SSE4
2549
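/*
 * Check whether the pipe group starting at base_pipe is already owned by
 * another grinder: the two 128-bit loads compare base_pipe against all
 * eight grinder base bitmap positions at once.
 */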
2550 static inline int
2551 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2552 {
2553         __m128i index = _mm_set1_epi32(base_pipe);
2554         __m128i pipes = _mm_load_si128((__m128i *)subport->grinder_base_bmp_pos);
2555         __m128i res = _mm_cmpeq_epi32(pipes, index);
2556
2557         pipes = _mm_load_si128((__m128i *)(subport->grinder_base_bmp_pos + 4));
2558         pipes = _mm_cmpeq_epi32(pipes, index);
2559         res = _mm_or_si128(res, pipes);
2560
2561         if (_mm_testz_si128(res, res))
2562                 return 0;
2563
2564         return 1;
2565 }
2566
2567 #elif defined(SCHED_VECTOR_NEON)
2568
2569 static inline int
2570 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2571 {
2572         uint32x4_t index, pipes;
2573         uint32_t *pos = (uint32_t *)subport->grinder_base_bmp_pos;
2574
2575         index = vmovq_n_u32(base_pipe);
2576         pipes = vld1q_u32(pos);
2577         if (!vminvq_u32(veorq_u32(pipes, index)))
2578                 return 1;
2579
2580         pipes = vld1q_u32(pos + 4);
2581         if (!vminvq_u32(veorq_u32(pipes, index)))
2582                 return 1;
2583
2584         return 0;
2585 }
2586
2587 #else
2588
2589 static inline int
2590 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2591 {
2592         uint32_t i;
2593
2594         for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) {
2595                 if (subport->grinder_base_bmp_pos[i] == base_pipe)
2596                         return 1;
2597         }
2598
2599         return 0;
2600 }
2601
2602 #endif /* SCHED_VECTOR_SSE4, SCHED_VECTOR_NEON */
2603
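/*
 * Populate the grinder pipe cache from a 64-bit bitmap slab: the slab
 * covers 4 consecutive pipes (16 queue bits each), so it is split into
 * four 16-bit queue masks and only the non-empty ones are stored.
 */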
2604 static inline void
2605 grinder_pcache_populate(struct rte_sched_subport *subport,
2606         uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
2607 {
2608         struct rte_sched_grinder *grinder = subport->grinder + pos;
2609         uint16_t w[4];
2610
2611         grinder->pcache_w = 0;
2612         grinder->pcache_r = 0;
2613
2614         w[0] = (uint16_t) bmp_slab;
2615         w[1] = (uint16_t) (bmp_slab >> 16);
2616         w[2] = (uint16_t) (bmp_slab >> 32);
2617         w[3] = (uint16_t) (bmp_slab >> 48);
2618
2619         grinder->pcache_qmask[grinder->pcache_w] = w[0];
2620         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
2621         grinder->pcache_w += (w[0] != 0);
2622
2623         grinder->pcache_qmask[grinder->pcache_w] = w[1];
2624         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
2625         grinder->pcache_w += (w[1] != 0);
2626
2627         grinder->pcache_qmask[grinder->pcache_w] = w[2];
2628         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
2629         grinder->pcache_w += (w[2] != 0);
2630
2631         grinder->pcache_qmask[grinder->pcache_w] = w[3];
2632         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
2633         grinder->pcache_w += (w[3] != 0);
2634 }
2635
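/*
 * Populate the grinder traffic class cache from the 16-bit pipe queue
 * mask: one entry per active high priority traffic class (one queue
 * each), plus one entry holding the 4-bit mask of the active best-effort
 * queues.
 */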
2636 static inline void
2637 grinder_tccache_populate(struct rte_sched_subport *subport,
2638         uint32_t pos, uint32_t qindex, uint16_t qmask)
2639 {
2640         struct rte_sched_grinder *grinder = subport->grinder + pos;
2641         uint8_t b, i;
2642
2643         grinder->tccache_w = 0;
2644         grinder->tccache_r = 0;
2645
2646         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
2647                 b = (uint8_t) ((qmask >> i) & 0x1);
2648                 grinder->tccache_qmask[grinder->tccache_w] = b;
2649                 grinder->tccache_qindex[grinder->tccache_w] = qindex + i;
2650                 grinder->tccache_w += (b != 0);
2651         }
2652
2653         b = (uint8_t) (qmask >> (RTE_SCHED_TRAFFIC_CLASS_BE));
2654         grinder->tccache_qmask[grinder->tccache_w] = b;
2655         grinder->tccache_qindex[grinder->tccache_w] = qindex +
2656                 RTE_SCHED_TRAFFIC_CLASS_BE;
2657         grinder->tccache_w += (b != 0);
2658 }
2659
2660 static inline int
2661 grinder_next_tc(struct rte_sched_port *port,
2662         struct rte_sched_subport *subport, uint32_t pos)
2663 {
2664         struct rte_sched_grinder *grinder = subport->grinder + pos;
2665         struct rte_mbuf **qbase;
2666         uint32_t qindex;
2667         uint16_t qsize;
2668
2669         if (grinder->tccache_r == grinder->tccache_w)
2670                 return 0;
2671
2672         qindex = grinder->tccache_qindex[grinder->tccache_r];
2673         qbase = rte_sched_subport_pipe_qbase(subport, qindex);
2674         qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2675
2676         grinder->tc_index = rte_sched_port_pipe_tc(port, qindex);
2677         grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
2678         grinder->qsize = qsize;
2679
2680         if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
2681                 grinder->queue[0] = subport->queue + qindex;
2682                 grinder->qbase[0] = qbase;
2683                 grinder->qindex[0] = qindex;
2684                 grinder->tccache_r++;
2685
2686                 return 1;
2687         }
2688
2689         grinder->queue[0] = subport->queue + qindex;
2690         grinder->queue[1] = subport->queue + qindex + 1;
2691         grinder->queue[2] = subport->queue + qindex + 2;
2692         grinder->queue[3] = subport->queue + qindex + 3;
2693
2694         grinder->qbase[0] = qbase;
2695         grinder->qbase[1] = qbase + qsize;
2696         grinder->qbase[2] = qbase + 2 * qsize;
2697         grinder->qbase[3] = qbase + 3 * qsize;
2698
2699         grinder->qindex[0] = qindex;
2700         grinder->qindex[1] = qindex + 1;
2701         grinder->qindex[2] = qindex + 2;
2702         grinder->qindex[3] = qindex + 3;
2703
2704         grinder->tccache_r++;
2705         return 1;
2706 }
2707
2708 static inline int
2709 grinder_next_pipe(struct rte_sched_port *port,
2710         struct rte_sched_subport *subport, uint32_t pos)
2711 {
2712         struct rte_sched_grinder *grinder = subport->grinder + pos;
2713         uint32_t pipe_qindex;
2714         uint16_t pipe_qmask;
2715
2716         if (grinder->pcache_r < grinder->pcache_w) {
2717                 pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
2718                 pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
2719                 grinder->pcache_r++;
2720         } else {
2721                 uint64_t bmp_slab = 0;
2722                 uint32_t bmp_pos = 0;
2723
2724                 /* Get another non-empty pipe group */
2725                 if (unlikely(rte_bitmap_scan(subport->bmp, &bmp_pos, &bmp_slab) <= 0))
2726                         return 0;
2727
2728 #ifdef RTE_SCHED_DEBUG
2729                 debug_check_queue_slab(subport, bmp_pos, bmp_slab);
2730 #endif
2731
2732                 /* Return if pipe group already in one of the other grinders */
2733                 subport->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
2734                 if (unlikely(grinder_pipe_exists(subport, bmp_pos)))
2735                         return 0;
2736
2737                 subport->grinder_base_bmp_pos[pos] = bmp_pos;
2738
2739                 /* Install new pipe group into grinder's pipe cache */
2740                 grinder_pcache_populate(subport, pos, bmp_pos, bmp_slab);
2741
2742                 pipe_qmask = grinder->pcache_qmask[0];
2743                 pipe_qindex = grinder->pcache_qindex[0];
2744                 grinder->pcache_r = 1;
2745         }
2746
2747         /* Install new pipe in the grinder */
2748         grinder->pindex = pipe_qindex >> 4;
2749         grinder->subport = subport;
2750         grinder->pipe = subport->pipe + grinder->pindex;
2751         grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
2752         grinder->productive = 0;
2753
2754         grinder_tccache_populate(subport, pos, pipe_qindex, pipe_qmask);
2755         grinder_next_tc(port, subport, pos);
2756
2757         /* Check for pipe exhaustion */
2758         if (grinder->pindex == subport->pipe_loop) {
2759                 subport->pipe_exhaustion = 1;
2760                 subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
2761         }
2762
2763         return 1;
2764 }
2765
2766
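/*
 * Load the WRR context for the 4 best-effort queues of the current pipe:
 * the 8-bit token counters persisted in the pipe are scaled up by
 * RTE_SCHED_WRR_SHIFT into 16-bit working copies, and a per queue
 * all-ones/all-zeroes mask is derived from the active queue mask.
 */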
2767 static inline void
2768 grinder_wrr_load(struct rte_sched_subport *subport, uint32_t pos)
2769 {
2770         struct rte_sched_grinder *grinder = subport->grinder + pos;
2771         struct rte_sched_pipe *pipe = grinder->pipe;
2772         struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
2773         uint32_t qmask = grinder->qmask;
2774
2775         grinder->wrr_tokens[0] =
2776                 ((uint16_t) pipe->wrr_tokens[0]) << RTE_SCHED_WRR_SHIFT;
2777         grinder->wrr_tokens[1] =
2778                 ((uint16_t) pipe->wrr_tokens[1]) << RTE_SCHED_WRR_SHIFT;
2779         grinder->wrr_tokens[2] =
2780                 ((uint16_t) pipe->wrr_tokens[2]) << RTE_SCHED_WRR_SHIFT;
2781         grinder->wrr_tokens[3] =
2782                 ((uint16_t) pipe->wrr_tokens[3]) << RTE_SCHED_WRR_SHIFT;
2783
2784         grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
2785         grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
2786         grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
2787         grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;
2788
2789         grinder->wrr_cost[0] = pipe_params->wrr_cost[0];
2790         grinder->wrr_cost[1] = pipe_params->wrr_cost[1];
2791         grinder->wrr_cost[2] = pipe_params->wrr_cost[2];
2792         grinder->wrr_cost[3] = pipe_params->wrr_cost[3];
2793 }
2794
2795 static inline void
2796 grinder_wrr_store(struct rte_sched_subport *subport, uint32_t pos)
2797 {
2798         struct rte_sched_grinder *grinder = subport->grinder + pos;
2799         struct rte_sched_pipe *pipe = grinder->pipe;
2800
2801         pipe->wrr_tokens[0] =
2802                         (grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >>
2803                                 RTE_SCHED_WRR_SHIFT;
2804         pipe->wrr_tokens[1] =
2805                         (grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >>
2806                                 RTE_SCHED_WRR_SHIFT;
2807         pipe->wrr_tokens[2] =
2808                         (grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >>
2809                                 RTE_SCHED_WRR_SHIFT;
2810         pipe->wrr_tokens[3] =
2811                         (grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >>
2812                                 RTE_SCHED_WRR_SHIFT;
2813 }
2814
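/*
 * Pick the next best-effort queue: inactive queues have their tokens
 * forced to 0xFFFF through the inverted mask so they never win, the
 * queue with the minimum token count is selected, and that minimum is
 * subtracted from all 4 counters to keep them bounded.
 */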
2815 static inline void
2816 grinder_wrr(struct rte_sched_subport *subport, uint32_t pos)
2817 {
2818         struct rte_sched_grinder *grinder = subport->grinder + pos;
2819         uint16_t wrr_tokens_min;
2820
2821         grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
2822         grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
2823         grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
2824         grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];
2825
2826         grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
2827         wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];
2828
2829         grinder->wrr_tokens[0] -= wrr_tokens_min;
2830         grinder->wrr_tokens[1] -= wrr_tokens_min;
2831         grinder->wrr_tokens[2] -= wrr_tokens_min;
2832         grinder->wrr_tokens[3] -= wrr_tokens_min;
2833 }
2834
2835
2836 #define grinder_evict(subport, pos)
2837
2838 static inline void
2839 grinder_prefetch_pipe(struct rte_sched_subport *subport, uint32_t pos)
2840 {
2841         struct rte_sched_grinder *grinder = subport->grinder + pos;
2842
2843         rte_prefetch0(grinder->pipe);
2844         rte_prefetch0(grinder->queue[0]);
2845 }
2846
2847 static inline void
2848 grinder_prefetch_tc_queue_arrays(struct rte_sched_subport *subport, uint32_t pos)
2849 {
2850         struct rte_sched_grinder *grinder = subport->grinder + pos;
2851         uint16_t qsize, qr[RTE_SCHED_MAX_QUEUES_PER_TC];
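/*
 * Congestion management on the enqueue path: run the RED or PIE
 * algorithm configured for this subport and return non-zero when the
 * packet should be dropped, zero when it may be enqueued.
 */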
2852
2853         qsize = grinder->qsize;
2854         grinder->qpos = 0;
2855
2856         if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
2857                 qr[0] = grinder->queue[0]->qr & (qsize - 1);
2858
2859                 rte_prefetch0(grinder->qbase[0] + qr[0]);
2860                 return;
2861         }
2862
2863         qr[0] = grinder->queue[0]->qr & (qsize - 1);
2864         qr[1] = grinder->queue[1]->qr & (qsize - 1);
2865         qr[2] = grinder->queue[2]->qr & (qsize - 1);
2866         qr[3] = grinder->queue[3]->qr & (qsize - 1);
2867
2868         rte_prefetch0(grinder->qbase[0] + qr[0]);
2869         rte_prefetch0(grinder->qbase[1] + qr[1]);
2870
2871         grinder_wrr_load(subport, pos);
2872         grinder_wrr(subport, pos);
2873
2874         rte_prefetch0(grinder->qbase[2] + qr[2]);
2875         rte_prefetch0(grinder->qbase[3] + qr[3]);
2876 }
2877
2878 static inline void
2879 grinder_prefetch_mbuf(struct rte_sched_subport *subport, uint32_t pos)
2880 {
2881         struct rte_sched_grinder *grinder = subport->grinder + pos;
2882         uint32_t qpos = grinder->qpos;
2883         struct rte_mbuf **qbase = grinder->qbase[qpos];
2884         uint16_t qsize = grinder->qsize;
2885         uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);
2886
2887         grinder->pkt = qbase[qr];
2888         rte_prefetch0(grinder->pkt);
2889
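        /*
         * The queue array holds 8 mbuf pointers per 64-byte cache line on
         * 64-bit targets; when the current slot is the last one of a line,
         * prefetch the line holding the next slot.
         */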
2890         if (unlikely((qr & 0x7) == 7)) {
2891                 uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);
2892
2893                 rte_prefetch0(qbase + qr_next);
2894         }
2895 }
2896
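/*
 * Grinder state machine: each invocation advances the grinder by one
 * state (PREFETCH_PIPE -> PREFETCH_TC_QUEUE_ARRAYS -> PREFETCH_MBUF ->
 * READ_MBUF), hiding prefetch latency behind the work done for the other
 * grinders. The return value is the number of packets scheduled during
 * this invocation (0 or 1).
 */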
static inline uint32_t
grinder_handle(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;

	switch (grinder->state) {
	case e_GRINDER_PREFETCH_PIPE:
	{
		if (grinder_next_pipe(port, subport, pos)) {
			grinder_prefetch_pipe(subport, pos);
			subport->busy_grinders++;

			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
			return 0;
		}

		return 0;
	}

	case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
	{
		struct rte_sched_pipe *pipe = grinder->pipe;

		grinder->pipe_params = subport->pipe_profiles + pipe->profile;
		grinder->subport_params = port->subport_profiles +
						subport->profile;

		grinder_prefetch_tc_queue_arrays(subport, pos);
		grinder_credits_update(port, subport, pos);

		grinder->state = e_GRINDER_PREFETCH_MBUF;
		return 0;
	}

	case e_GRINDER_PREFETCH_MBUF:
	{
		grinder_prefetch_mbuf(subport, pos);

		grinder->state = e_GRINDER_READ_MBUF;
		return 0;
	}

	case e_GRINDER_READ_MBUF:
	{
		uint32_t wrr_active, result = 0;

		result = grinder_schedule(port, subport, pos);

		wrr_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE);

		/* Look for the next packet within the same TC */
		if (result && grinder->qmask) {
			if (wrr_active)
				grinder_wrr(subport, pos);

			grinder_prefetch_mbuf(subport, pos);

			return 1;
		}

		if (wrr_active)
			grinder_wrr_store(subport, pos);

		/* Look for another active TC within the same pipe */
		if (grinder_next_tc(port, subport, pos)) {
			grinder_prefetch_tc_queue_arrays(subport, pos);

			grinder->state = e_GRINDER_PREFETCH_MBUF;
			return result;
		}

		if (grinder->productive == 0 &&
		    subport->pipe_loop == RTE_SCHED_PIPE_INVALID)
			subport->pipe_loop = grinder->pindex;

		grinder_evict(subport, pos);

		/* Look for another active pipe */
		if (grinder_next_pipe(port, subport, pos)) {
			grinder_prefetch_pipe(subport, pos);

			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
			return result;
		}

		/* No active pipe found */
		subport->busy_grinders--;

		grinder->state = e_GRINDER_PREFETCH_PIPE;
		return result;
	}

	default:
		rte_panic("Algorithmic error (invalid state)\n");
		return 0;
	}
}

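/*
 * Resynchronize the port byte clock with the CPU time stamp counter.
 * The elapsed CPU cycles are converted to bytes with a precomputed
 * reciprocal of cycles_per_byte (scaled by 2^RTE_SCHED_TIME_SHIFT to
 * keep the minimum configurable rate low), and the cycle counter is
 * advanced only by the cycles actually accounted for, so rounding
 * remainders carry over to the next resync. A TSC value lower than the
 * stored one (e.g. after wraparound) restarts the accounting from zero.
 */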
static inline void
rte_sched_port_time_resync(struct rte_sched_port *port)
{
	uint64_t cycles = rte_get_tsc_cycles();
	uint64_t cycles_diff;
	uint64_t bytes_diff;
	uint32_t i;

	if (cycles < port->time_cpu_cycles)
		port->time_cpu_cycles = 0;

	cycles_diff = cycles - port->time_cpu_cycles;
	/* Compute elapsed time in bytes */
	bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT,
					   port->inv_cycles_per_byte);

	/* Advance port time */
	port->time_cpu_cycles +=
		(bytes_diff * port->cycles_per_byte) >> RTE_SCHED_TIME_SHIFT;
	port->time_cpu_bytes += bytes_diff;
	if (port->time < port->time_cpu_bytes)
		port->time = port->time_cpu_bytes;

	/* Reset pipe loop detection */
	for (i = 0; i < port->n_subports_per_port; i++)
		port->subports[i]->pipe_loop = RTE_SCHED_PIPE_INVALID;
}

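/*
 * Report whether the dequeue loop should move on to the next subport:
 * either a full pass over all grinders found none busy (second_pass),
 * or the subport ran out of active pipes (pipe_exhaustion).
 */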
static inline int
rte_sched_port_exceptions(struct rte_sched_subport *subport, int second_pass)
{
	int exceptions;

	/* Check if any exception flag is set */
	exceptions = (second_pass && subport->busy_grinders == 0) ||
		(subport->pipe_exhaustion == 1);

	/* Clear exception flags */
	subport->pipe_exhaustion = 0;

	return exceptions;
}

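/*
 * Dequeue up to n_pkts packets from the port: resync the port time, then
 * round-robin over the grinders, switching to the next subport when the
 * current one raises an exception, until n_pkts packets are returned or
 * all subports have been visited.
 */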
int
rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_sched_subport *subport;
	uint32_t subport_id = port->subport_id;
	uint32_t i, n_subports = 0, count;

	port->pkts_out = pkts;
	port->n_pkts_out = 0;

	rte_sched_port_time_resync(port);

	/* Take each queue in the grinder one step further */
	for (i = 0, count = 0; ; i++) {
		subport = port->subports[subport_id];

		count += grinder_handle(port, subport,
				i & (RTE_SCHED_PORT_N_GRINDERS - 1));

		if (count == n_pkts) {
			subport_id++;

			if (subport_id == port->n_subports_per_port)
				subport_id = 0;

			port->subport_id = subport_id;
			break;
		}

		if (rte_sched_port_exceptions(subport, i >= RTE_SCHED_PORT_N_GRINDERS)) {
			i = 0;
			subport_id++;
			n_subports++;
		}

		if (subport_id == port->n_subports_per_port)
			subport_id = 0;

		if (n_subports == port->n_subports_per_port) {
			port->subport_id = subport_id;
			break;
		}
	}

	return count;
}
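
/*
 * Illustrative usage sketch (an assumption for clarity, not part of this
 * file): a run-to-completion TX thread draining the scheduler into an
 * Ethernet port. The port/queue identifiers and the burst size below are
 * hypothetical, and handling of packets left unsent by rte_eth_tx_burst()
 * is omitted.
 *
 *	struct rte_mbuf *pkts[64];
 *	int n;
 *
 *	for ( ; ; ) {
 *		n = rte_sched_port_dequeue(port, pkts, 64);
 *		if (n > 0)
 *			rte_eth_tx_burst(tx_port_id, tx_queue_id, pkts,
 *				(uint16_t)n);
 *	}
 */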