/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf.h>
#include <rte_bitmap.h>
#include <rte_reciprocal.h>

#include "rte_sched.h"
#include "rte_sched_common.h"
#include "rte_approx.h"

#ifdef __INTEL_COMPILER
#pragma warning(disable:2259) /* conversion may lose significant bits */
#endif

#ifdef RTE_SCHED_VECTOR
#include <rte_vect.h>

#ifdef RTE_ARCH_X86
#define SCHED_VECTOR_SSE4
#elif defined(__ARM_NEON)
#define SCHED_VECTOR_NEON
#endif

#endif

#define RTE_SCHED_TB_RATE_CONFIG_ERR          (1e-7)
#define RTE_SCHED_WRR_SHIFT                   3
#define RTE_SCHED_MAX_QUEUES_PER_TC           RTE_SCHED_BE_QUEUES_PER_PIPE
#define RTE_SCHED_GRINDER_PCACHE_SIZE         (64 / RTE_SCHED_QUEUES_PER_PIPE)
#define RTE_SCHED_PIPE_INVALID                UINT32_MAX
#define RTE_SCHED_BMP_POS_INVALID             UINT32_MAX

/* Scaling for cycles_per_byte calculation
 * Chosen so that minimum rate is 480 bit/sec
 */
#define RTE_SCHED_TIME_SHIFT                  8

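/*
 * Worked example for the scaling above (illustrative numbers, not part
 * of this file): with a 2.5 GHz TSC and a port rate of 1.25e9
 * bytes/sec (10 Gbps), rte_sched_port_config() computes
 *
 *     cycles_per_byte = (tsc_hz << RTE_SCHED_TIME_SHIFT) / rate
 *                     = (2.5e9 * 256) / 1.25e9 = 512
 *
 * i.e. cycles_per_byte is a fixed-point value with 8 fractional bits,
 * which keeps rates much slower than one byte per cycle representable.
 */
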
struct rte_sched_pipe_profile {
        /* Token bucket (TB) */
        uint64_t tb_period;
        uint64_t tb_credits_per_period;
        uint64_t tb_size;

        /* Pipe traffic classes */
        uint64_t tc_period;
        uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint8_t tc_ov_weight;

        /* Pipe best-effort traffic class queues */
        uint8_t  wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
};

struct rte_sched_pipe {
        /* Token bucket (TB) */
        uint64_t tb_time; /* time of last update */
        uint64_t tb_credits;

        /* Pipe profile and flags */
        uint32_t profile;

        /* Traffic classes (TCs) */
        uint64_t tc_time; /* time of next update */
        uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

        /* Weighted Round Robin (WRR) */
        uint8_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];

        /* TC oversubscription */
        uint64_t tc_ov_credits;
        uint8_t tc_ov_period_id;
} __rte_cache_aligned;

struct rte_sched_queue {
        uint16_t qw; /* Queue write position */
        uint16_t qr; /* Queue read position */
};

struct rte_sched_queue_extra {
        struct rte_sched_queue_stats stats;
#ifdef RTE_SCHED_RED
        struct rte_red red;
#endif
};

enum grinder_state {
        e_GRINDER_PREFETCH_PIPE = 0,
        e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
        e_GRINDER_PREFETCH_MBUF,
        e_GRINDER_READ_MBUF
};

struct rte_sched_subport_profile {
        /* Token bucket (TB) */
        uint64_t tb_period;
        uint64_t tb_credits_per_period;
        uint64_t tb_size;

        uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint64_t tc_period;
};

struct rte_sched_grinder {
        /* Pipe cache */
        uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
        uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
        uint32_t pcache_w;
        uint32_t pcache_r;

        /* Current pipe */
        enum grinder_state state;
        uint32_t productive;
        uint32_t pindex;
        struct rte_sched_subport *subport;
        struct rte_sched_pipe *pipe;
        struct rte_sched_pipe_profile *pipe_params;

        /* TC cache */
        uint8_t tccache_qmask[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint32_t tccache_qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint32_t tccache_w;
        uint32_t tccache_r;

        /* Current TC */
        uint32_t tc_index;
        struct rte_sched_queue *queue[RTE_SCHED_MAX_QUEUES_PER_TC];
        struct rte_mbuf **qbase[RTE_SCHED_MAX_QUEUES_PER_TC];
        uint32_t qindex[RTE_SCHED_MAX_QUEUES_PER_TC];
        uint16_t qsize;
        uint32_t qmask;
        uint32_t qpos;
        struct rte_mbuf *pkt;

        /* WRR */
        uint16_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
        uint16_t wrr_mask[RTE_SCHED_BE_QUEUES_PER_PIPE];
        uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
};

struct rte_sched_subport {
        /* Token bucket (TB) */
        uint64_t tb_time; /* time of last update */
        uint64_t tb_period;
        uint64_t tb_credits_per_period;
        uint64_t tb_size;
        uint64_t tb_credits;

        /* Traffic classes (TCs) */
        uint64_t tc_time; /* time of next update */
        uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint64_t tc_period;

        /* TC oversubscription */
        uint64_t tc_ov_wm;
        uint64_t tc_ov_wm_min;
        uint64_t tc_ov_wm_max;
        uint8_t tc_ov_period_id;
        uint8_t tc_ov;
        uint32_t tc_ov_n;
        double tc_ov_rate;

        /* Statistics */
        struct rte_sched_subport_stats stats __rte_cache_aligned;

        /* Subport pipes */
        uint32_t n_pipes_per_subport_enabled;
        uint32_t n_pipe_profiles;
        uint32_t n_max_pipe_profiles;

        /* Pipe best-effort TC rate */
        uint64_t pipe_tc_be_rate_max;

        /* Pipe queues size */
        uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

#ifdef RTE_SCHED_RED
        struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
#endif

        /* Scheduling loop detection */
        uint32_t pipe_loop;
        uint32_t pipe_exhaustion;

        /* Bitmap */
        struct rte_bitmap *bmp;
        uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;

        /* Grinders */
        struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
        uint32_t busy_grinders;

        /* Queue base calculation */
        uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
        uint32_t qsize_sum;

        struct rte_sched_pipe *pipe;
        struct rte_sched_queue *queue;
        struct rte_sched_queue_extra *queue_extra;
        struct rte_sched_pipe_profile *pipe_profiles;
        uint8_t *bmp_array;
        struct rte_mbuf **queue_array;
        uint8_t memory[0] __rte_cache_aligned;
} __rte_cache_aligned;

struct rte_sched_port {
        /* User parameters */
        uint32_t n_subports_per_port;
        uint32_t n_pipes_per_subport;
        uint32_t n_pipes_per_subport_log2;
        uint16_t pipe_queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint8_t pipe_tc[RTE_SCHED_QUEUES_PER_PIPE];
        uint8_t tc_queue[RTE_SCHED_QUEUES_PER_PIPE];
        uint32_t n_subport_profiles;
        uint32_t n_max_subport_profiles;
        uint64_t rate;
        uint32_t mtu;
        uint32_t frame_overhead;
        int socket;

        /* Timing */
        uint64_t time_cpu_cycles;     /* Current CPU time measured in CPU cycles */
        uint64_t time_cpu_bytes;      /* Current CPU time measured in bytes */
        uint64_t time;                /* Current NIC TX time measured in bytes */
        struct rte_reciprocal inv_cycles_per_byte; /* Reciprocal of cycles_per_byte */
        uint64_t cycles_per_byte;

        /* Grinders */
        struct rte_mbuf **pkts_out;
        uint32_t n_pkts_out;
        uint32_t subport_id;

        /* Large data structures */
        struct rte_sched_subport_profile *subport_profiles;
        struct rte_sched_subport *subports[0] __rte_cache_aligned;
} __rte_cache_aligned;

enum rte_sched_subport_array {
        e_RTE_SCHED_SUBPORT_ARRAY_PIPE = 0,
        e_RTE_SCHED_SUBPORT_ARRAY_QUEUE,
        e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA,
        e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES,
        e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY,
        e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY,
        e_RTE_SCHED_SUBPORT_ARRAY_TOTAL,
};

static inline uint32_t
rte_sched_subport_pipe_queues(struct rte_sched_subport *subport)
{
        return RTE_SCHED_QUEUES_PER_PIPE * subport->n_pipes_per_subport_enabled;
}

static inline struct rte_mbuf **
rte_sched_subport_pipe_qbase(struct rte_sched_subport *subport, uint32_t qindex)
{
        /* Upper bits of qindex select the pipe (qindex >> 4 is
         * qindex / RTE_SCHED_QUEUES_PER_PIPE), the low 4 bits select
         * the queue within the pipe.
         */
        uint32_t pindex = qindex >> 4;
        uint32_t qpos = qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1);

        return (subport->queue_array + pindex *
                subport->qsize_sum + subport->qsize_add[qpos]);
}

static inline uint16_t
rte_sched_subport_pipe_qsize(struct rte_sched_port *port,
struct rte_sched_subport *subport, uint32_t qindex)
{
        uint32_t tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

        return subport->qsize[tc];
}

static inline uint32_t
rte_sched_port_queues_per_port(struct rte_sched_port *port)
{
        uint32_t n_queues = 0, i;

        for (i = 0; i < port->n_subports_per_port; i++)
                n_queues += rte_sched_subport_pipe_queues(port->subports[i]);

        return n_queues;
}

static inline uint16_t
rte_sched_port_pipe_queue(struct rte_sched_port *port, uint32_t traffic_class)
{
        uint16_t pipe_queue = port->pipe_queue[traffic_class];

        return pipe_queue;
}

static inline uint8_t
rte_sched_port_pipe_tc(struct rte_sched_port *port, uint32_t qindex)
{
        uint8_t pipe_tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

        return pipe_tc;
}

static inline uint8_t
rte_sched_port_tc_queue(struct rte_sched_port *port, uint32_t qindex)
{
        uint8_t tc_queue = port->tc_queue[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];

        return tc_queue;
}

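/*
 * Worked example for the translation tables read by the helpers above
 * (values follow from the table setup loops in rte_sched_port_config(),
 * assuming the default 16 queues per pipe and 13 traffic classes):
 *
 *     pipe_tc[]:    queues 0..11 map to strict-priority TCs 0..11,
 *                   queues 12..15 all map to RTE_SCHED_TRAFFIC_CLASS_BE;
 *     tc_queue[]:   queues 0..12 are queue 0 of their TC, queues 13..15
 *                   are best-effort queues 1..3;
 *     pipe_queue[]: TC i starts at pipe queue i, so the best-effort TC
 *                   starts at pipe queue 12.
 *
 * E.g. a qindex whose low 4 bits are 14 resolves to TC 12, TC queue 2.
 */
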
static int
pipe_profile_check(struct rte_sched_pipe_params *params,
        uint64_t rate, uint16_t *qsize)
{
        uint32_t i;

        /* Pipe parameters */
        if (params == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter params\n", __func__);
                return -EINVAL;
        }

        /* TB rate: non-zero, not greater than port rate */
        if (params->tb_rate == 0 ||
                params->tb_rate > rate) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tb rate\n", __func__);
                return -EINVAL;
        }

        /* TB size: non-zero */
        if (params->tb_size == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tb size\n", __func__);
                return -EINVAL;
        }

        /* TC rate: non-zero if qsize non-zero, not greater than pipe rate */
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                if ((qsize[i] == 0 && params->tc_rate[i] != 0) ||
                        (qsize[i] != 0 && (params->tc_rate[i] == 0 ||
                        params->tc_rate[i] > params->tb_rate))) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Incorrect value for qsize or tc_rate\n", __func__);
                        return -EINVAL;
                }
        }

        if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
                qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for be traffic class rate\n", __func__);
                return -EINVAL;
        }

        /* TC period: non-zero */
        if (params->tc_period == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tc period\n", __func__);
                return -EINVAL;
        }

        /* Best-effort TC oversubscription weight: non-zero */
        if (params->tc_ov_weight == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tc ov weight\n", __func__);
                return -EINVAL;
        }

        /* Queue WRR weights: non-zero */
        for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
                if (params->wrr_weights[i] == 0) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Incorrect value for wrr weight\n", __func__);
                        return -EINVAL;
                }
        }

        return 0;
}

static int
subport_profile_check(struct rte_sched_subport_profile_params *params,
        uint64_t rate)
{
        uint32_t i;

        /* Check user parameters */
        if (params == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter params\n", __func__);
                return -EINVAL;
        }

        if (params->tb_rate == 0 || params->tb_rate > rate) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tb rate\n", __func__);
                return -EINVAL;
        }

        if (params->tb_size == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tb size\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                uint64_t tc_rate = params->tc_rate[i];

                if (tc_rate == 0 || (tc_rate > params->tb_rate)) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Incorrect value for tc rate\n", __func__);
                        return -EINVAL;
                }
        }

        if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect tc rate(best effort)\n", __func__);
                return -EINVAL;
        }

        if (params->tc_period == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tc period\n", __func__);
                return -EINVAL;
        }

        return 0;
}

static int
rte_sched_port_check_params(struct rte_sched_port_params *params)
{
        uint32_t i;

        if (params == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter params\n", __func__);
                return -EINVAL;
        }

        /* socket */
        if (params->socket < 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for socket id\n", __func__);
                return -EINVAL;
        }

        /* rate */
        if (params->rate == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for rate\n", __func__);
                return -EINVAL;
        }

        /* mtu */
        if (params->mtu == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for mtu\n", __func__);
                return -EINVAL;
        }

        /* n_subports_per_port: non-zero, limited to 16 bits, power of 2 */
        if (params->n_subports_per_port == 0 ||
            params->n_subports_per_port > 1u << 16 ||
            !rte_is_power_of_2(params->n_subports_per_port)) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for number of subports\n", __func__);
                return -EINVAL;
        }

        if (params->subport_profiles == NULL ||
                params->n_subport_profiles == 0 ||
                params->n_max_subport_profiles == 0 ||
                params->n_subport_profiles > params->n_max_subport_profiles) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for subport profiles\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < params->n_subport_profiles; i++) {
                struct rte_sched_subport_profile_params *p =
                                                params->subport_profiles + i;
                int status;

                status = subport_profile_check(p, params->rate);
                if (status != 0) {
                        RTE_LOG(ERR, SCHED,
                                "%s: subport profile check failed(%d)\n",
                                __func__, status);
                        return -EINVAL;
                }
        }

        /* n_pipes_per_subport: non-zero, power of 2 */
        if (params->n_pipes_per_subport == 0 ||
            !rte_is_power_of_2(params->n_pipes_per_subport)) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for maximum pipes number\n", __func__);
                return -EINVAL;
        }

        return 0;
}

static uint32_t
rte_sched_subport_get_array_base(struct rte_sched_subport_params *params,
        enum rte_sched_subport_array array)
{
        uint32_t n_pipes_per_subport = params->n_pipes_per_subport_enabled;
        uint32_t n_subport_pipe_queues =
                RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport;

        uint32_t size_pipe = n_pipes_per_subport * sizeof(struct rte_sched_pipe);
        uint32_t size_queue =
                n_subport_pipe_queues * sizeof(struct rte_sched_queue);
        uint32_t size_queue_extra
                = n_subport_pipe_queues * sizeof(struct rte_sched_queue_extra);
        uint32_t size_pipe_profiles = params->n_max_pipe_profiles *
                sizeof(struct rte_sched_pipe_profile);
        uint32_t size_bmp_array =
                rte_bitmap_get_memory_footprint(n_subport_pipe_queues);
        uint32_t size_per_pipe_queue_array, size_queue_array;

        uint32_t base, i;

        size_per_pipe_queue_array = 0;
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                if (i < RTE_SCHED_TRAFFIC_CLASS_BE)
                        size_per_pipe_queue_array +=
                                params->qsize[i] * sizeof(struct rte_mbuf *);
                else
                        size_per_pipe_queue_array += RTE_SCHED_MAX_QUEUES_PER_TC *
                                params->qsize[i] * sizeof(struct rte_mbuf *);
        }
        size_queue_array = n_pipes_per_subport * size_per_pipe_queue_array;

        base = 0;

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_pipe);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_queue);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);

        if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY)
                return base;
        base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);

        return base;
}

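/*
 * Layout note: the function above doubles as both a size calculator
 * (array == e_RTE_SCHED_SUBPORT_ARRAY_TOTAL) and an offset calculator.
 * The per-subport memory block is carved up in enum order: pipes,
 * queues, queue extras, pipe profiles, bitmap array, then the mbuf
 * pointer array, with each region rounded up to a cache line.
 */
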
static void
rte_sched_subport_config_qsize(struct rte_sched_subport *subport)
{
        uint32_t i;

        subport->qsize_add[0] = 0;

        /* Strict priority traffic class */
        for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1];

        /* Best-effort traffic class */
        subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] =
                subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE] +
                subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
        subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] =
                subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] +
                subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
        subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] =
                subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] +
                subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];

        subport->qsize_sum = subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] +
                subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
}

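/*
 * Worked example for the prefix sums above (illustrative queue sizes):
 * with qsize[0..11] = 64 for the strict-priority TCs and qsize[12] =
 * 256 for best-effort, qsize_add[] becomes
 *
 *     {0, 64, 128, ..., 704, 768, 1024, 1280, 1536}
 *
 * where entries 13..15 step by the shared best-effort queue size, and
 * qsize_sum = 1792 mbuf pointers per pipe.
 */
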
static void
rte_sched_port_log_pipe_profile(struct rte_sched_subport *subport, uint32_t i)
{
        struct rte_sched_pipe_profile *p = subport->pipe_profiles + i;

        RTE_LOG(DEBUG, SCHED, "Low level config for pipe profile %u:\n"
                "       Token bucket: period = %"PRIu64", credits per period = %"PRIu64", size = %"PRIu64"\n"
                "       Traffic classes: period = %"PRIu64",\n"
                "       credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
                ", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
                ", %"PRIu64", %"PRIu64", %"PRIu64"]\n"
                "       Best-effort traffic class oversubscription: weight = %hhu\n"
                "       WRR cost: [%hhu, %hhu, %hhu, %hhu]\n",
                i,

                /* Token bucket */
                p->tb_period,
                p->tb_credits_per_period,
                p->tb_size,

                /* Traffic classes */
                p->tc_period,
                p->tc_credits_per_period[0],
                p->tc_credits_per_period[1],
                p->tc_credits_per_period[2],
                p->tc_credits_per_period[3],
                p->tc_credits_per_period[4],
                p->tc_credits_per_period[5],
                p->tc_credits_per_period[6],
                p->tc_credits_per_period[7],
                p->tc_credits_per_period[8],
                p->tc_credits_per_period[9],
                p->tc_credits_per_period[10],
                p->tc_credits_per_period[11],
                p->tc_credits_per_period[12],

                /* Best-effort traffic class oversubscription */
                p->tc_ov_weight,

                /* WRR */
                p->wrr_cost[0], p->wrr_cost[1], p->wrr_cost[2], p->wrr_cost[3]);
}

static void
rte_sched_port_log_subport_profile(struct rte_sched_port *port, uint32_t i)
{
        struct rte_sched_subport_profile *p = port->subport_profiles + i;

        RTE_LOG(DEBUG, SCHED, "Low level config for subport profile %u:\n"
        "Token bucket: period = %"PRIu64", credits per period = %"PRIu64", "
        "size = %"PRIu64"\n"
        "Traffic classes: period = %"PRIu64",\n"
        "credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
        ", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
        ", %"PRIu64", %"PRIu64", %"PRIu64"]\n",
        i,

        /* Token bucket */
        p->tb_period,
        p->tb_credits_per_period,
        p->tb_size,

        /* Traffic classes */
        p->tc_period,
        p->tc_credits_per_period[0],
        p->tc_credits_per_period[1],
        p->tc_credits_per_period[2],
        p->tc_credits_per_period[3],
        p->tc_credits_per_period[4],
        p->tc_credits_per_period[5],
        p->tc_credits_per_period[6],
        p->tc_credits_per_period[7],
        p->tc_credits_per_period[8],
        p->tc_credits_per_period[9],
        p->tc_credits_per_period[10],
        p->tc_credits_per_period[11],
        p->tc_credits_per_period[12]);
}

static inline uint64_t
rte_sched_time_ms_to_bytes(uint64_t time_ms, uint64_t rate)
{
        uint64_t time = time_ms;

        time = (time * rate) / 1000;

        return time;
}

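/*
 * Example (illustrative numbers): at rate = 1.25e9 bytes/sec, a
 * tc_period of 10 ms converts to (10 * 1.25e9) / 1000 = 12,500,000
 * bytes. Periods are kept in bytes because the scheduler clock
 * (port->time) advances in bytes rather than in wall-clock units.
 */
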
static void
rte_sched_pipe_profile_convert(struct rte_sched_subport *subport,
        struct rte_sched_pipe_params *src,
        struct rte_sched_pipe_profile *dst,
        uint64_t rate)
{
        uint32_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
        uint32_t lcd1, lcd2, lcd;
        uint32_t i;

        /* Token Bucket */
        if (src->tb_rate == rate) {
                dst->tb_credits_per_period = 1;
                dst->tb_period = 1;
        } else {
                double tb_rate = (double) src->tb_rate
                                / (double) rate;
                double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

                rte_approx_64(tb_rate, d, &dst->tb_credits_per_period,
                        &dst->tb_period);
        }

        dst->tb_size = src->tb_size;

        /* Traffic Classes */
        dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period,
                                                rate);

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                if (subport->qsize[i])
                        dst->tc_credits_per_period[i]
                                = rte_sched_time_ms_to_bytes(src->tc_period,
                                        src->tc_rate[i]);

        dst->tc_ov_weight = src->tc_ov_weight;

        /* WRR queues */
        wrr_cost[0] = src->wrr_weights[0];
        wrr_cost[1] = src->wrr_weights[1];
        wrr_cost[2] = src->wrr_weights[2];
        wrr_cost[3] = src->wrr_weights[3];

        lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
        lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
        lcd = rte_get_lcd(lcd1, lcd2);

        wrr_cost[0] = lcd / wrr_cost[0];
        wrr_cost[1] = lcd / wrr_cost[1];
        wrr_cost[2] = lcd / wrr_cost[2];
        wrr_cost[3] = lcd / wrr_cost[3];

        dst->wrr_cost[0] = (uint8_t) wrr_cost[0];
        dst->wrr_cost[1] = (uint8_t) wrr_cost[1];
        dst->wrr_cost[2] = (uint8_t) wrr_cost[2];
        dst->wrr_cost[3] = (uint8_t) wrr_cost[3];
}

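/*
 * Worked example for the WRR cost computation above (hypothetical
 * weights): for wrr_weights = {1, 2, 3, 4}, lcd1 = lcd(1, 2) = 2,
 * lcd2 = lcd(3, 4) = 12, lcd = lcd(2, 12) = 12, giving
 *
 *     wrr_cost = {12/1, 12/2, 12/3, 12/4} = {12, 6, 4, 3}
 *
 * A queue's cost scales how fast it drains its WRR budget, so
 * higher-weight queues (lower cost) receive a proportionally larger
 * share of the best-effort bandwidth.
 */
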
static void
rte_sched_subport_profile_convert(struct rte_sched_subport_profile_params *src,
        struct rte_sched_subport_profile *dst,
        uint64_t rate)
{
        uint32_t i;

        /* Token Bucket */
        if (src->tb_rate == rate) {
                dst->tb_credits_per_period = 1;
                dst->tb_period = 1;
        } else {
                double tb_rate = (double) src->tb_rate
                                / (double) rate;
                double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

                rte_approx_64(tb_rate, d, &dst->tb_credits_per_period,
                        &dst->tb_period);
        }

        dst->tb_size = src->tb_size;

        /* Traffic Classes */
        dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period, rate);

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                dst->tc_credits_per_period[i]
                        = rte_sched_time_ms_to_bytes(src->tc_period,
                                src->tc_rate[i]);
}

static void
rte_sched_subport_config_pipe_profile_table(struct rte_sched_subport *subport,
        struct rte_sched_subport_params *params, uint64_t rate)
{
        uint32_t i;

        for (i = 0; i < subport->n_pipe_profiles; i++) {
                struct rte_sched_pipe_params *src = params->pipe_profiles + i;
                struct rte_sched_pipe_profile *dst = subport->pipe_profiles + i;

                rte_sched_pipe_profile_convert(subport, src, dst, rate);
                rte_sched_port_log_pipe_profile(subport, i);
        }

        subport->pipe_tc_be_rate_max = 0;
        for (i = 0; i < subport->n_pipe_profiles; i++) {
                struct rte_sched_pipe_params *src = params->pipe_profiles + i;
                uint64_t pipe_tc_be_rate = src->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];

                if (subport->pipe_tc_be_rate_max < pipe_tc_be_rate)
                        subport->pipe_tc_be_rate_max = pipe_tc_be_rate;
        }
}

static void
rte_sched_port_config_subport_profile_table(struct rte_sched_port *port,
        struct rte_sched_port_params *params,
        uint64_t rate)
{
        uint32_t i;

        for (i = 0; i < port->n_subport_profiles; i++) {
                struct rte_sched_subport_profile_params *src
                                = params->subport_profiles + i;
                struct rte_sched_subport_profile *dst
                                = port->subport_profiles + i;

                rte_sched_subport_profile_convert(src, dst, rate);
                rte_sched_port_log_subport_profile(port, i);
        }
}

static int
rte_sched_subport_check_params(struct rte_sched_subport_params *params,
        uint32_t n_max_pipes_per_subport,
        uint64_t rate)
{
        uint32_t i;

        /* Check user parameters */
        if (params == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter params\n", __func__);
                return -EINVAL;
        }

        if (params->tb_rate == 0 || params->tb_rate > rate) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tb rate\n", __func__);
                return -EINVAL;
        }

        if (params->tb_size == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tb size\n", __func__);
                return -EINVAL;
        }

        /* qsize: if non-zero, power of 2,
         * no bigger than 32K (due to 16-bit read/write pointers)
         */
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                uint16_t qsize = params->qsize[i];

                if (qsize != 0 && !rte_is_power_of_2(qsize)) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Incorrect value for qsize\n", __func__);
                        return -EINVAL;
                }
        }

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                uint64_t tc_rate = params->tc_rate[i];
                uint16_t qsize = params->qsize[i];

                if ((qsize == 0 && tc_rate != 0) ||
                        (qsize != 0 && tc_rate == 0) ||
                        (tc_rate > params->tb_rate)) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Incorrect value for tc rate\n", __func__);
                        return -EINVAL;
                }
        }

        if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
                params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect qsize or tc rate(best effort)\n", __func__);
                return -EINVAL;
        }

        if (params->tc_period == 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for tc period\n", __func__);
                return -EINVAL;
        }

        /* n_pipes_per_subport: non-zero, power of 2, within the port limit */
        if (params->n_pipes_per_subport_enabled == 0 ||
                params->n_pipes_per_subport_enabled > n_max_pipes_per_subport ||
                !rte_is_power_of_2(params->n_pipes_per_subport_enabled)) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for pipes number\n", __func__);
                return -EINVAL;
        }

        /* pipe_profiles and n_pipe_profiles */
        if (params->pipe_profiles == NULL ||
                params->n_pipe_profiles == 0 ||
                params->n_max_pipe_profiles == 0 ||
                params->n_pipe_profiles > params->n_max_pipe_profiles) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for pipe profiles\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < params->n_pipe_profiles; i++) {
                struct rte_sched_pipe_params *p = params->pipe_profiles + i;
                int status;

                status = pipe_profile_check(p, rate, &params->qsize[0]);
                if (status != 0) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Pipe profile check failed(%d)\n", __func__, status);
                        return -EINVAL;
                }
        }

        return 0;
}

uint32_t
rte_sched_port_get_memory_footprint(struct rte_sched_port_params *port_params,
        struct rte_sched_subport_params **subport_params)
{
        uint32_t size0 = 0, size1 = 0, i;
        int status;

        status = rte_sched_port_check_params(port_params);
        if (status != 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Port scheduler port params check failed (%d)\n",
                        __func__, status);

                return 0;
        }

        for (i = 0; i < port_params->n_subports_per_port; i++) {
                struct rte_sched_subport_params *sp = subport_params[i];

                status = rte_sched_subport_check_params(sp,
                                port_params->n_pipes_per_subport,
                                port_params->rate);
                if (status != 0) {
                        RTE_LOG(ERR, SCHED,
                                "%s: Port scheduler subport params check failed (%d)\n",
                                __func__, status);

                        return 0;
                }
        }

        size0 = sizeof(struct rte_sched_port);

        for (i = 0; i < port_params->n_subports_per_port; i++) {
                struct rte_sched_subport_params *sp = subport_params[i];

                size1 += rte_sched_subport_get_array_base(sp,
                                        e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
        }

        return size0 + size1;
}

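/*
 * Usage sketch for the footprint query above (hypothetical values; any
 * real configuration must pass the parameter checks; spp stands in for
 * the caller's per-subport parameter array):
 *
 *     struct rte_sched_port_params pp = {
 *             .socket = 0,
 *             .rate = 1250000000,
 *             .mtu = 1522,
 *             .frame_overhead = 24,
 *             .n_subports_per_port = 1,
 *             .n_pipes_per_subport = 4096,
 *             ...
 *     };
 *     uint32_t mem = rte_sched_port_get_memory_footprint(&pp, spp);
 *
 * A return value of 0 signals invalid parameters, since any valid
 * configuration needs at least sizeof(struct rte_sched_port) bytes.
 */
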
struct rte_sched_port *
rte_sched_port_config(struct rte_sched_port_params *params)
{
        struct rte_sched_port *port = NULL;
        uint32_t size0, size1, size2;
        uint32_t cycles_per_byte;
        uint32_t i, j;
        int status;

        status = rte_sched_port_check_params(params);
        if (status != 0) {
                RTE_LOG(ERR, SCHED,
                        "%s: Port scheduler params check failed (%d)\n",
                        __func__, status);
                return NULL;
        }

        size0 = sizeof(struct rte_sched_port);
        size1 = params->n_subports_per_port * sizeof(struct rte_sched_subport *);
        size2 = params->n_max_subport_profiles *
                sizeof(struct rte_sched_subport_profile);

        /* Allocate memory to store the data structures */
        port = rte_zmalloc_socket("qos_params", size0 + size1,
                                 RTE_CACHE_LINE_SIZE, params->socket);
        if (port == NULL) {
                RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);

                return NULL;
        }

        /* Allocate memory to store the subport profile */
        port->subport_profiles = rte_zmalloc_socket("subport_profile", size2,
                                        RTE_CACHE_LINE_SIZE, params->socket);
        if (port->subport_profiles == NULL) {
                RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);

                rte_free(port);
                return NULL;
        }

        /* User parameters */
        port->n_subports_per_port = params->n_subports_per_port;
        port->n_subport_profiles = params->n_subport_profiles;
        port->n_max_subport_profiles = params->n_max_subport_profiles;
        port->n_pipes_per_subport = params->n_pipes_per_subport;
        port->n_pipes_per_subport_log2 =
                        __builtin_ctz(params->n_pipes_per_subport);
        port->socket = params->socket;

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                port->pipe_queue[i] = i;

        for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
                port->pipe_tc[i] = j;

                if (j < RTE_SCHED_TRAFFIC_CLASS_BE)
                        j++;
        }

        for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
                port->tc_queue[i] = j;

                if (i >= RTE_SCHED_TRAFFIC_CLASS_BE)
                        j++;
        }
        port->rate = params->rate;
        port->mtu = params->mtu + params->frame_overhead;
        port->frame_overhead = params->frame_overhead;

        /* Timing */
        port->time_cpu_cycles = rte_get_tsc_cycles();
        port->time_cpu_bytes = 0;
        port->time = 0;

        /* Subport profile table */
        rte_sched_port_config_subport_profile_table(port, params, port->rate);

        cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT)
                / params->rate;
        port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte);
        port->cycles_per_byte = cycles_per_byte;

        /* Grinders */
        port->pkts_out = NULL;
        port->n_pkts_out = 0;
        port->subport_id = 0;

        return port;
}

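/*
 * rte_sched_port_config() only sets up port-level state and the
 * subport profile table; subports and pipes are attached afterwards.
 * Typical bring-up order (sketch, error handling omitted):
 *
 *     port = rte_sched_port_config(&port_params);
 *     for (i = 0; i < n_subports; i++) {
 *             rte_sched_subport_config(port, i, &subport_params[i]);
 *             for (j = 0; j < n_pipes; j++)
 *                     rte_sched_pipe_config(port, i, j, 0);
 *     }
 */
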
static inline void
rte_sched_subport_free(struct rte_sched_port *port,
        struct rte_sched_subport *subport)
{
        uint32_t n_subport_pipe_queues;
        uint32_t qindex;

        if (subport == NULL)
                return;

        n_subport_pipe_queues = rte_sched_subport_pipe_queues(subport);

        /* Free enqueued mbufs */
        for (qindex = 0; qindex < n_subport_pipe_queues; qindex++) {
                struct rte_mbuf **mbufs =
                        rte_sched_subport_pipe_qbase(subport, qindex);
                uint16_t qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
                if (qsize != 0) {
                        struct rte_sched_queue *queue = subport->queue + qindex;
                        uint16_t qr = queue->qr & (qsize - 1);
                        uint16_t qw = queue->qw & (qsize - 1);

                        for (; qr != qw; qr = (qr + 1) & (qsize - 1))
                                rte_pktmbuf_free(mbufs[qr]);
                }
        }

        rte_free(subport);
}

void
rte_sched_port_free(struct rte_sched_port *port)
{
        uint32_t i;

        /* Check user parameters */
        if (port == NULL)
                return;

        for (i = 0; i < port->n_subports_per_port; i++)
                rte_sched_subport_free(port, port->subports[i]);

        rte_free(port->subport_profiles);
        rte_free(port);
}

static void
rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
{
        struct rte_sched_subport *s = port->subports[i];

        RTE_LOG(DEBUG, SCHED, "Low level config for subport %u:\n"
                "       Token bucket: period = %"PRIu64", credits per period = %"PRIu64
                ", size = %"PRIu64"\n"
                "       Traffic classes: period = %"PRIu64"\n"
                "       credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
                ", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
                ", %"PRIu64", %"PRIu64", %"PRIu64"]\n"
                "       Best effort traffic class oversubscription: wm min = %"PRIu64
                ", wm max = %"PRIu64"\n",
                i,

                /* Token bucket */
                s->tb_period,
                s->tb_credits_per_period,
                s->tb_size,

                /* Traffic classes */
                s->tc_period,
                s->tc_credits_per_period[0],
                s->tc_credits_per_period[1],
                s->tc_credits_per_period[2],
                s->tc_credits_per_period[3],
                s->tc_credits_per_period[4],
                s->tc_credits_per_period[5],
                s->tc_credits_per_period[6],
                s->tc_credits_per_period[7],
                s->tc_credits_per_period[8],
                s->tc_credits_per_period[9],
                s->tc_credits_per_period[10],
                s->tc_credits_per_period[11],
                s->tc_credits_per_period[12],

                /* Best effort traffic class oversubscription */
                s->tc_ov_wm_min,
                s->tc_ov_wm_max);
}

static void
rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)
{
        uint32_t i;

        for (i = 0; i < n_subports; i++) {
                struct rte_sched_subport *subport = port->subports[i];

                rte_sched_subport_free(port, subport);
        }

        rte_free(port->subport_profiles);
        rte_free(port);
}

int
rte_sched_subport_config(struct rte_sched_port *port,
        uint32_t subport_id,
        struct rte_sched_subport_params *params)
{
        struct rte_sched_subport *s = NULL;
        uint32_t n_subports = subport_id;
        uint32_t n_subport_pipe_queues, i;
        uint32_t size0, size1, bmp_mem_size;
        int status;

        /* Check user parameters */
        if (port == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter port\n", __func__);
                return -EINVAL;
        }

        if (subport_id >= port->n_subports_per_port) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for subport id\n", __func__);

                rte_sched_free_memory(port, n_subports);
                return -EINVAL;
        }

        status = rte_sched_subport_check_params(params,
                port->n_pipes_per_subport,
                port->rate);
        if (status != 0) {
                RTE_LOG(NOTICE, SCHED,
                        "%s: Port scheduler subport params check failed (%d)\n",
                        __func__, status);

                rte_sched_free_memory(port, n_subports);
                return -EINVAL;
        }

        /* Determine the amount of memory to allocate */
        size0 = sizeof(struct rte_sched_subport);
        size1 = rte_sched_subport_get_array_base(params,
                                e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);

        /* Allocate memory to store the data structures */
        s = rte_zmalloc_socket("subport_params", size0 + size1,
                RTE_CACHE_LINE_SIZE, port->socket);
        if (s == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Memory allocation fails\n", __func__);

                rte_sched_free_memory(port, n_subports);
                return -ENOMEM;
        }

        n_subports++;

        /* Port */
        port->subports[subport_id] = s;

        /* Token Bucket (TB) */
        if (params->tb_rate == port->rate) {
                s->tb_credits_per_period = 1;
                s->tb_period = 1;
        } else {
                double tb_rate = ((double) params->tb_rate) / ((double) port->rate);
                double d = RTE_SCHED_TB_RATE_CONFIG_ERR;

                rte_approx_64(tb_rate, d, &s->tb_credits_per_period, &s->tb_period);
        }

        s->tb_size = params->tb_size;
        s->tb_time = port->time;
        s->tb_credits = s->tb_size / 2;

        /* Traffic Classes (TCs) */
        s->tc_period = rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                if (params->qsize[i])
                        s->tc_credits_per_period[i]
                                = rte_sched_time_ms_to_bytes(params->tc_period,
                                        params->tc_rate[i]);
        }
        s->tc_time = port->time + s->tc_period;
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                if (params->qsize[i])
                        s->tc_credits[i] = s->tc_credits_per_period[i];

        /* compile time checks */
        RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
        RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS &
                (RTE_SCHED_PORT_N_GRINDERS - 1));

        /* User parameters */
        s->n_pipes_per_subport_enabled = params->n_pipes_per_subport_enabled;
        memcpy(s->qsize, params->qsize, sizeof(params->qsize));
        s->n_pipe_profiles = params->n_pipe_profiles;
        s->n_max_pipe_profiles = params->n_max_pipe_profiles;

#ifdef RTE_SCHED_RED
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                uint32_t j;

                for (j = 0; j < RTE_COLORS; j++) {
                        /* if min/max are both zero, then RED is disabled */
                        if ((params->red_params[i][j].min_th |
                             params->red_params[i][j].max_th) == 0) {
                                continue;
                        }

                        if (rte_red_config_init(&s->red_config[i][j],
                                params->red_params[i][j].wq_log2,
                                params->red_params[i][j].min_th,
                                params->red_params[i][j].max_th,
                                params->red_params[i][j].maxp_inv) != 0) {
                                rte_sched_free_memory(port, n_subports);

                                RTE_LOG(NOTICE, SCHED,
                                        "%s: RED configuration init fails\n", __func__);
                                return -EINVAL;
                        }
                }
        }
#endif

        /* Scheduling loop detection */
        s->pipe_loop = RTE_SCHED_PIPE_INVALID;
        s->pipe_exhaustion = 0;

        /* Grinders */
        s->busy_grinders = 0;

        /* Queue base calculation */
        rte_sched_subport_config_qsize(s);

        /* Large data structures */
        s->pipe = (struct rte_sched_pipe *)
                (s->memory + rte_sched_subport_get_array_base(params,
                e_RTE_SCHED_SUBPORT_ARRAY_PIPE));
        s->queue = (struct rte_sched_queue *)
                (s->memory + rte_sched_subport_get_array_base(params,
                e_RTE_SCHED_SUBPORT_ARRAY_QUEUE));
        s->queue_extra = (struct rte_sched_queue_extra *)
                (s->memory + rte_sched_subport_get_array_base(params,
                e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA));
        s->pipe_profiles = (struct rte_sched_pipe_profile *)
                (s->memory + rte_sched_subport_get_array_base(params,
                e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES));
        s->bmp_array = s->memory + rte_sched_subport_get_array_base(params,
                e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY);
        s->queue_array = (struct rte_mbuf **)
                (s->memory + rte_sched_subport_get_array_base(params,
                e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY));

        /* Pipe profile table */
        rte_sched_subport_config_pipe_profile_table(s, params, port->rate);

        /* Bitmap */
        n_subport_pipe_queues = rte_sched_subport_pipe_queues(s);
        bmp_mem_size = rte_bitmap_get_memory_footprint(n_subport_pipe_queues);
        s->bmp = rte_bitmap_init(n_subport_pipe_queues, s->bmp_array,
                                bmp_mem_size);
        if (s->bmp == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Subport bitmap init error\n", __func__);

                rte_sched_free_memory(port, n_subports);
                return -EINVAL;
        }

        for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
                s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;

#ifdef RTE_SCHED_SUBPORT_TC_OV
        /* TC oversubscription */
        s->tc_ov_wm_min = port->mtu;
        s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(params->tc_period,
                                                     s->pipe_tc_be_rate_max);
        s->tc_ov_wm = s->tc_ov_wm_max;
        s->tc_ov_period_id = 0;
        s->tc_ov = 0;
        s->tc_ov_n = 0;
        s->tc_ov_rate = 0;
#endif

        rte_sched_port_log_subport_config(port, subport_id);

        return 0;
}

int
rte_sched_pipe_config(struct rte_sched_port *port,
        uint32_t subport_id,
        uint32_t pipe_id,
        int32_t pipe_profile)
{
        struct rte_sched_subport *s;
        struct rte_sched_pipe *p;
        struct rte_sched_pipe_profile *params;
        uint32_t n_subports = subport_id + 1;
        uint32_t deactivate, profile, i;

        /* Check user parameters */
        profile = (uint32_t) pipe_profile;
        deactivate = (pipe_profile < 0);

        if (port == NULL) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter port\n", __func__);
                return -EINVAL;
        }

        if (subport_id >= port->n_subports_per_port) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter subport id\n", __func__);

                rte_sched_free_memory(port, n_subports);
                return -EINVAL;
        }

        s = port->subports[subport_id];
        if (pipe_id >= s->n_pipes_per_subport_enabled) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter pipe id\n", __func__);

                rte_sched_free_memory(port, n_subports);
                return -EINVAL;
        }

        if (!deactivate && profile >= s->n_pipe_profiles) {
                RTE_LOG(ERR, SCHED,
                        "%s: Incorrect value for parameter pipe profile\n", __func__);

                rte_sched_free_memory(port, n_subports);
                return -EINVAL;
        }

        /* Handle the case when pipe already has a valid configuration */
        p = s->pipe + pipe_id;
        if (p->tb_time) {
                params = s->pipe_profiles + p->profile;

                double subport_tc_be_rate =
                        (double) s->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
                        / (double) s->tc_period;
                double pipe_tc_be_rate =
                        (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
                        / (double) params->tc_period;
                uint32_t tc_be_ov = s->tc_ov;

                /* Unplug pipe from its subport */
                s->tc_ov_n -= params->tc_ov_weight;
                s->tc_ov_rate -= pipe_tc_be_rate;
                s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;

                if (s->tc_ov != tc_be_ov) {
                        RTE_LOG(DEBUG, SCHED,
                                "Subport %u Best-effort TC oversubscription is OFF (%.4lf >= %.4lf)\n",
                                subport_id, subport_tc_be_rate, s->tc_ov_rate);
                }

                /* Reset the pipe */
                memset(p, 0, sizeof(struct rte_sched_pipe));
        }

        if (deactivate)
                return 0;

        /* Apply the new pipe configuration */
        p->profile = profile;
        params = s->pipe_profiles + p->profile;

        /* Token Bucket (TB) */
        p->tb_time = port->time;
        p->tb_credits = params->tb_size / 2;

        /* Traffic Classes (TCs) */
        p->tc_time = port->time + params->tc_period;

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                if (s->qsize[i])
                        p->tc_credits[i] = params->tc_credits_per_period[i];

        {
                /* Subport best effort tc oversubscription */
                double subport_tc_be_rate =
                        (double) s->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
                        / (double) s->tc_period;
                double pipe_tc_be_rate =
                        (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
                        / (double) params->tc_period;
                uint32_t tc_be_ov = s->tc_ov;

                s->tc_ov_n += params->tc_ov_weight;
                s->tc_ov_rate += pipe_tc_be_rate;
                s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;

                if (s->tc_ov != tc_be_ov) {
                        RTE_LOG(DEBUG, SCHED,
                                "Subport %u Best effort TC oversubscription is ON (%.4lf < %.4lf)\n",
                                subport_id, subport_tc_be_rate, s->tc_ov_rate);
                }
                p->tc_ov_period_id = s->tc_ov_period_id;
                p->tc_ov_credits = s->tc_ov_wm;
        }

        return 0;
}

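/*
 * Deactivation example: passing a negative profile id, e.g.
 * rte_sched_pipe_config(port, subport_id, pipe_id, -1), unplugs the
 * pipe's best-effort rate from the subport oversubscription accounting
 * and zeroes the pipe state; a later call with a valid profile id
 * re-activates the pipe.
 */
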
1466 int
1467 rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,
1468         uint32_t subport_id,
1469         struct rte_sched_pipe_params *params,
1470         uint32_t *pipe_profile_id)
1471 {
1472         struct rte_sched_subport *s;
1473         struct rte_sched_pipe_profile *pp;
1474         uint32_t i;
1475         int status;
1476
1477         /* Port */
1478         if (port == NULL) {
1479                 RTE_LOG(ERR, SCHED,
1480                         "%s: Incorrect value for parameter port\n", __func__);
1481                 return -EINVAL;
1482         }
1483
1484         /* Subport id must be within the valid range */
1485         if (subport_id >= port->n_subports_per_port) {
1486                 RTE_LOG(ERR, SCHED,
1487                         "%s: Incorrect value for subport id\n", __func__);
1488                 return -EINVAL;
1489         }
1490
1491         s = port->subports[subport_id];
1492
1493         /* Number of pipe profiles must not exceed the max limit */
1494         if (s->n_pipe_profiles >= s->n_max_pipe_profiles) {
1495                 RTE_LOG(ERR, SCHED,
1496                         "%s: Number of pipe profiles exceeds the max limit\n", __func__);
1497                 return -EINVAL;
1498         }
1499
1500         /* Pipe params */
1501         status = pipe_profile_check(params, port->rate, &s->qsize[0]);
1502         if (status != 0) {
1503                 RTE_LOG(ERR, SCHED,
1504                         "%s: Pipe profile check failed(%d)\n", __func__, status);
1505                 return -EINVAL;
1506         }
1507
1508         pp = &s->pipe_profiles[s->n_pipe_profiles];
1509         rte_sched_pipe_profile_convert(s, params, pp, port->rate);
1510
1511         /* Pipe profile must not already exist */
1512         for (i = 0; i < s->n_pipe_profiles; i++)
1513                 if (memcmp(s->pipe_profiles + i, pp, sizeof(*pp)) == 0) {
1514                         RTE_LOG(ERR, SCHED,
1515                                 "%s: Pipe profile exists\n", __func__);
1516                         return -EINVAL;
1517                 }
1518
1519         /* Pipe profile commit */
1520         *pipe_profile_id = s->n_pipe_profiles;
1521         s->n_pipe_profiles++;
1522
1523         if (s->pipe_tc_be_rate_max < params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE])
1524                 s->pipe_tc_be_rate_max = params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];
1525
1526         rte_sched_port_log_pipe_profile(s, *pipe_profile_id);
1527
1528         return 0;
1529 }
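
/*
 * Illustrative usage sketch (kept out of the build): register a pipe
 * profile with a subport, then attach a pipe to it, assuming the
 * rte_sched_pipe_config() prototype from rte_sched.h. All ids and the
 * params contents are placeholder values chosen by the application.
 */
#if 0
static int
app_add_profile_and_pipe(struct rte_sched_port *port, uint32_t subport_id,
        uint32_t pipe_id, struct rte_sched_pipe_params *params)
{
        uint32_t profile_id;
        int status;

        /* Validate, convert and store the profile in the subport */
        status = rte_sched_subport_pipe_profile_add(port, subport_id,
                        params, &profile_id);
        if (status != 0)
                return status;

        /* Point the pipe at the newly added profile */
        return rte_sched_pipe_config(port, subport_id, pipe_id,
                        (int32_t)profile_id);
}
#endif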
1530
1531 static inline uint32_t
1532 rte_sched_port_qindex(struct rte_sched_port *port,
1533         uint32_t subport,
1534         uint32_t pipe,
1535         uint32_t traffic_class,
1536         uint32_t queue)
1537 {
1538         return ((subport & (port->n_subports_per_port - 1)) <<
1539                 (port->n_pipes_per_subport_log2 + 4)) |
1540                 ((pipe &
1541                 (port->subports[subport]->n_pipes_per_subport_enabled - 1)) << 4) |
1542                 ((rte_sched_port_pipe_queue(port, traffic_class) + queue) &
1543                 (RTE_SCHED_QUEUES_PER_PIPE - 1));
1544 }
1545
1546 void
1547 rte_sched_port_pkt_write(struct rte_sched_port *port,
1548                          struct rte_mbuf *pkt,
1549                          uint32_t subport, uint32_t pipe,
1550                          uint32_t traffic_class,
1551                          uint32_t queue, enum rte_color color)
1552 {
1553         uint32_t queue_id =
1554                 rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
1555
1556         rte_mbuf_sched_set(pkt, queue_id, traffic_class, (uint8_t)color);
1557 }
1558
1559 void
1560 rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
1561                                   const struct rte_mbuf *pkt,
1562                                   uint32_t *subport, uint32_t *pipe,
1563                                   uint32_t *traffic_class, uint32_t *queue)
1564 {
1565         uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
1566
1567         *subport = queue_id >> (port->n_pipes_per_subport_log2 + 4);
1568         *pipe = (queue_id >> 4) &
1569                 (port->subports[*subport]->n_pipes_per_subport_enabled - 1);
1570         *traffic_class = rte_sched_port_pipe_tc(port, queue_id);
1571         *queue = rte_sched_port_tc_queue(port, queue_id);
1572 }
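
/*
 * Layout of the queue id produced/consumed above, assuming 4096 pipes
 * per subport (n_pipes_per_subport_log2 = 12), 16 queues per pipe and
 * the best-effort TC starting at queue 12 within the pipe:
 *
 *   queue_id = (subport << 16) | (pipe << 4) | pipe_queue
 *
 * e.g. subport 2, pipe 5, best-effort queue 1:
 *   queue_id = (2 << 16) | (5 << 4) | (12 + 1) = 131165
 */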
1573
1574 enum rte_color
1575 rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
1576 {
1577         return (enum rte_color)rte_mbuf_sched_color_get(pkt);
1578 }
1579
1580 int
1581 rte_sched_subport_read_stats(struct rte_sched_port *port,
1582                              uint32_t subport_id,
1583                              struct rte_sched_subport_stats *stats,
1584                              uint32_t *tc_ov)
1585 {
1586         struct rte_sched_subport *s;
1587
1588         /* Check user parameters */
1589         if (port == NULL) {
1590                 RTE_LOG(ERR, SCHED,
1591                         "%s: Incorrect value for parameter port\n", __func__);
1592                 return -EINVAL;
1593         }
1594
1595         if (subport_id >= port->n_subports_per_port) {
1596                 RTE_LOG(ERR, SCHED,
1597                         "%s: Incorrect value for subport id\n", __func__);
1598                 return -EINVAL;
1599         }
1600
1601         if (stats == NULL) {
1602                 RTE_LOG(ERR, SCHED,
1603                         "%s: Incorrect value for parameter stats\n", __func__);
1604                 return -EINVAL;
1605         }
1606
1607         if (tc_ov == NULL) {
1608                 RTE_LOG(ERR, SCHED,
1609                         "%s: Incorrect value for tc_ov\n", __func__);
1610                 return -EINVAL;
1611         }
1612
1613         s = port->subports[subport_id];
1614
1615         /* Copy subport stats and clear */
1616         memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
1617         memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));
1618
1619         /* Subport TC oversubscription status */
1620         *tc_ov = s->tc_ov;
1621
1622         return 0;
1623 }
1624
1625 int
1626 rte_sched_queue_read_stats(struct rte_sched_port *port,
1627         uint32_t queue_id,
1628         struct rte_sched_queue_stats *stats,
1629         uint16_t *qlen)
1630 {
1631         struct rte_sched_subport *s;
1632         struct rte_sched_queue *q;
1633         struct rte_sched_queue_extra *qe;
1634         uint32_t subport_id, subport_qshift, subport_qindex;
1635
1636         /* Check user parameters */
1637         if (port == NULL) {
1638                 RTE_LOG(ERR, SCHED,
1639                         "%s: Incorrect value for parameter port\n", __func__);
1640                 return -EINVAL;
1641         }
1642
1643         if (queue_id >= rte_sched_port_queues_per_port(port)) {
1644                 RTE_LOG(ERR, SCHED,
1645                         "%s: Incorrect value for queue id\n", __func__);
1646                 return -EINVAL;
1647         }
1648
1649         if (stats == NULL) {
1650                 RTE_LOG(ERR, SCHED,
1651                         "%s: Incorrect value for parameter stats\n", __func__);
1652                 return -EINVAL;
1653         }
1654
1655         if (qlen == NULL) {
1656                 RTE_LOG(ERR, SCHED,
1657                         "%s: Incorrect value for parameter qlen\n", __func__);
1658                 return -EINVAL;
1659         }
1660         subport_qshift = port->n_pipes_per_subport_log2 + 4;
1661         subport_id = (queue_id >> subport_qshift) & (port->n_subports_per_port - 1);
1662
1663         s = port->subports[subport_id];
1664         subport_qindex = ((1 << subport_qshift) - 1) & queue_id;
1665         q = s->queue + subport_qindex;
1666         qe = s->queue_extra + subport_qindex;
1667
1668         /* Copy queue stats and clear */
1669         memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
1670         memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats));
1671
1672         /* Queue length */
1673         *qlen = q->qw - q->qr;
1674
1675         return 0;
1676 }
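
/*
 * Illustrative sketch (kept out of the build): both read_stats calls
 * above copy the counters out and reset them to zero. The subport and
 * queue ids below are placeholder values.
 */
#if 0
static void
app_dump_stats(struct rte_sched_port *port)
{
        struct rte_sched_subport_stats subport_stats;
        struct rte_sched_queue_stats queue_stats;
        uint32_t tc_ov;
        uint16_t qlen;

        if (rte_sched_subport_read_stats(port, 0, &subport_stats, &tc_ov) == 0)
                printf("subport 0: tc_ov=%u\n", (unsigned int)tc_ov);

        if (rte_sched_queue_read_stats(port, 0, &queue_stats, &qlen) == 0)
                printf("queue 0: qlen=%u\n", (unsigned int)qlen);
}
#endif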
1677
1678 #ifdef RTE_SCHED_DEBUG
1679
1680 static inline int
1681 rte_sched_port_queue_is_empty(struct rte_sched_subport *subport,
1682         uint32_t qindex)
1683 {
1684         struct rte_sched_queue *queue = subport->queue + qindex;
1685
1686         return queue->qr == queue->qw;
1687 }
1688
1689 #endif /* RTE_SCHED_DEBUG */
1690
1691 #ifdef RTE_SCHED_COLLECT_STATS
1692
1693 static inline void
1694 rte_sched_port_update_subport_stats(struct rte_sched_port *port,
1695         struct rte_sched_subport *subport,
1696         uint32_t qindex,
1697         struct rte_mbuf *pkt)
1698 {
1699         uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
1700         uint32_t pkt_len = pkt->pkt_len;
1701
1702         subport->stats.n_pkts_tc[tc_index] += 1;
1703         subport->stats.n_bytes_tc[tc_index] += pkt_len;
1704 }
1705
1706 #ifdef RTE_SCHED_RED
1707 static inline void
1708 rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
1709         struct rte_sched_subport *subport,
1710         uint32_t qindex,
1711         struct rte_mbuf *pkt,
1712         uint32_t red)
1713 #else
1714 static inline void
1715 rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
1716         struct rte_sched_subport *subport,
1717         uint32_t qindex,
1718         struct rte_mbuf *pkt,
1719         __rte_unused uint32_t red)
1720 #endif
1721 {
1722         uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
1723         uint32_t pkt_len = pkt->pkt_len;
1724
1725         subport->stats.n_pkts_tc_dropped[tc_index] += 1;
1726         subport->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
1727 #ifdef RTE_SCHED_RED
1728         subport->stats.n_pkts_red_dropped[tc_index] += red;
1729 #endif
1730 }
1731
1732 static inline void
1733 rte_sched_port_update_queue_stats(struct rte_sched_subport *subport,
1734         uint32_t qindex,
1735         struct rte_mbuf *pkt)
1736 {
1737         struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1738         uint32_t pkt_len = pkt->pkt_len;
1739
1740         qe->stats.n_pkts += 1;
1741         qe->stats.n_bytes += pkt_len;
1742 }
1743
1744 #ifdef RTE_SCHED_RED
1745 static inline void
1746 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
1747         uint32_t qindex,
1748         struct rte_mbuf *pkt,
1749         uint32_t red)
1750 #else
1751 static inline void
1752 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
1753         uint32_t qindex,
1754         struct rte_mbuf *pkt,
1755         __rte_unused uint32_t red)
1756 #endif
1757 {
1758         struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1759         uint32_t pkt_len = pkt->pkt_len;
1760
1761         qe->stats.n_pkts_dropped += 1;
1762         qe->stats.n_bytes_dropped += pkt_len;
1763 #ifdef RTE_SCHED_RED
1764         qe->stats.n_pkts_red_dropped += red;
1765 #endif
1766 }
1767
1768 #endif /* RTE_SCHED_COLLECT_STATS */
1769
1770 #ifdef RTE_SCHED_RED
1771
1772 static inline int
1773 rte_sched_port_red_drop(struct rte_sched_port *port,
1774         struct rte_sched_subport *subport,
1775         struct rte_mbuf *pkt,
1776         uint32_t qindex,
1777         uint16_t qlen)
1778 {
1779         struct rte_sched_queue_extra *qe;
1780         struct rte_red_config *red_cfg;
1781         struct rte_red *red;
1782         uint32_t tc_index;
1783         enum rte_color color;
1784
1785         tc_index = rte_sched_port_pipe_tc(port, qindex);
1786         color = rte_sched_port_pkt_read_color(pkt);
1787         red_cfg = &subport->red_config[tc_index][color];
1788
1789         if ((red_cfg->min_th | red_cfg->max_th) == 0)
1790                 return 0;
1791
1792         qe = subport->queue_extra + qindex;
1793         red = &qe->red;
1794
1795         return rte_red_enqueue(red_cfg, red, qlen, port->time);
1796 }
1797
1798 static inline void
1799 rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port,
1800         struct rte_sched_subport *subport, uint32_t qindex)
1801 {
1802         struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1803         struct rte_red *red = &qe->red;
1804
1805         rte_red_mark_queue_empty(red, port->time);
1806 }
1807
1808 #else
1809
1810 static inline int rte_sched_port_red_drop(struct rte_sched_port *port __rte_unused,
1811         struct rte_sched_subport *subport __rte_unused,
1812         struct rte_mbuf *pkt __rte_unused,
1813         uint32_t qindex __rte_unused,
1814         uint16_t qlen __rte_unused)
1815 {
1816         return 0;
1817 }
1818
1819 #define rte_sched_port_set_queue_empty_timestamp(port, subport, qindex)
1820
1821 #endif /* RTE_SCHED_RED */
1822
1823 #ifdef RTE_SCHED_DEBUG
1824
1825 static inline void
1826 debug_check_queue_slab(struct rte_sched_subport *subport, uint32_t bmp_pos,
1827                        uint64_t bmp_slab)
1828 {
1829         uint64_t mask;
1830         uint32_t i, panic;
1831
1832         if (bmp_slab == 0)
1833                 rte_panic("Empty slab at position %u\n", bmp_pos);
1834
1835         panic = 0;
1836         for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
1837                 if (mask & bmp_slab) {
1838                         if (rte_sched_port_queue_is_empty(subport, bmp_pos + i)) {
1839                                 printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i);
1840                                 panic = 1;
1841                         }
1842                 }
1843         }
1844
1845         if (panic)
1846                 rte_panic("Empty queues in slab 0x%" PRIx64 "starting at position %u\n",
1847                         bmp_slab, bmp_pos);
1848 }
1849
1850 #endif /* RTE_SCHED_DEBUG */
1851
1852 static inline struct rte_sched_subport *
1853 rte_sched_port_subport(struct rte_sched_port *port,
1854         struct rte_mbuf *pkt)
1855 {
1856         uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
1857         uint32_t subport_id = queue_id >> (port->n_pipes_per_subport_log2 + 4);
1858
1859         return port->subports[subport_id];
1860 }
1861
1862 static inline uint32_t
1863 rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_subport *subport,
1864         struct rte_mbuf *pkt, uint32_t subport_qmask)
1865 {
1866         struct rte_sched_queue *q;
1867 #ifdef RTE_SCHED_COLLECT_STATS
1868         struct rte_sched_queue_extra *qe;
1869 #endif
1870         uint32_t qindex = rte_mbuf_sched_queue_get(pkt);
1871         uint32_t subport_queue_id = subport_qmask & qindex;
1872
1873         q = subport->queue + subport_queue_id;
1874         rte_prefetch0(q);
1875 #ifdef RTE_SCHED_COLLECT_STATS
1876         qe = subport->queue_extra + subport_queue_id;
1877         rte_prefetch0(qe);
1878 #endif
1879
1880         return subport_queue_id;
1881 }
1882
1883 static inline void
1884 rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port,
1885         struct rte_sched_subport *subport,
1886         uint32_t qindex,
1887         struct rte_mbuf **qbase)
1888 {
1889         struct rte_sched_queue *q;
1890         struct rte_mbuf **q_qw;
1891         uint16_t qsize;
1892
1893         q = subport->queue + qindex;
1894         qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
1895         q_qw = qbase + (q->qw & (qsize - 1));
1896
1897         rte_prefetch0(q_qw);
1898         rte_bitmap_prefetch0(subport->bmp, qindex);
1899 }
1900
1901 static inline int
1902 rte_sched_port_enqueue_qwa(struct rte_sched_port *port,
1903         struct rte_sched_subport *subport,
1904         uint32_t qindex,
1905         struct rte_mbuf **qbase,
1906         struct rte_mbuf *pkt)
1907 {
1908         struct rte_sched_queue *q;
1909         uint16_t qsize;
1910         uint16_t qlen;
1911
1912         q = subport->queue + qindex;
1913         qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
1914         qlen = q->qw - q->qr;
1915
1916         /* Drop the packet (and update drop stats) when queue is full or RED drops it */
1917         if (unlikely(rte_sched_port_red_drop(port, subport, pkt, qindex, qlen) ||
1918                      (qlen >= qsize))) {
1919                 rte_pktmbuf_free(pkt);
1920 #ifdef RTE_SCHED_COLLECT_STATS
1921                 rte_sched_port_update_subport_stats_on_drop(port, subport,
1922                         qindex, pkt, qlen < qsize);
1923                 rte_sched_port_update_queue_stats_on_drop(subport, qindex, pkt,
1924                         qlen < qsize);
1925 #endif
1926                 return 0;
1927         }
1928
1929         /* Enqueue packet */
1930         qbase[q->qw & (qsize - 1)] = pkt;
1931         q->qw++;
1932
1933         /* Activate queue in the subport bitmap */
1934         rte_bitmap_set(subport->bmp, qindex);
1935
1936         /* Statistics */
1937 #ifdef RTE_SCHED_COLLECT_STATS
1938         rte_sched_port_update_subport_stats(port, subport, qindex, pkt);
1939         rte_sched_port_update_queue_stats(subport, qindex, pkt);
1940 #endif
1941
1942         return 1;
1943 }
1944
1945
1946 /*
1947  * The enqueue function implements a 4-level pipeline with each stage
1948  * processing two different packets. The purpose of using a pipeline
1949  * is to hide the latency of prefetching the data structures. The
1950  * naming convention is presented in the diagram below:
1951  *
1952  *   p00  _______   p10  _______   p20  _______   p30  _______
1953  * ----->|       |----->|       |----->|       |----->|       |----->
1954  *       |   0   |      |   1   |      |   2   |      |   3   |
1955  * ----->|_______|----->|_______|----->|_______|----->|_______|----->
1956  *   p01            p11            p21            p31
1957  *
1958  */
1959 int
1960 rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,
1961                        uint32_t n_pkts)
1962 {
1963         struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21,
1964                 *pkt30, *pkt31, *pkt_last;
1965         struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base,
1966                 **q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
1967         struct rte_sched_subport *subport00, *subport01, *subport10, *subport11,
1968                 *subport20, *subport21, *subport30, *subport31, *subport_last;
1969         uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
1970         uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
1971         uint32_t subport_qmask;
1972         uint32_t result, i;
1973
1974         result = 0;
1975         subport_qmask = (1 << (port->n_pipes_per_subport_log2 + 4)) - 1;
1976
1977         /*
1978          * Less than 6 input packets available, which is not enough to
1979          * feed the pipeline
1980          */
1981         if (unlikely(n_pkts < 6)) {
1982                 struct rte_sched_subport *subports[5];
1983                 struct rte_mbuf **q_base[5];
1984                 uint32_t q[5];
1985
1986                 /* Prefetch the mbuf structure of each packet */
1987                 for (i = 0; i < n_pkts; i++)
1988                         rte_prefetch0(pkts[i]);
1989
1990                 /* Prefetch the subport structure for each packet */
1991                 for (i = 0; i < n_pkts; i++)
1992                         subports[i] = rte_sched_port_subport(port, pkts[i]);
1993
1994                 /* Prefetch the queue structure for each queue */
1995                 for (i = 0; i < n_pkts; i++)
1996                         q[i] = rte_sched_port_enqueue_qptrs_prefetch0(subports[i],
1997                                         pkts[i], subport_qmask);
1998
1999                 /* Prefetch the write pointer location of each queue */
2000                 for (i = 0; i < n_pkts; i++) {
2001                         q_base[i] = rte_sched_subport_pipe_qbase(subports[i], q[i]);
2002                         rte_sched_port_enqueue_qwa_prefetch0(port, subports[i],
2003                                 q[i], q_base[i]);
2004                 }
2005
2006                 /* Write each packet to its queue */
2007                 for (i = 0; i < n_pkts; i++)
2008                         result += rte_sched_port_enqueue_qwa(port, subports[i],
2009                                                 q[i], q_base[i], pkts[i]);
2010
2011                 return result;
2012         }
2013
2014         /* Feed the first 3 stages of the pipeline (6 packets needed) */
2015         pkt20 = pkts[0];
2016         pkt21 = pkts[1];
2017         rte_prefetch0(pkt20);
2018         rte_prefetch0(pkt21);
2019
2020         pkt10 = pkts[2];
2021         pkt11 = pkts[3];
2022         rte_prefetch0(pkt10);
2023         rte_prefetch0(pkt11);
2024
2025         subport20 = rte_sched_port_subport(port, pkt20);
2026         subport21 = rte_sched_port_subport(port, pkt21);
2027         q20 = rte_sched_port_enqueue_qptrs_prefetch0(subport20,
2028                         pkt20, subport_qmask);
2029         q21 = rte_sched_port_enqueue_qptrs_prefetch0(subport21,
2030                         pkt21, subport_qmask);
2031
2032         pkt00 = pkts[4];
2033         pkt01 = pkts[5];
2034         rte_prefetch0(pkt00);
2035         rte_prefetch0(pkt01);
2036
2037         subport10 = rte_sched_port_subport(port, pkt10);
2038         subport11 = rte_sched_port_subport(port, pkt11);
2039         q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
2040                         pkt10, subport_qmask);
2041         q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
2042                         pkt11, subport_qmask);
2043
2044         q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
2045         q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
2046         rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
2047         rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);
2048
2049         /* Run the pipeline */
2050         for (i = 6; i < (n_pkts & (~1)); i += 2) {
2051                 /* Propagate stage inputs */
2052                 pkt30 = pkt20;
2053                 pkt31 = pkt21;
2054                 pkt20 = pkt10;
2055                 pkt21 = pkt11;
2056                 pkt10 = pkt00;
2057                 pkt11 = pkt01;
2058                 q30 = q20;
2059                 q31 = q21;
2060                 q20 = q10;
2061                 q21 = q11;
2062                 subport30 = subport20;
2063                 subport31 = subport21;
2064                 subport20 = subport10;
2065                 subport21 = subport11;
2066                 q30_base = q20_base;
2067                 q31_base = q21_base;
2068
2069                 /* Stage 0: Get packets in */
2070                 pkt00 = pkts[i];
2071                 pkt01 = pkts[i + 1];
2072                 rte_prefetch0(pkt00);
2073                 rte_prefetch0(pkt01);
2074
2075                 /* Stage 1: Prefetch subport and queue structure storing queue pointers */
2076                 subport10 = rte_sched_port_subport(port, pkt10);
2077                 subport11 = rte_sched_port_subport(port, pkt11);
2078                 q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
2079                                 pkt10, subport_qmask);
2080                 q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
2081                                 pkt11, subport_qmask);
2082
2083                 /* Stage 2: Prefetch queue write location */
2084                 q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
2085                 q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
2086                 rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
2087                 rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);
2088
2089                 /* Stage 3: Write packet to queue and activate queue */
2090                 r30 = rte_sched_port_enqueue_qwa(port, subport30,
2091                                 q30, q30_base, pkt30);
2092                 r31 = rte_sched_port_enqueue_qwa(port, subport31,
2093                                 q31, q31_base, pkt31);
2094                 result += r30 + r31;
2095         }
2096
2097         /*
2098          * Drain the pipeline (exactly 6 packets).
2099          * Handle the last packet in the case
2100          * of an odd number of input packets.
2101          */
2102         pkt_last = pkts[n_pkts - 1];
2103         rte_prefetch0(pkt_last);
2104
2105         subport00 = rte_sched_port_subport(port, pkt00);
2106         subport01 = rte_sched_port_subport(port, pkt01);
2107         q00 = rte_sched_port_enqueue_qptrs_prefetch0(subport00,
2108                         pkt00, subport_qmask);
2109         q01 = rte_sched_port_enqueue_qptrs_prefetch0(subport01,
2110                         pkt01, subport_qmask);
2111
2112         q10_base = rte_sched_subport_pipe_qbase(subport10, q10);
2113         q11_base = rte_sched_subport_pipe_qbase(subport11, q11);
2114         rte_sched_port_enqueue_qwa_prefetch0(port, subport10, q10, q10_base);
2115         rte_sched_port_enqueue_qwa_prefetch0(port, subport11, q11, q11_base);
2116
2117         r20 = rte_sched_port_enqueue_qwa(port, subport20,
2118                         q20, q20_base, pkt20);
2119         r21 = rte_sched_port_enqueue_qwa(port, subport21,
2120                         q21, q21_base, pkt21);
2121         result += r20 + r21;
2122
2123         subport_last = rte_sched_port_subport(port, pkt_last);
2124         q_last = rte_sched_port_enqueue_qptrs_prefetch0(subport_last,
2125                                 pkt_last, subport_qmask);
2126
2127         q00_base = rte_sched_subport_pipe_qbase(subport00, q00);
2128         q01_base = rte_sched_subport_pipe_qbase(subport01, q01);
2129         rte_sched_port_enqueue_qwa_prefetch0(port, subport00, q00, q00_base);
2130         rte_sched_port_enqueue_qwa_prefetch0(port, subport01, q01, q01_base);
2131
2132         r10 = rte_sched_port_enqueue_qwa(port, subport10, q10,
2133                         q10_base, pkt10);
2134         r11 = rte_sched_port_enqueue_qwa(port, subport11, q11,
2135                         q11_base, pkt11);
2136         result += r10 + r11;
2137
2138         q_last_base = rte_sched_subport_pipe_qbase(subport_last, q_last);
2139         rte_sched_port_enqueue_qwa_prefetch0(port, subport_last,
2140                 q_last, q_last_base);
2141
2142         r00 = rte_sched_port_enqueue_qwa(port, subport00, q00,
2143                         q00_base, pkt00);
2144         r01 = rte_sched_port_enqueue_qwa(port, subport01, q01,
2145                         q01_base, pkt01);
2146         result += r00 + r01;
2147
2148         if (n_pkts & 1) {
2149                 r_last = rte_sched_port_enqueue_qwa(port, subport_last,
2150                                         q_last, q_last_base, pkt_last);
2151                 result += r_last;
2152         }
2153
2154         return result;
2155 }
2156
2157 #ifndef RTE_SCHED_SUBPORT_TC_OV
2158
2159 static inline void
2160 grinder_credits_update(struct rte_sched_port *port,
2161         struct rte_sched_subport *subport, uint32_t pos)
2162 {
2163         struct rte_sched_grinder *grinder = subport->grinder + pos;
2164         struct rte_sched_pipe *pipe = grinder->pipe;
2165         struct rte_sched_pipe_profile *params = grinder->pipe_params;
2166         uint64_t n_periods;
2167         uint32_t i;
2168
2169         /* Subport TB */
2170         n_periods = (port->time - subport->tb_time) / subport->tb_period;
2171         subport->tb_credits += n_periods * subport->tb_credits_per_period;
2172         subport->tb_credits = RTE_MIN(subport->tb_credits, subport->tb_size);
2173         subport->tb_time += n_periods * subport->tb_period;
2174
2175         /* Pipe TB */
2176         n_periods = (port->time - pipe->tb_time) / params->tb_period;
2177         pipe->tb_credits += n_periods * params->tb_credits_per_period;
2178         pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
2179         pipe->tb_time += n_periods * params->tb_period;
2180
2181         /* Subport TCs */
2182         if (unlikely(port->time >= subport->tc_time)) {
2183                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2184                         subport->tc_credits[i] = subport->tc_credits_per_period[i];
2185
2186                 subport->tc_time = port->time + subport->tc_period;
2187         }
2188
2189         /* Pipe TCs */
2190         if (unlikely(port->time >= pipe->tc_time)) {
2191                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2192                         pipe->tc_credits[i] = params->tc_credits_per_period[i];
2193
2194                 pipe->tc_time = port->time + params->tc_period;
2195         }
2196 }
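
/*
 * Worked example for the token bucket refill above (illustrative
 * numbers): with tb_period = 100 and tb_credits_per_period = 10, a gap
 * of port->time - tb_time = 350 gives n_periods = 3, so 30 credits are
 * added (saturated at tb_size) and tb_time advances by 300; the
 * remaining 50 time units carry over to the next refill.
 */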
2197
2198 #else
2199
2200 static inline uint64_t
2201 grinder_tc_ov_credits_update(struct rte_sched_port *port,
2202         struct rte_sched_subport *subport)
2203 {
2204         uint64_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2205         uint64_t tc_consumption = 0, tc_ov_consumption_max;
2206         uint64_t tc_ov_wm = subport->tc_ov_wm;
2207         uint32_t i;
2208
2209         if (subport->tc_ov == 0)
2210                 return subport->tc_ov_wm_max;
2211
2212         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
2213                 tc_ov_consumption[i] =
2214                         subport->tc_credits_per_period[i] - subport->tc_credits[i];
2215                 tc_consumption += tc_ov_consumption[i];
2216         }
2217
2218         tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] =
2219                 subport->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
2220                 subport->tc_credits[RTE_SCHED_TRAFFIC_CLASS_BE];
2221
2222         tc_ov_consumption_max =
2223                 subport->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
2224                         tc_consumption;
2225
2226         if (tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] >
2227                 (tc_ov_consumption_max - port->mtu)) {
2228                 tc_ov_wm  -= tc_ov_wm >> 7;
2229                 if (tc_ov_wm < subport->tc_ov_wm_min)
2230                         tc_ov_wm = subport->tc_ov_wm_min;
2231
2232                 return tc_ov_wm;
2233         }
2234
2235         tc_ov_wm += (tc_ov_wm >> 7) + 1;
2236         if (tc_ov_wm > subport->tc_ov_wm_max)
2237                 tc_ov_wm = subport->tc_ov_wm_max;
2238
2239         return tc_ov_wm;
2240 }
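
/*
 * The watermark adaptation above: when best-effort (BE) consumption in
 * the last period came within one MTU of the maximum available to it
 * (the BE period budget minus what the higher-priority TCs consumed),
 * tc_ov_wm is decreased by 1/128 of its value (~0.8%), floored at
 * tc_ov_wm_min; otherwise it is increased by 1/128 of its value plus
 * one, capped at tc_ov_wm_max. E.g. (illustrative numbers) tc_ov_wm =
 * 12800 shrinks to 12700 under congestion and grows to 12901 otherwise.
 */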
2241
2242 static inline void
2243 grinder_credits_update(struct rte_sched_port *port,
2244         struct rte_sched_subport *subport, uint32_t pos)
2245 {
2246         struct rte_sched_grinder *grinder = subport->grinder + pos;
2247         struct rte_sched_pipe *pipe = grinder->pipe;
2248         struct rte_sched_pipe_profile *params = grinder->pipe_params;
2249         uint64_t n_periods;
2250         uint32_t i;
2251
2252         /* Subport TB */
2253         n_periods = (port->time - subport->tb_time) / subport->tb_period;
2254         subport->tb_credits += n_periods * subport->tb_credits_per_period;
2255         subport->tb_credits = RTE_MIN(subport->tb_credits, subport->tb_size);
2256         subport->tb_time += n_periods * subport->tb_period;
2257
2258         /* Pipe TB */
2259         n_periods = (port->time - pipe->tb_time) / params->tb_period;
2260         pipe->tb_credits += n_periods * params->tb_credits_per_period;
2261         pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
2262         pipe->tb_time += n_periods * params->tb_period;
2263
2264         /* Subport TCs */
2265         if (unlikely(port->time >= subport->tc_time)) {
2266                 subport->tc_ov_wm = grinder_tc_ov_credits_update(port, subport);
2267
2268                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2269                         subport->tc_credits[i] = subport->tc_credits_per_period[i];
2270
2271                 subport->tc_time = port->time + subport->tc_period;
2272                 subport->tc_ov_period_id++;
2273         }
2274
2275         /* Pipe TCs */
2276         if (unlikely(port->time >= pipe->tc_time)) {
2277                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2278                         pipe->tc_credits[i] = params->tc_credits_per_period[i];
2279                 pipe->tc_time = port->time + params->tc_period;
2280         }
2281
2282         /* Pipe TCs - Oversubscription */
2283         if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
2284                 pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;
2285
2286                 pipe->tc_ov_period_id = subport->tc_ov_period_id;
2287         }
2288 }
2289
2290 #endif /* RTE_SCHED_SUBPORT_TC_OV */
2291
2292
2293 #ifndef RTE_SCHED_SUBPORT_TC_OV
2294
2295 static inline int
2296 grinder_credits_check(struct rte_sched_port *port,
2297         struct rte_sched_subport *subport, uint32_t pos)
2298 {
2299         struct rte_sched_grinder *grinder = subport->grinder + pos;
2300         struct rte_sched_pipe *pipe = grinder->pipe;
2301         struct rte_mbuf *pkt = grinder->pkt;
2302         uint32_t tc_index = grinder->tc_index;
2303         uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
2304         uint64_t subport_tb_credits = subport->tb_credits;
2305         uint64_t subport_tc_credits = subport->tc_credits[tc_index];
2306         uint64_t pipe_tb_credits = pipe->tb_credits;
2307         uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
2308         int enough_credits;
2309
2310         /* Check subport and pipe credits */
2311         enough_credits = (pkt_len <= subport_tb_credits) &&
2312                 (pkt_len <= subport_tc_credits) &&
2313                 (pkt_len <= pipe_tb_credits) &&
2314                 (pkt_len <= pipe_tc_credits);
2315
2316         if (!enough_credits)
2317                 return 0;
2318
2319         /* Update subport and pipe credits */
2320         subport->tb_credits -= pkt_len;
2321         subport->tc_credits[tc_index] -= pkt_len;
2322         pipe->tb_credits -= pkt_len;
2323         pipe->tc_credits[tc_index] -= pkt_len;
2324
2325         return 1;
2326 }
2327
2328 #else
2329
2330 static inline int
2331 grinder_credits_check(struct rte_sched_port *port,
2332         struct rte_sched_subport *subport, uint32_t pos)
2333 {
2334         struct rte_sched_grinder *grinder = subport->grinder + pos;
2335         struct rte_sched_pipe *pipe = grinder->pipe;
2336         struct rte_mbuf *pkt = grinder->pkt;
2337         uint32_t tc_index = grinder->tc_index;
2338         uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
2339         uint64_t subport_tb_credits = subport->tb_credits;
2340         uint64_t subport_tc_credits = subport->tc_credits[tc_index];
2341         uint64_t pipe_tb_credits = pipe->tb_credits;
2342         uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
2343         uint64_t pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2344         uint64_t pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {0};
2345         uint64_t pipe_tc_ov_credits;
2346         uint32_t i;
2347         int enough_credits;
2348
2349         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2350                 pipe_tc_ov_mask1[i] = ~0LLU;
2351
2352         pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASS_BE] = pipe->tc_ov_credits;
2353         pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASS_BE] = ~0LLU;
2354         pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
2355
2356         /* Check pipe and subport credits */
2357         enough_credits = (pkt_len <= subport_tb_credits) &&
2358                 (pkt_len <= subport_tc_credits) &&
2359                 (pkt_len <= pipe_tb_credits) &&
2360                 (pkt_len <= pipe_tc_credits) &&
2361                 (pkt_len <= pipe_tc_ov_credits);
2362
2363         if (!enough_credits)
2364                 return 0;
2365
2366         /* Update pipe and subport credits */
2367         subport->tb_credits -= pkt_len;
2368         subport->tc_credits[tc_index] -= pkt_len;
2369         pipe->tb_credits -= pkt_len;
2370         pipe->tc_credits[tc_index] -= pkt_len;
2371         pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;
2372
2373         return 1;
2374 }
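
/*
 * Note on the branchless masks above: pipe_tc_ov_mask1[] holds ~0 for
 * the high-priority TCs, so the pipe_tc_ov_credits comparison can never
 * fail for them, and holds the real pipe->tc_ov_credits value for the
 * best-effort TC. pipe_tc_ov_mask2[] is non-zero only for the
 * best-effort TC, so the final subtraction decrements tc_ov_credits
 * only when a best-effort packet is scheduled.
 */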
2375
2376 #endif /* RTE_SCHED_SUBPORT_TC_OV */
2377
2378
2379 static inline int
2380 grinder_schedule(struct rte_sched_port *port,
2381         struct rte_sched_subport *subport, uint32_t pos)
2382 {
2383         struct rte_sched_grinder *grinder = subport->grinder + pos;
2384         struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
2385         struct rte_mbuf *pkt = grinder->pkt;
2386         uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
2387         uint32_t be_tc_active;
2388
2389         if (!grinder_credits_check(port, subport, pos))
2390                 return 0;
2391
2392         /* Advance port time */
2393         port->time += pkt_len;
2394
2395         /* Send packet */
2396         port->pkts_out[port->n_pkts_out++] = pkt;
2397         queue->qr++;
2398
2399         be_tc_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE) ? ~0x0 : 0x0;
2400         grinder->wrr_tokens[grinder->qpos] +=
2401                 (pkt_len * grinder->wrr_cost[grinder->qpos]) & be_tc_active;
2402
2403         if (queue->qr == queue->qw) {
2404                 uint32_t qindex = grinder->qindex[grinder->qpos];
2405
2406                 rte_bitmap_clear(subport->bmp, qindex);
2407                 grinder->qmask &= ~(1 << grinder->qpos);
2408                 if (be_tc_active)
2409                         grinder->wrr_mask[grinder->qpos] = 0;
2410                 rte_sched_port_set_queue_empty_timestamp(port, subport, qindex);
2411         }
2412
2413         /* Reset pipe loop detection */
2414         subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
2415         grinder->productive = 1;
2416
2417         return 1;
2418 }
2419
2420 #ifdef SCHED_VECTOR_SSE4
2421
2422 static inline int
2423 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2424 {
2425         __m128i index = _mm_set1_epi32(base_pipe);
2426         __m128i pipes = _mm_load_si128((__m128i *)subport->grinder_base_bmp_pos);
2427         __m128i res = _mm_cmpeq_epi32(pipes, index);
2428
2429         pipes = _mm_load_si128((__m128i *)(subport->grinder_base_bmp_pos + 4));
2430         pipes = _mm_cmpeq_epi32(pipes, index);
2431         res = _mm_or_si128(res, pipes);
2432
2433         if (_mm_testz_si128(res, res))
2434                 return 0;
2435
2436         return 1;
2437 }
2438
2439 #elif defined(SCHED_VECTOR_NEON)
2440
2441 static inline int
2442 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2443 {
2444         uint32x4_t index, pipes;
2445         uint32_t *pos = (uint32_t *)subport->grinder_base_bmp_pos;
2446
2447         index = vmovq_n_u32(base_pipe);
2448         pipes = vld1q_u32(pos);
2449         if (!vminvq_u32(veorq_u32(pipes, index)))
2450                 return 1;
2451
2452         pipes = vld1q_u32(pos + 4);
2453         if (!vminvq_u32(veorq_u32(pipes, index)))
2454                 return 1;
2455
2456         return 0;
2457 }
2458
2459 #else
2460
2461 static inline int
2462 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2463 {
2464         uint32_t i;
2465
2466         for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) {
2467                 if (subport->grinder_base_bmp_pos[i] == base_pipe)
2468                         return 1;
2469         }
2470
2471         return 0;
2472 }
2473
2474 #endif /* SCHED_VECTOR_SSE4, SCHED_VECTOR_NEON */
2475
2476 static inline void
2477 grinder_pcache_populate(struct rte_sched_subport *subport,
2478         uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
2479 {
2480         struct rte_sched_grinder *grinder = subport->grinder + pos;
2481         uint16_t w[4];
2482
2483         grinder->pcache_w = 0;
2484         grinder->pcache_r = 0;
2485
2486         w[0] = (uint16_t) bmp_slab;
2487         w[1] = (uint16_t) (bmp_slab >> 16);
2488         w[2] = (uint16_t) (bmp_slab >> 32);
2489         w[3] = (uint16_t) (bmp_slab >> 48);
2490
2491         grinder->pcache_qmask[grinder->pcache_w] = w[0];
2492         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
2493         grinder->pcache_w += (w[0] != 0);
2494
2495         grinder->pcache_qmask[grinder->pcache_w] = w[1];
2496         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
2497         grinder->pcache_w += (w[1] != 0);
2498
2499         grinder->pcache_qmask[grinder->pcache_w] = w[2];
2500         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
2501         grinder->pcache_w += (w[2] != 0);
2502
2503         grinder->pcache_qmask[grinder->pcache_w] = w[3];
2504         grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
2505         grinder->pcache_w += (w[3] != 0);
2506 }
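
/*
 * Worked example for the slab split above: with 16 queues per pipe,
 * each 16-bit word of the 64-bit bitmap slab is the queue mask of one
 * pipe. E.g. bmp_slab = 0x0000000100008001 at bmp_pos = 64 produces two
 * cache entries: qmask 0x8001 at qindex 64 (pipe 4, queues 0 and 15)
 * and qmask 0x0001 at qindex 96 (pipe 6, queue 0); the two all-zero
 * words are skipped because pcache_w only advances for non-zero masks.
 */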
2507
2508 static inline void
2509 grinder_tccache_populate(struct rte_sched_subport *subport,
2510         uint32_t pos, uint32_t qindex, uint16_t qmask)
2511 {
2512         struct rte_sched_grinder *grinder = subport->grinder + pos;
2513         uint8_t b, i;
2514
2515         grinder->tccache_w = 0;
2516         grinder->tccache_r = 0;
2517
2518         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
2519                 b = (uint8_t) ((qmask >> i) & 0x1);
2520                 grinder->tccache_qmask[grinder->tccache_w] = b;
2521                 grinder->tccache_qindex[grinder->tccache_w] = qindex + i;
2522                 grinder->tccache_w += (b != 0);
2523         }
2524
2525         b = (uint8_t) (qmask >> (RTE_SCHED_TRAFFIC_CLASS_BE));
2526         grinder->tccache_qmask[grinder->tccache_w] = b;
2527         grinder->tccache_qindex[grinder->tccache_w] = qindex +
2528                 RTE_SCHED_TRAFFIC_CLASS_BE;
2529         grinder->tccache_w += (b != 0);
2530 }
2531
2532 static inline int
2533 grinder_next_tc(struct rte_sched_port *port,
2534         struct rte_sched_subport *subport, uint32_t pos)
2535 {
2536         struct rte_sched_grinder *grinder = subport->grinder + pos;
2537         struct rte_mbuf **qbase;
2538         uint32_t qindex;
2539         uint16_t qsize;
2540
2541         if (grinder->tccache_r == grinder->tccache_w)
2542                 return 0;
2543
2544         qindex = grinder->tccache_qindex[grinder->tccache_r];
2545         qbase = rte_sched_subport_pipe_qbase(subport, qindex);
2546         qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2547
2548         grinder->tc_index = rte_sched_port_pipe_tc(port, qindex);
2549         grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
2550         grinder->qsize = qsize;
2551
2552         if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
2553                 grinder->queue[0] = subport->queue + qindex;
2554                 grinder->qbase[0] = qbase;
2555                 grinder->qindex[0] = qindex;
2556                 grinder->tccache_r++;
2557
2558                 return 1;
2559         }
2560
2561         grinder->queue[0] = subport->queue + qindex;
2562         grinder->queue[1] = subport->queue + qindex + 1;
2563         grinder->queue[2] = subport->queue + qindex + 2;
2564         grinder->queue[3] = subport->queue + qindex + 3;
2565
2566         grinder->qbase[0] = qbase;
2567         grinder->qbase[1] = qbase + qsize;
2568         grinder->qbase[2] = qbase + 2 * qsize;
2569         grinder->qbase[3] = qbase + 3 * qsize;
2570
2571         grinder->qindex[0] = qindex;
2572         grinder->qindex[1] = qindex + 1;
2573         grinder->qindex[2] = qindex + 2;
2574         grinder->qindex[3] = qindex + 3;
2575
2576         grinder->tccache_r++;
2577         return 1;
2578 }
2579
2580 static inline int
2581 grinder_next_pipe(struct rte_sched_port *port,
2582         struct rte_sched_subport *subport, uint32_t pos)
2583 {
2584         struct rte_sched_grinder *grinder = subport->grinder + pos;
2585         uint32_t pipe_qindex;
2586         uint16_t pipe_qmask;
2587
2588         if (grinder->pcache_r < grinder->pcache_w) {
2589                 pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
2590                 pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
2591                 grinder->pcache_r++;
2592         } else {
2593                 uint64_t bmp_slab = 0;
2594                 uint32_t bmp_pos = 0;
2595
2596                 /* Get another non-empty pipe group */
2597                 if (unlikely(rte_bitmap_scan(subport->bmp, &bmp_pos, &bmp_slab) <= 0))
2598                         return 0;
2599
2600 #ifdef RTE_SCHED_DEBUG
2601                 debug_check_queue_slab(subport, bmp_pos, bmp_slab);
2602 #endif
2603
2604                 /* Return if pipe group already in one of the other grinders */
2605                 subport->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
2606                 if (unlikely(grinder_pipe_exists(subport, bmp_pos)))
2607                         return 0;
2608
2609                 subport->grinder_base_bmp_pos[pos] = bmp_pos;
2610
2611                 /* Install new pipe group into grinder's pipe cache */
2612                 grinder_pcache_populate(subport, pos, bmp_pos, bmp_slab);
2613
2614                 pipe_qmask = grinder->pcache_qmask[0];
2615                 pipe_qindex = grinder->pcache_qindex[0];
2616                 grinder->pcache_r = 1;
2617         }
2618
2619         /* Install new pipe in the grinder */
2620         grinder->pindex = pipe_qindex >> 4;
2621         grinder->subport = subport;
2622         grinder->pipe = subport->pipe + grinder->pindex;
2623         grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
2624         grinder->productive = 0;
2625
2626         grinder_tccache_populate(subport, pos, pipe_qindex, pipe_qmask);
2627         grinder_next_tc(port, subport, pos);
2628
2629         /* Check for pipe exhaustion */
2630         if (grinder->pindex == subport->pipe_loop) {
2631                 subport->pipe_exhaustion = 1;
2632                 subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
2633         }
2634
2635         return 1;
2636 }
2637
2638
2639 static inline void
2640 grinder_wrr_load(struct rte_sched_subport *subport, uint32_t pos)
2641 {
2642         struct rte_sched_grinder *grinder = subport->grinder + pos;
2643         struct rte_sched_pipe *pipe = grinder->pipe;
2644         struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
2645         uint32_t qmask = grinder->qmask;
2646
2647         grinder->wrr_tokens[0] =
2648                 ((uint16_t) pipe->wrr_tokens[0]) << RTE_SCHED_WRR_SHIFT;
2649         grinder->wrr_tokens[1] =
2650                 ((uint16_t) pipe->wrr_tokens[1]) << RTE_SCHED_WRR_SHIFT;
2651         grinder->wrr_tokens[2] =
2652                 ((uint16_t) pipe->wrr_tokens[2]) << RTE_SCHED_WRR_SHIFT;
2653         grinder->wrr_tokens[3] =
2654                 ((uint16_t) pipe->wrr_tokens[3]) << RTE_SCHED_WRR_SHIFT;
2655
2656         grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
2657         grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
2658         grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
2659         grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;
2660
2661         grinder->wrr_cost[0] = pipe_params->wrr_cost[0];
2662         grinder->wrr_cost[1] = pipe_params->wrr_cost[1];
2663         grinder->wrr_cost[2] = pipe_params->wrr_cost[2];
2664         grinder->wrr_cost[3] = pipe_params->wrr_cost[3];
2665 }
2666
2667 static inline void
2668 grinder_wrr_store(struct rte_sched_subport *subport, uint32_t pos)
2669 {
2670         struct rte_sched_grinder *grinder = subport->grinder + pos;
2671         struct rte_sched_pipe *pipe = grinder->pipe;
2672
2673         pipe->wrr_tokens[0] =
2674                         (grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >>
2675                                 RTE_SCHED_WRR_SHIFT;
2676         pipe->wrr_tokens[1] =
2677                         (grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >>
2678                                 RTE_SCHED_WRR_SHIFT;
2679         pipe->wrr_tokens[2] =
2680                         (grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >>
2681                                 RTE_SCHED_WRR_SHIFT;
2682         pipe->wrr_tokens[3] =
2683                         (grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >>
2684                                 RTE_SCHED_WRR_SHIFT;
2685 }
2686
2687 static inline void
2688 grinder_wrr(struct rte_sched_subport *subport, uint32_t pos)
2689 {
2690         struct rte_sched_grinder *grinder = subport->grinder + pos;
2691         uint16_t wrr_tokens_min;
2692
2693         grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
2694         grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
2695         grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
2696         grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];
2697
2698         grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
2699         wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];
2700
2701         grinder->wrr_tokens[0] -= wrr_tokens_min;
2702         grinder->wrr_tokens[1] -= wrr_tokens_min;
2703         grinder->wrr_tokens[2] -= wrr_tokens_min;
2704         grinder->wrr_tokens[3] -= wrr_tokens_min;
2705 }
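
/*
 * Worked example for the WRR step above: inactive queues have
 * wrr_mask = 0, so OR-ing with ~mask saturates their tokens to 0xFFFF
 * and they can never win the minimum search. With tokens
 * {8, 3, 5, 0xFFFF}, rte_min_pos_4_u16() selects qpos = 1 and the
 * common minimum 3 is subtracted, leaving {5, 0, 2, 0xFFFC}.
 */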
2706
2707
2708 #define grinder_evict(subport, pos)
2709
2710 static inline void
2711 grinder_prefetch_pipe(struct rte_sched_subport *subport, uint32_t pos)
2712 {
2713         struct rte_sched_grinder *grinder = subport->grinder + pos;
2714
2715         rte_prefetch0(grinder->pipe);
2716         rte_prefetch0(grinder->queue[0]);
2717 }
2718
2719 static inline void
2720 grinder_prefetch_tc_queue_arrays(struct rte_sched_subport *subport, uint32_t pos)
2721 {
2722         struct rte_sched_grinder *grinder = subport->grinder + pos;
2723         uint16_t qsize, qr[RTE_SCHED_MAX_QUEUES_PER_TC];
2724
2725         qsize = grinder->qsize;
2726         grinder->qpos = 0;
2727
2728         if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
2729                 qr[0] = grinder->queue[0]->qr & (qsize - 1);
2730
2731                 rte_prefetch0(grinder->qbase[0] + qr[0]);
2732                 return;
2733         }
2734
2735         qr[0] = grinder->queue[0]->qr & (qsize - 1);
2736         qr[1] = grinder->queue[1]->qr & (qsize - 1);
2737         qr[2] = grinder->queue[2]->qr & (qsize - 1);
2738         qr[3] = grinder->queue[3]->qr & (qsize - 1);
2739
2740         rte_prefetch0(grinder->qbase[0] + qr[0]);
2741         rte_prefetch0(grinder->qbase[1] + qr[1]);
2742
2743         grinder_wrr_load(subport, pos);
2744         grinder_wrr(subport, pos);
2745
2746         rte_prefetch0(grinder->qbase[2] + qr[2]);
2747         rte_prefetch0(grinder->qbase[3] + qr[3]);
2748 }
2749
2750 static inline void
2751 grinder_prefetch_mbuf(struct rte_sched_subport *subport, uint32_t pos)
2752 {
2753         struct rte_sched_grinder *grinder = subport->grinder + pos;
2754         uint32_t qpos = grinder->qpos;
2755         struct rte_mbuf **qbase = grinder->qbase[qpos];
2756         uint16_t qsize = grinder->qsize;
2757         uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);
2758
2759         grinder->pkt = qbase[qr];
2760         rte_prefetch0(grinder->pkt);
2761
2762         if (unlikely((qr & 0x7) == 7)) {
2763                 uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);
2764
2765                 rte_prefetch0(qbase + qr_next);
2766         }
2767 }
2768
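/*
 * Grinder state machine (one step per grinder_handle() call):
 *
 *   PREFETCH_PIPE -> PREFETCH_TC_QUEUE_ARRAYS -> PREFETCH_MBUF -> READ_MBUF
 *
 * From READ_MBUF: stay on the same TC while packets and credits last,
 * go back to PREFETCH_MBUF when another TC of the same pipe is active,
 * to PREFETCH_TC_QUEUE_ARRAYS when another pipe is found, or to
 * PREFETCH_PIPE when no active pipe remains. Each state issues the
 * prefetches that hide the memory latency of the next state.
 */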
2769 static inline uint32_t
2770 grinder_handle(struct rte_sched_port *port,
2771         struct rte_sched_subport *subport, uint32_t pos)
2772 {
2773         struct rte_sched_grinder *grinder = subport->grinder + pos;
2774
2775         switch (grinder->state) {
2776         case e_GRINDER_PREFETCH_PIPE:
2777         {
2778                 if (grinder_next_pipe(port, subport, pos)) {
2779                         grinder_prefetch_pipe(subport, pos);
2780                         subport->busy_grinders++;
2781
2782                         grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
2783                         return 0;
2784                 }
2785
2786                 return 0;
2787         }
2788
2789         case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
2790         {
2791                 struct rte_sched_pipe *pipe = grinder->pipe;
2792
2793                 grinder->pipe_params = subport->pipe_profiles + pipe->profile;
2794                 grinder_prefetch_tc_queue_arrays(subport, pos);
2795                 grinder_credits_update(port, subport, pos);
2796
2797                 grinder->state = e_GRINDER_PREFETCH_MBUF;
2798                 return 0;
2799         }
2800
2801         case e_GRINDER_PREFETCH_MBUF:
2802         {
2803                 grinder_prefetch_mbuf(subport, pos);
2804
2805                 grinder->state = e_GRINDER_READ_MBUF;
2806                 return 0;
2807         }
2808
2809         case e_GRINDER_READ_MBUF:
2810         {
2811                 uint32_t wrr_active, result = 0;
2812
2813                 result = grinder_schedule(port, subport, pos);
2814
2815                 wrr_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE);
2816
2817                 /* Look for next packet within the same TC */
2818                 if (result && grinder->qmask) {
2819                         if (wrr_active)
2820                                 grinder_wrr(subport, pos);
2821
2822                         grinder_prefetch_mbuf(subport, pos);
2823
2824                         return 1;
2825                 }
2826
2827                 if (wrr_active)
2828                         grinder_wrr_store(subport, pos);
2829
2830                 /* Look for another active TC within same pipe */
2831                 if (grinder_next_tc(port, subport, pos)) {
2832                         grinder_prefetch_tc_queue_arrays(subport, pos);
2833
2834                         grinder->state = e_GRINDER_PREFETCH_MBUF;
2835                         return result;
2836                 }
2837
2838                 if (grinder->productive == 0 &&
2839                     subport->pipe_loop == RTE_SCHED_PIPE_INVALID)
2840                         subport->pipe_loop = grinder->pindex;
2841
2842                 grinder_evict(subport, pos);
2843
2844                 /* Look for another active pipe */
2845                 if (grinder_next_pipe(port, subport, pos)) {
2846                         grinder_prefetch_pipe(subport, pos);
2847
2848                         grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
2849                         return result;
2850                 }
2851
2852                 /* No active pipe found */
2853                 subport->busy_grinders--;
2854
2855                 grinder->state = e_GRINDER_PREFETCH_PIPE;
2856                 return result;
2857         }
2858
2859         default:
2860                 rte_panic("Algorithmic error (invalid state)\n");
2861                 return 0;
2862         }
2863 }
2864
2865 static inline void
2866 rte_sched_port_time_resync(struct rte_sched_port *port)
2867 {
2868         uint64_t cycles = rte_get_tsc_cycles();
2869         uint64_t cycles_diff;
2870         uint64_t bytes_diff;
2871         uint32_t i;
2872
2873         if (cycles < port->time_cpu_cycles)
2874                 port->time_cpu_cycles = 0;
2875
2876         cycles_diff = cycles - port->time_cpu_cycles;
2877         /* Compute elapsed time in bytes */
2878         bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT,
2879                                            port->inv_cycles_per_byte);
2880
2881         /* Advance port time */
2882         port->time_cpu_cycles +=
2883                 (bytes_diff * port->cycles_per_byte) >> RTE_SCHED_TIME_SHIFT;
2884         port->time_cpu_bytes += bytes_diff;
2885         if (port->time < port->time_cpu_bytes)
2886                 port->time = port->time_cpu_bytes;
2887
2888         /* Reset pipe loop detection */
2889         for (i = 0; i < port->n_subports_per_port; i++)
2890                 port->subports[i]->pipe_loop = RTE_SCHED_PIPE_INVALID;
2891 }
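
/*
 * Worked example for the cycles-to-bytes conversion above (illustrative
 * numbers): cycles_per_byte is pre-scaled by 2^RTE_SCHED_TIME_SHIFT =
 * 256. On a 1.25 GHz TSC driving a 10 Gbit/s port, one byte of wire
 * time equals one cycle, so cycles_per_byte = 256 and a gap of 1000
 * cycles converts to bytes_diff = (1000 << 8) / 256 = 1000 bytes of
 * port time.
 */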
2892
2893 static inline int
2894 rte_sched_port_exceptions(struct rte_sched_subport *subport, int second_pass)
2895 {
2896         int exceptions;
2897
2898         /* Check if any exception flag is set */
2899         exceptions = (second_pass && subport->busy_grinders == 0) ||
2900                 (subport->pipe_exhaustion == 1);
2901
2902         /* Clear exception flags */
2903         subport->pipe_exhaustion = 0;
2904
2905         return exceptions;
2906 }
2907
2908 int
2909 rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
2910 {
2911         struct rte_sched_subport *subport;
2912         uint32_t subport_id = port->subport_id;
2913         uint32_t i, n_subports = 0, count;
2914
2915         port->pkts_out = pkts;
2916         port->n_pkts_out = 0;
2917
2918         rte_sched_port_time_resync(port);
2919
2920         /* Take each grinder one step further */
2921         for (i = 0, count = 0; ; i++)  {
2922                 subport = port->subports[subport_id];
2923
2924                 count += grinder_handle(port, subport,
2925                                 i & (RTE_SCHED_PORT_N_GRINDERS - 1));
2926
2927                 if (count == n_pkts) {
2928                         subport_id++;
2929
2930                         if (subport_id == port->n_subports_per_port)
2931                                 subport_id = 0;
2932
2933                         port->subport_id = subport_id;
2934                         break;
2935                 }
2936
2937                 if (rte_sched_port_exceptions(subport, i >= RTE_SCHED_PORT_N_GRINDERS)) {
2938                         i = 0;
2939                         subport_id++;
2940                         n_subports++;
2941                 }
2942
2943                 if (subport_id == port->n_subports_per_port)
2944                         subport_id = 0;
2945
2946                 if (n_subports == port->n_subports_per_port) {
2947                         port->subport_id = subport_id;
2948                         break;
2949                 }
2950         }
2951
2952         return count;
2953 }
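
/*
 * Illustrative run-to-completion loop (kept out of the build): the
 * application stamps each packet with rte_sched_port_pkt_write(), then
 * pairs rte_sched_port_enqueue() with rte_sched_port_dequeue() on the
 * same lcore. rx_burst()/tx_burst() and the fixed classification below
 * are hypothetical placeholders for the application's own logic.
 */
#if 0
static void
app_sched_loop(struct rte_sched_port *port)
{
        struct rte_mbuf *pkts[64];
        uint32_t i, n;

        for ( ; ; ) {
                n = rx_burst(pkts, 64);	/* hypothetical RX */

                for (i = 0; i < n; i++)	/* subport 0, pipe 0, TC 0, queue 0 */
                        rte_sched_port_pkt_write(port, pkts[i], 0, 0, 0, 0,
                                RTE_COLOR_GREEN);

                rte_sched_port_enqueue(port, pkts, n);
                n = rte_sched_port_dequeue(port, pkts, 64);
                tx_burst(pkts, n);	/* hypothetical TX */
        }
}
#endif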