drivers/net/softnic/rte_eth_softnic_tm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdint.h>
35 #include <stdlib.h>
36 #include <string.h>
37
38 #include <rte_malloc.h>
39
40 #include "rte_eth_softnic_internals.h"
41 #include "rte_eth_softnic.h"
42
43 #define BYTES_IN_MBPS           (1000 * 1000 / 8)
44 #define SUBPORT_TC_PERIOD       10
45 #define PIPE_TC_PERIOD          40
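/*
 * Note: BYTES_IN_MBPS converts a rate given in Mbps to bytes per second
 * (1 Mbps = 125,000 bytes/s). SUBPORT_TC_PERIOD and PIPE_TC_PERIOD are
 * assumed to be the traffic class credit update periods, in milliseconds,
 * used when building the librte_sched subport and pipe profiles (tc_period).
 */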
46
47 int
48 tm_params_check(struct pmd_params *params, uint32_t hard_rate)
49 {
50         uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
51         uint32_t i;
52
53         /* rate */
54         if (params->soft.tm.rate) {
55                 if (params->soft.tm.rate > hard_rate_bytes_per_sec)
56                         return -EINVAL;
57         } else {
58                 params->soft.tm.rate =
59                         (hard_rate_bytes_per_sec > UINT32_MAX) ?
60                                 UINT32_MAX : hard_rate_bytes_per_sec;
61         }
62
63         /* nb_queues */
64         if (params->soft.tm.nb_queues == 0)
65                 return -EINVAL;
66
67         if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
68                 params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;
69
70         params->soft.tm.nb_queues =
71                 rte_align32pow2(params->soft.tm.nb_queues);
72
73         /* qsize */
74         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
75                 if (params->soft.tm.qsize[i] == 0)
76                         return -EINVAL;
77
78                 params->soft.tm.qsize[i] =
79                         rte_align32pow2(params->soft.tm.qsize[i]);
80         }
81
82         /* enq_bsz, deq_bsz */
83         if (params->soft.tm.enq_bsz == 0 ||
84                 params->soft.tm.deq_bsz == 0 ||
85                 params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
86                 return -EINVAL;
87
88         return 0;
89 }
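/*
 * Illustrative parameter set that passes the checks above (assuming the
 * default RTE_SCHED_QUEUES_PER_PIPE of 16 and hard_rate = 10000 Mbps; the
 * numbers are only an example):
 *   rate      = 0            defaults to min(hard rate in bytes/s, UINT32_MAX)
 *   nb_queues = 4096         already a power of two, >= 16
 *   qsize[i]  = 64           non-zero, rounded up to a power of two
 *   enq_bsz   = 32, deq_bsz = 24    both non-zero, deq_bsz < enq_bsz
 */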
90
91 static void
92 tm_hierarchy_init(struct pmd_internals *p)
93 {
94         memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
95
96         /* Initialize shaper profile list */
97         TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
98
99         /* Initialize shared shaper list */
100         TAILQ_INIT(&p->soft.tm.h.shared_shapers);
101
102         /* Initialize wred profile list */
103         TAILQ_INIT(&p->soft.tm.h.wred_profiles);
104
105         /* Initialize TM node list */
106         TAILQ_INIT(&p->soft.tm.h.nodes);
107 }
108
109 static void
110 tm_hierarchy_uninit(struct pmd_internals *p)
111 {
112         /* Remove all nodes */
113         for ( ; ; ) {
114                 struct tm_node *tm_node;
115
116                 tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
117                 if (tm_node == NULL)
118                         break;
119
120                 TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
121                 free(tm_node);
122         }
123
124         /* Remove all WRED profiles */
125         for ( ; ; ) {
126                 struct tm_wred_profile *wred_profile;
127
128                 wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
129                 if (wred_profile == NULL)
130                         break;
131
132                 TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
133                 free(wred_profile);
134         }
135
136         /* Remove all shared shapers */
137         for ( ; ; ) {
138                 struct tm_shared_shaper *shared_shaper;
139
140                 shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
141                 if (shared_shaper == NULL)
142                         break;
143
144                 TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
145                 free(shared_shaper);
146         }
147
148         /* Remove all shaper profiles */
149         for ( ; ; ) {
150                 struct tm_shaper_profile *shaper_profile;
151
152                 shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
153                 if (shaper_profile == NULL)
154                         break;
155
156                 TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
157                         shaper_profile, node);
158                 free(shaper_profile);
159         }
160
161         memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
162 }
163
164 int
165 tm_init(struct pmd_internals *p,
166         struct pmd_params *params,
167         int numa_node)
168 {
169         uint32_t enq_bsz = params->soft.tm.enq_bsz;
170         uint32_t deq_bsz = params->soft.tm.deq_bsz;
171
172         p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
173                 2 * enq_bsz * sizeof(struct rte_mbuf *),
174                 0,
175                 numa_node);
176
177         if (p->soft.tm.pkts_enq == NULL)
178                 return -ENOMEM;
179
180         p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
181                 deq_bsz * sizeof(struct rte_mbuf *),
182                 0,
183                 numa_node);
184
185         if (p->soft.tm.pkts_deq == NULL) {
186                 rte_free(p->soft.tm.pkts_enq);
187                 return -ENOMEM;
188         }
189
190         tm_hierarchy_init(p);
191
192         return 0;
193 }
194
195 void
196 tm_free(struct pmd_internals *p)
197 {
198         tm_hierarchy_uninit(p);
199         rte_free(p->soft.tm.pkts_enq);
200         rte_free(p->soft.tm.pkts_deq);
201 }
202
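/*
 * Note: tm_start() follows the usual librte_sched bring-up order:
 * rte_sched_port_config() first, then rte_sched_subport_config() for each
 * subport, then rte_sched_pipe_config() for every pipe that has a profile
 * assigned (negative pipe_to_profile[] entries mean "not configured" and
 * are skipped).
 */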
203 int
204 tm_start(struct pmd_internals *p)
205 {
206         struct tm_params *t = &p->soft.tm.params;
207         uint32_t n_subports, subport_id;
208         int status;
209
210         /* Is hierarchy frozen? */
211         if (p->soft.tm.hierarchy_frozen == 0)
212                 return -1;
213
214         /* Port */
215         p->soft.tm.sched = rte_sched_port_config(&t->port_params);
216         if (p->soft.tm.sched == NULL)
217                 return -1;
218
219         /* Subport */
220         n_subports = t->port_params.n_subports_per_port;
221         for (subport_id = 0; subport_id < n_subports; subport_id++) {
222                 uint32_t n_pipes_per_subport =
223                         t->port_params.n_pipes_per_subport;
224                 uint32_t pipe_id;
225
226                 status = rte_sched_subport_config(p->soft.tm.sched,
227                         subport_id,
228                         &t->subport_params[subport_id]);
229                 if (status) {
230                         rte_sched_port_free(p->soft.tm.sched);
231                         return -1;
232                 }
233
234                 /* Pipe */
235                 n_pipes_per_subport = t->port_params.n_pipes_per_subport;
236                 for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
237                         int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
238                                 pipe_id;
239                         int profile_id = t->pipe_to_profile[pos];
240
241                         if (profile_id < 0)
242                                 continue;
243
244                         status = rte_sched_pipe_config(p->soft.tm.sched,
245                                 subport_id,
246                                 pipe_id,
247                                 profile_id);
248                         if (status) {
249                                 rte_sched_port_free(p->soft.tm.sched);
250                                 return -1;
251                         }
252                 }
253         }
254
255         return 0;
256 }
257
258 void
259 tm_stop(struct pmd_internals *p)
260 {
261         if (p->soft.tm.sched)
262                 rte_sched_port_free(p->soft.tm.sched);
263
264         /* Unfreeze hierarchy */
265         p->soft.tm.hierarchy_frozen = 0;
266 }
267
268 static struct tm_shaper_profile *
269 tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
270 {
271         struct pmd_internals *p = dev->data->dev_private;
272         struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
273         struct tm_shaper_profile *sp;
274
275         TAILQ_FOREACH(sp, spl, node)
276                 if (shaper_profile_id == sp->shaper_profile_id)
277                         return sp;
278
279         return NULL;
280 }
281
282 static struct tm_shared_shaper *
283 tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
284 {
285         struct pmd_internals *p = dev->data->dev_private;
286         struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
287         struct tm_shared_shaper *ss;
288
289         TAILQ_FOREACH(ss, ssl, node)
290                 if (shared_shaper_id == ss->shared_shaper_id)
291                         return ss;
292
293         return NULL;
294 }
295
296 static struct tm_wred_profile *
297 tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
298 {
299         struct pmd_internals *p = dev->data->dev_private;
300         struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
301         struct tm_wred_profile *wp;
302
303         TAILQ_FOREACH(wp, wpl, node)
304                 if (wred_profile_id == wp->wred_profile_id)
305                         return wp;
306
307         return NULL;
308 }
309
310 static struct tm_node *
311 tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
312 {
313         struct pmd_internals *p = dev->data->dev_private;
314         struct tm_node_list *nl = &p->soft.tm.h.nodes;
315         struct tm_node *n;
316
317         TAILQ_FOREACH(n, nl, node)
318                 if (n->node_id == node_id)
319                         return n;
320
321         return NULL;
322 }
323
324 static struct tm_node *
325 tm_root_node_present(struct rte_eth_dev *dev)
326 {
327         struct pmd_internals *p = dev->data->dev_private;
328         struct tm_node_list *nl = &p->soft.tm.h.nodes;
329         struct tm_node *n;
330
331         TAILQ_FOREACH(n, nl, node)
332                 if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
333                         return n;
334
335         return NULL;
336 }
337
338 static uint32_t
339 tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
340 {
341         struct pmd_internals *p = dev->data->dev_private;
342         struct tm_node_list *nl = &p->soft.tm.h.nodes;
343         struct tm_node *ns;
344         uint32_t subport_id;
345
346         subport_id = 0;
347         TAILQ_FOREACH(ns, nl, node) {
348                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
349                         continue;
350
351                 if (ns->node_id == subport_node->node_id)
352                         return subport_id;
353
354                 subport_id++;
355         }
356
357         return UINT32_MAX;
358 }
359
360 static uint32_t
361 tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
362 {
363         struct pmd_internals *p = dev->data->dev_private;
364         struct tm_node_list *nl = &p->soft.tm.h.nodes;
365         struct tm_node *np;
366         uint32_t pipe_id;
367
368         pipe_id = 0;
369         TAILQ_FOREACH(np, nl, node) {
370                 if (np->level != TM_NODE_LEVEL_PIPE ||
371                         np->parent_node_id != pipe_node->parent_node_id)
372                         continue;
373
374                 if (np->node_id == pipe_node->node_id)
375                         return pipe_id;
376
377                 pipe_id++;
378         }
379
380         return UINT32_MAX;
381 }
382
383 static uint32_t
384 tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
385 {
386         return tc_node->priority;
387 }
388
389 static uint32_t
390 tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
391 {
392         struct pmd_internals *p = dev->data->dev_private;
393         struct tm_node_list *nl = &p->soft.tm.h.nodes;
394         struct tm_node *nq;
395         uint32_t queue_id;
396
397         queue_id = 0;
398         TAILQ_FOREACH(nq, nl, node) {
399                 if (nq->level != TM_NODE_LEVEL_QUEUE ||
400                         nq->parent_node_id != queue_node->parent_node_id)
401                         continue;
402
403                 if (nq->node_id == queue_node->node_id)
404                         return queue_id;
405
406                 queue_id++;
407         }
408
409         return UINT32_MAX;
410 }
411
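/*
 * Illustrative sizing (assuming the default RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS
 * and RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE values of 4): nb_queues = 4096 yields
 * at most 1024 TC nodes, 256 pipe nodes and 256 subport nodes, plus the single
 * port (root) node.
 */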
412 static uint32_t
413 tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
414 {
415         struct pmd_internals *p = dev->data->dev_private;
416         uint32_t n_queues_max = p->params.soft.tm.nb_queues;
417         uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
418         uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
419         uint32_t n_subports_max = n_pipes_max;
420         uint32_t n_root_max = 1;
421
422         switch (level) {
423         case TM_NODE_LEVEL_PORT:
424                 return n_root_max;
425         case TM_NODE_LEVEL_SUBPORT:
426                 return n_subports_max;
427         case TM_NODE_LEVEL_PIPE:
428                 return n_pipes_max;
429         case TM_NODE_LEVEL_TC:
430                 return n_tc_max;
431         case TM_NODE_LEVEL_QUEUE:
432         default:
433                 return n_queues_max;
434         }
435 }
436
437 /* Traffic manager node type get */
438 static int
439 pmd_tm_node_type_get(struct rte_eth_dev *dev,
440         uint32_t node_id,
441         int *is_leaf,
442         struct rte_tm_error *error)
443 {
444         struct pmd_internals *p = dev->data->dev_private;
445
446         if (is_leaf == NULL)
447                 return -rte_tm_error_set(error,
448                    EINVAL,
449                    RTE_TM_ERROR_TYPE_UNSPECIFIED,
450                    NULL,
451                    rte_strerror(EINVAL));
452
453         if (node_id == RTE_TM_NODE_ID_NULL ||
454                 (tm_node_search(dev, node_id) == NULL))
455                 return -rte_tm_error_set(error,
456                    EINVAL,
457                    RTE_TM_ERROR_TYPE_NODE_ID,
458                    NULL,
459                    rte_strerror(EINVAL));
460
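        /*
         * Driver convention (see the node_add_check_*() helpers below): leaf
         * (queue) nodes use node IDs 0 .. nb_queues - 1, so any ID below
         * nb_queues identifies a leaf node.
         */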
461         *is_leaf = node_id < p->params.soft.tm.nb_queues;
462
463         return 0;
464 }
465
466 #ifdef RTE_SCHED_RED
467 #define WRED_SUPPORTED                                          1
468 #else
469 #define WRED_SUPPORTED                                          0
470 #endif
471
472 #define STATS_MASK_DEFAULT                                      \
473         (RTE_TM_STATS_N_PKTS |                                  \
474         RTE_TM_STATS_N_BYTES |                                  \
475         RTE_TM_STATS_N_PKTS_GREEN_DROPPED |                     \
476         RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
477
478 #define STATS_MASK_QUEUE                                                \
479         (STATS_MASK_DEFAULT |                                   \
480         RTE_TM_STATS_N_PKTS_QUEUED)
481
482 static const struct rte_tm_capabilities tm_cap = {
483         .n_nodes_max = UINT32_MAX,
484         .n_levels_max = TM_NODE_LEVEL_MAX,
485
486         .non_leaf_nodes_identical = 0,
487         .leaf_nodes_identical = 1,
488
489         .shaper_n_max = UINT32_MAX,
490         .shaper_private_n_max = UINT32_MAX,
491         .shaper_private_dual_rate_n_max = 0,
492         .shaper_private_rate_min = 1,
493         .shaper_private_rate_max = UINT32_MAX,
494
495         .shaper_shared_n_max = UINT32_MAX,
496         .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
497         .shaper_shared_n_shapers_per_node_max = 1,
498         .shaper_shared_dual_rate_n_max = 0,
499         .shaper_shared_rate_min = 1,
500         .shaper_shared_rate_max = UINT32_MAX,
501
502         .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
503         .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
504
505         .sched_n_children_max = UINT32_MAX,
506         .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
507         .sched_wfq_n_children_per_group_max = UINT32_MAX,
508         .sched_wfq_n_groups_max = 1,
509         .sched_wfq_weight_max = UINT32_MAX,
510
511         .cman_head_drop_supported = 0,
512         .cman_wred_context_n_max = 0,
513         .cman_wred_context_private_n_max = 0,
514         .cman_wred_context_shared_n_max = 0,
515         .cman_wred_context_shared_n_nodes_per_context_max = 0,
516         .cman_wred_context_shared_n_contexts_per_node_max = 0,
517
518         .mark_vlan_dei_supported = {0, 0, 0},
519         .mark_ip_ecn_tcp_supported = {0, 0, 0},
520         .mark_ip_ecn_sctp_supported = {0, 0, 0},
521         .mark_ip_dscp_supported = {0, 0, 0},
522
523         .dynamic_update_mask = 0,
524
525         .stats_mask = STATS_MASK_QUEUE,
526 };
527
528 /* Traffic manager capabilities get */
529 static int
530 pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
531         struct rte_tm_capabilities *cap,
532         struct rte_tm_error *error)
533 {
534         if (cap == NULL)
535                 return -rte_tm_error_set(error,
536                    EINVAL,
537                    RTE_TM_ERROR_TYPE_CAPABILITIES,
538                    NULL,
539                    rte_strerror(EINVAL));
540
541         memcpy(cap, &tm_cap, sizeof(*cap));
542
543         cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
544                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
545                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
546                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
547                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
548
549         cap->shaper_private_n_max =
550                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
551                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
552                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
553                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);
554
555         cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
556                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);
557
558         cap->shaper_n_max = cap->shaper_private_n_max +
559                 cap->shaper_shared_n_max;
560
561         cap->shaper_shared_n_nodes_per_shaper_max =
562                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);
563
564         cap->sched_n_children_max = RTE_MAX(
565                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
566                 (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);
567
568         cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
569
570         if (WRED_SUPPORTED)
571                 cap->cman_wred_context_private_n_max =
572                         tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
573
574         cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
575                 cap->cman_wred_context_shared_n_max;
576
577         return 0;
578 }
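/*
 * Worked example (same default constants and nb_queues = 4096 as above):
 * n_nodes_max = 1 + 256 + 256 + 1024 + 4096 = 5633,
 * shaper_private_n_max = 1 + 256 + 256 + 1024 = 1537,
 * shaper_shared_n_max = 4 * 256 = 1024.
 */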
579
580 static const struct rte_tm_level_capabilities tm_level_cap[] = {
581         [TM_NODE_LEVEL_PORT] = {
582                 .n_nodes_max = 1,
583                 .n_nodes_nonleaf_max = 1,
584                 .n_nodes_leaf_max = 0,
585                 .non_leaf_nodes_identical = 1,
586                 .leaf_nodes_identical = 0,
587
588                 .nonleaf = {
589                         .shaper_private_supported = 1,
590                         .shaper_private_dual_rate_supported = 0,
591                         .shaper_private_rate_min = 1,
592                         .shaper_private_rate_max = UINT32_MAX,
593                         .shaper_shared_n_max = 0,
594
595                         .sched_n_children_max = UINT32_MAX,
596                         .sched_sp_n_priorities_max = 1,
597                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
598                         .sched_wfq_n_groups_max = 1,
599                         .sched_wfq_weight_max = 1,
600
601                         .stats_mask = STATS_MASK_DEFAULT,
602                 },
603         },
604
605         [TM_NODE_LEVEL_SUBPORT] = {
606                 .n_nodes_max = UINT32_MAX,
607                 .n_nodes_nonleaf_max = UINT32_MAX,
608                 .n_nodes_leaf_max = 0,
609                 .non_leaf_nodes_identical = 1,
610                 .leaf_nodes_identical = 0,
611
612                 .nonleaf = {
613                         .shaper_private_supported = 1,
614                         .shaper_private_dual_rate_supported = 0,
615                         .shaper_private_rate_min = 1,
616                         .shaper_private_rate_max = UINT32_MAX,
617                         .shaper_shared_n_max = 0,
618
619                         .sched_n_children_max = UINT32_MAX,
620                         .sched_sp_n_priorities_max = 1,
621                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
622                         .sched_wfq_n_groups_max = 1,
623 #ifdef RTE_SCHED_SUBPORT_TC_OV
624                         .sched_wfq_weight_max = UINT32_MAX,
625 #else
626                         .sched_wfq_weight_max = 1,
627 #endif
628                         .stats_mask = STATS_MASK_DEFAULT,
629                 },
630         },
631
632         [TM_NODE_LEVEL_PIPE] = {
633                 .n_nodes_max = UINT32_MAX,
634                 .n_nodes_nonleaf_max = UINT32_MAX,
635                 .n_nodes_leaf_max = 0,
636                 .non_leaf_nodes_identical = 1,
637                 .leaf_nodes_identical = 0,
638
639                 .nonleaf = {
640                         .shaper_private_supported = 1,
641                         .shaper_private_dual_rate_supported = 0,
642                         .shaper_private_rate_min = 1,
643                         .shaper_private_rate_max = UINT32_MAX,
644                         .shaper_shared_n_max = 0,
645
646                         .sched_n_children_max =
647                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
648                         .sched_sp_n_priorities_max =
649                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
650                         .sched_wfq_n_children_per_group_max = 1,
651                         .sched_wfq_n_groups_max = 0,
652                         .sched_wfq_weight_max = 1,
653
654                         .stats_mask = STATS_MASK_DEFAULT,
655                 },
656         },
657
658         [TM_NODE_LEVEL_TC] = {
659                 .n_nodes_max = UINT32_MAX,
660                 .n_nodes_nonleaf_max = UINT32_MAX,
661                 .n_nodes_leaf_max = 0,
662                 .non_leaf_nodes_identical = 1,
663                 .leaf_nodes_identical = 0,
664
665                 .nonleaf = {
666                         .shaper_private_supported = 1,
667                         .shaper_private_dual_rate_supported = 0,
668                         .shaper_private_rate_min = 1,
669                         .shaper_private_rate_max = UINT32_MAX,
670                         .shaper_shared_n_max = 1,
671
672                         .sched_n_children_max =
673                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
674                         .sched_sp_n_priorities_max = 1,
675                         .sched_wfq_n_children_per_group_max =
676                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
677                         .sched_wfq_n_groups_max = 1,
678                         .sched_wfq_weight_max = UINT32_MAX,
679
680                         .stats_mask = STATS_MASK_DEFAULT,
681                 },
682         },
683
684         [TM_NODE_LEVEL_QUEUE] = {
685                 .n_nodes_max = UINT32_MAX,
686                 .n_nodes_nonleaf_max = 0,
687                 .n_nodes_leaf_max = UINT32_MAX,
688                 .non_leaf_nodes_identical = 0,
689                 .leaf_nodes_identical = 1,
690
691                 .leaf = {
692                         .shaper_private_supported = 0,
693                         .shaper_private_dual_rate_supported = 0,
694                         .shaper_private_rate_min = 0,
695                         .shaper_private_rate_max = 0,
696                         .shaper_shared_n_max = 0,
697
698                         .cman_head_drop_supported = 0,
699                         .cman_wred_context_private_supported = WRED_SUPPORTED,
700                         .cman_wred_context_shared_n_max = 0,
701
702                         .stats_mask = STATS_MASK_QUEUE,
703                 },
704         },
705 };
706
707 /* Traffic manager level capabilities get */
708 static int
709 pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
710         uint32_t level_id,
711         struct rte_tm_level_capabilities *cap,
712         struct rte_tm_error *error)
713 {
714         if (cap == NULL)
715                 return -rte_tm_error_set(error,
716                    EINVAL,
717                    RTE_TM_ERROR_TYPE_CAPABILITIES,
718                    NULL,
719                    rte_strerror(EINVAL));
720
721         if (level_id >= TM_NODE_LEVEL_MAX)
722                 return -rte_tm_error_set(error,
723                    EINVAL,
724                    RTE_TM_ERROR_TYPE_LEVEL_ID,
725                    NULL,
726                    rte_strerror(EINVAL));
727
728         memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));
729
730         switch (level_id) {
731         case TM_NODE_LEVEL_PORT:
732                 cap->nonleaf.sched_n_children_max =
733                         tm_level_get_max_nodes(dev,
734                                 TM_NODE_LEVEL_SUBPORT);
735                 cap->nonleaf.sched_wfq_n_children_per_group_max =
736                         cap->nonleaf.sched_n_children_max;
737                 break;
738
739         case TM_NODE_LEVEL_SUBPORT:
740                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
741                         TM_NODE_LEVEL_SUBPORT);
742                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
743                 cap->nonleaf.sched_n_children_max =
744                         tm_level_get_max_nodes(dev,
745                                 TM_NODE_LEVEL_PIPE);
746                 cap->nonleaf.sched_wfq_n_children_per_group_max =
747                         cap->nonleaf.sched_n_children_max;
748                 break;
749
750         case TM_NODE_LEVEL_PIPE:
751                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
752                         TM_NODE_LEVEL_PIPE);
753                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
754                 break;
755
756         case TM_NODE_LEVEL_TC:
757                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
758                         TM_NODE_LEVEL_TC);
759                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
760                 break;
761
762         case TM_NODE_LEVEL_QUEUE:
763         default:
764                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
765                         TM_NODE_LEVEL_QUEUE);
766                 cap->n_nodes_leaf_max = cap->n_nodes_max;
767                 break;
768         }
769
770         return 0;
771 }
772
773 static const struct rte_tm_node_capabilities tm_node_cap[] = {
774         [TM_NODE_LEVEL_PORT] = {
775                 .shaper_private_supported = 1,
776                 .shaper_private_dual_rate_supported = 0,
777                 .shaper_private_rate_min = 1,
778                 .shaper_private_rate_max = UINT32_MAX,
779                 .shaper_shared_n_max = 0,
780
781                 .nonleaf = {
782                         .sched_n_children_max = UINT32_MAX,
783                         .sched_sp_n_priorities_max = 1,
784                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
785                         .sched_wfq_n_groups_max = 1,
786                         .sched_wfq_weight_max = 1,
787                 },
788
789                 .stats_mask = STATS_MASK_DEFAULT,
790         },
791
792         [TM_NODE_LEVEL_SUBPORT] = {
793                 .shaper_private_supported = 1,
794                 .shaper_private_dual_rate_supported = 0,
795                 .shaper_private_rate_min = 1,
796                 .shaper_private_rate_max = UINT32_MAX,
797                 .shaper_shared_n_max = 0,
798
799                 .nonleaf = {
800                         .sched_n_children_max = UINT32_MAX,
801                         .sched_sp_n_priorities_max = 1,
802                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
803                         .sched_wfq_n_groups_max = 1,
804                         .sched_wfq_weight_max = UINT32_MAX,
805                 },
806
807                 .stats_mask = STATS_MASK_DEFAULT,
808         },
809
810         [TM_NODE_LEVEL_PIPE] = {
811                 .shaper_private_supported = 1,
812                 .shaper_private_dual_rate_supported = 0,
813                 .shaper_private_rate_min = 1,
814                 .shaper_private_rate_max = UINT32_MAX,
815                 .shaper_shared_n_max = 0,
816
817                 .nonleaf = {
818                         .sched_n_children_max =
819                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
820                         .sched_sp_n_priorities_max =
821                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
822                         .sched_wfq_n_children_per_group_max = 1,
823                         .sched_wfq_n_groups_max = 0,
824                         .sched_wfq_weight_max = 1,
825                 },
826
827                 .stats_mask = STATS_MASK_DEFAULT,
828         },
829
830         [TM_NODE_LEVEL_TC] = {
831                 .shaper_private_supported = 1,
832                 .shaper_private_dual_rate_supported = 0,
833                 .shaper_private_rate_min = 1,
834                 .shaper_private_rate_max = UINT32_MAX,
835                 .shaper_shared_n_max = 1,
836
837                 .nonleaf = {
838                         .sched_n_children_max =
839                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
840                         .sched_sp_n_priorities_max = 1,
841                         .sched_wfq_n_children_per_group_max =
842                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
843                         .sched_wfq_n_groups_max = 1,
844                         .sched_wfq_weight_max = UINT32_MAX,
845                 },
846
847                 .stats_mask = STATS_MASK_DEFAULT,
848         },
849
850         [TM_NODE_LEVEL_QUEUE] = {
851                 .shaper_private_supported = 0,
852                 .shaper_private_dual_rate_supported = 0,
853                 .shaper_private_rate_min = 0,
854                 .shaper_private_rate_max = 0,
855                 .shaper_shared_n_max = 0,
856
857
858                 .leaf = {
859                         .cman_head_drop_supported = 0,
860                         .cman_wred_context_private_supported = WRED_SUPPORTED,
861                         .cman_wred_context_shared_n_max = 0,
862                 },
863
864                 .stats_mask = STATS_MASK_QUEUE,
865         },
866 };
867
868 /* Traffic manager node capabilities get */
869 static int
870 pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
871         uint32_t node_id,
872         struct rte_tm_node_capabilities *cap,
873         struct rte_tm_error *error)
874 {
875         struct tm_node *tm_node;
876
877         if (cap == NULL)
878                 return -rte_tm_error_set(error,
879                    EINVAL,
880                    RTE_TM_ERROR_TYPE_CAPABILITIES,
881                    NULL,
882                    rte_strerror(EINVAL));
883
884         tm_node = tm_node_search(dev, node_id);
885         if (tm_node == NULL)
886                 return -rte_tm_error_set(error,
887                    EINVAL,
888                    RTE_TM_ERROR_TYPE_NODE_ID,
889                    NULL,
890                    rte_strerror(EINVAL));
891
892         memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));
893
894         switch (tm_node->level) {
895         case TM_NODE_LEVEL_PORT:
896                 cap->nonleaf.sched_n_children_max =
897                         tm_level_get_max_nodes(dev,
898                                 TM_NODE_LEVEL_SUBPORT);
899                 cap->nonleaf.sched_wfq_n_children_per_group_max =
900                         cap->nonleaf.sched_n_children_max;
901                 break;
902
903         case TM_NODE_LEVEL_SUBPORT:
904                 cap->nonleaf.sched_n_children_max =
905                         tm_level_get_max_nodes(dev,
906                                 TM_NODE_LEVEL_PIPE);
907                 cap->nonleaf.sched_wfq_n_children_per_group_max =
908                         cap->nonleaf.sched_n_children_max;
909                 break;
910
911         case TM_NODE_LEVEL_PIPE:
912         case TM_NODE_LEVEL_TC:
913         case TM_NODE_LEVEL_QUEUE:
914         default:
915                 break;
916         }
917
918         return 0;
919 }
920
921 static int
922 shaper_profile_check(struct rte_eth_dev *dev,
923         uint32_t shaper_profile_id,
924         struct rte_tm_shaper_params *profile,
925         struct rte_tm_error *error)
926 {
927         struct tm_shaper_profile *sp;
928
929         /* Shaper profile ID must not be NONE. */
930         if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
931                 return -rte_tm_error_set(error,
932                         EINVAL,
933                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
934                         NULL,
935                         rte_strerror(EINVAL));
936
937         /* Shaper profile must not exist. */
938         sp = tm_shaper_profile_search(dev, shaper_profile_id);
939         if (sp)
940                 return -rte_tm_error_set(error,
941                         EEXIST,
942                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
943                         NULL,
944                         rte_strerror(EEXIST));
945
946         /* Profile must not be NULL. */
947         if (profile == NULL)
948                 return -rte_tm_error_set(error,
949                         EINVAL,
950                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
951                         NULL,
952                         rte_strerror(EINVAL));
953
954         /* Peak rate: non-zero, 32-bit */
955         if (profile->peak.rate == 0 ||
956                 profile->peak.rate >= UINT32_MAX)
957                 return -rte_tm_error_set(error,
958                         EINVAL,
959                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
960                         NULL,
961                         rte_strerror(EINVAL));
962
963         /* Peak size: non-zero, 32-bit */
964         if (profile->peak.size == 0 ||
965                 profile->peak.size >= UINT32_MAX)
966                 return -rte_tm_error_set(error,
967                         EINVAL,
968                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
969                         NULL,
970                         rte_strerror(EINVAL));
971
972         /* Dual-rate profiles are not supported. */
973         if (profile->committed.rate != 0)
974                 return -rte_tm_error_set(error,
975                         EINVAL,
976                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
977                         NULL,
978                         rte_strerror(EINVAL));
979
980         /* Packet length adjust: 24 bytes */
981         if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
982                 return -rte_tm_error_set(error,
983                         EINVAL,
984                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
985                         NULL,
986                         rte_strerror(EINVAL));
987
988         return 0;
989 }
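/*
 * Illustrative profile that passes the checks above (the numeric values are
 * only an example):
 *
 *   struct rte_tm_shaper_params profile = {
 *           .committed = { .rate = 0, .size = 0 },
 *           .peak = { .rate = 125000000, .size = 1000 },   // 1 Gbps, 1000-byte bucket
 *           .pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,  // 24 bytes
 *   };
 */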
990
991 /* Traffic manager shaper profile add */
992 static int
993 pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
994         uint32_t shaper_profile_id,
995         struct rte_tm_shaper_params *profile,
996         struct rte_tm_error *error)
997 {
998         struct pmd_internals *p = dev->data->dev_private;
999         struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
1000         struct tm_shaper_profile *sp;
1001         int status;
1002
1003         /* Check input params */
1004         status = shaper_profile_check(dev, shaper_profile_id, profile, error);
1005         if (status)
1006                 return status;
1007
1008         /* Memory allocation */
1009         sp = calloc(1, sizeof(struct tm_shaper_profile));
1010         if (sp == NULL)
1011                 return -rte_tm_error_set(error,
1012                         ENOMEM,
1013                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1014                         NULL,
1015                         rte_strerror(ENOMEM));
1016
1017         /* Fill in */
1018         sp->shaper_profile_id = shaper_profile_id;
1019         memcpy(&sp->params, profile, sizeof(sp->params));
1020
1021         /* Add to list */
1022         TAILQ_INSERT_TAIL(spl, sp, node);
1023         p->soft.tm.h.n_shaper_profiles++;
1024
1025         return 0;
1026 }
1027
1028 /* Traffic manager shaper profile delete */
1029 static int
1030 pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
1031         uint32_t shaper_profile_id,
1032         struct rte_tm_error *error)
1033 {
1034         struct pmd_internals *p = dev->data->dev_private;
1035         struct tm_shaper_profile *sp;
1036
1037         /* Check existing */
1038         sp = tm_shaper_profile_search(dev, shaper_profile_id);
1039         if (sp == NULL)
1040                 return -rte_tm_error_set(error,
1041                         EINVAL,
1042                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1043                         NULL,
1044                         rte_strerror(EINVAL));
1045
1046         /* Check unused */
1047         if (sp->n_users)
1048                 return -rte_tm_error_set(error,
1049                         EBUSY,
1050                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1051                         NULL,
1052                         rte_strerror(EBUSY));
1053
1054         /* Remove from list */
1055         TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
1056         p->soft.tm.h.n_shaper_profiles--;
1057         free(sp);
1058
1059         return 0;
1060 }
1061
1062 static struct tm_node *
1063 tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
1064         struct tm_shared_shaper *ss)
1065 {
1066         struct pmd_internals *p = dev->data->dev_private;
1067         struct tm_node_list *nl = &p->soft.tm.h.nodes;
1068         struct tm_node *n;
1069
1070         /* Subport level: find the TC node that uses this shared shaper */
1071         TAILQ_FOREACH(n, nl, node) {
1072                 if (n->level != TM_NODE_LEVEL_TC ||
1073                         n->params.n_shared_shapers == 0 ||
1074                         n->params.shared_shaper_id[0] != ss->shared_shaper_id)
1075                         continue;
1076
1077                 return n;
1078         }
1079
1080         return NULL;
1081 }
1082
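/*
 * Note: this helper implements the run-time side of a shared shaper update:
 * it re-applies the subport configuration with a single tc_rate changed and,
 * only if rte_sched_subport_config() succeeds, moves the shared shaper (and
 * the profile user counts) from the old shaper profile to the new one.
 */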
1083 static int
1084 update_subport_tc_rate(struct rte_eth_dev *dev,
1085         struct tm_node *nt,
1086         struct tm_shared_shaper *ss,
1087         struct tm_shaper_profile *sp_new)
1088 {
1089         struct pmd_internals *p = dev->data->dev_private;
1090         uint32_t tc_id = tm_node_tc_id(dev, nt);
1091
1092         struct tm_node *np = nt->parent_node;
1093
1094         struct tm_node *ns = np->parent_node;
1095         uint32_t subport_id = tm_node_subport_id(dev, ns);
1096
1097         struct rte_sched_subport_params subport_params;
1098
1099         struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
1100                 ss->shaper_profile_id);
1101
1102         /* Derive new subport configuration. */
1103         memcpy(&subport_params,
1104                 &p->soft.tm.params.subport_params[subport_id],
1105                 sizeof(subport_params));
1106         subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
1107
1108         /* Update the subport configuration. */
1109         if (rte_sched_subport_config(p->soft.tm.sched,
1110                 subport_id, &subport_params))
1111                 return -1;
1112
1113         /* Commit changes. */
1114         sp_old->n_users--;
1115
1116         ss->shaper_profile_id = sp_new->shaper_profile_id;
1117         sp_new->n_users++;
1118
1119         memcpy(&p->soft.tm.params.subport_params[subport_id],
1120                 &subport_params,
1121                 sizeof(subport_params));
1122
1123         return 0;
1124 }
1125
1126 /* Traffic manager shared shaper add/update */
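/*
 * Before the hierarchy is frozen this call only records the shared shaper;
 * once the hierarchy is frozen it is treated as a run-time update and the new
 * profile's peak rate is pushed into the corresponding subport TC rate via
 * update_subport_tc_rate().
 */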
1127 static int
1128 pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
1129         uint32_t shared_shaper_id,
1130         uint32_t shaper_profile_id,
1131         struct rte_tm_error *error)
1132 {
1133         struct pmd_internals *p = dev->data->dev_private;
1134         struct tm_shared_shaper *ss;
1135         struct tm_shaper_profile *sp;
1136         struct tm_node *nt;
1137
1138         /* Shaper profile must be valid. */
1139         sp = tm_shaper_profile_search(dev, shaper_profile_id);
1140         if (sp == NULL)
1141                 return -rte_tm_error_set(error,
1142                         EINVAL,
1143                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1144                         NULL,
1145                         rte_strerror(EINVAL));
1146
1147         /**
1148          * Add new shared shaper
1149          */
1150         ss = tm_shared_shaper_search(dev, shared_shaper_id);
1151         if (ss == NULL) {
1152                 struct tm_shared_shaper_list *ssl =
1153                         &p->soft.tm.h.shared_shapers;
1154
1155                 /* Hierarchy must not be frozen */
1156                 if (p->soft.tm.hierarchy_frozen)
1157                         return -rte_tm_error_set(error,
1158                                 EBUSY,
1159                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1160                                 NULL,
1161                                 rte_strerror(EBUSY));
1162
1163                 /* Memory allocation */
1164                 ss = calloc(1, sizeof(struct tm_shared_shaper));
1165                 if (ss == NULL)
1166                         return -rte_tm_error_set(error,
1167                                 ENOMEM,
1168                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1169                                 NULL,
1170                                 rte_strerror(ENOMEM));
1171
1172                 /* Fill in */
1173                 ss->shared_shaper_id = shared_shaper_id;
1174                 ss->shaper_profile_id = shaper_profile_id;
1175
1176                 /* Add to list */
1177                 TAILQ_INSERT_TAIL(ssl, ss, node);
1178                 p->soft.tm.h.n_shared_shapers++;
1179
1180                 return 0;
1181         }
1182
1183         /**
1184          * Update existing shared shaper
1185          */
1186         /* Hierarchy must be frozen (run-time update) */
1187         if (p->soft.tm.hierarchy_frozen == 0)
1188                 return -rte_tm_error_set(error,
1189                         EBUSY,
1190                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1191                         NULL,
1192                         rte_strerror(EBUSY));
1193
1194
1195         /* Propagate change. */
1196         nt = tm_shared_shaper_get_tc(dev, ss);
1197         if (update_subport_tc_rate(dev, nt, ss, sp))
1198                 return -rte_tm_error_set(error,
1199                         EINVAL,
1200                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1201                         NULL,
1202                         rte_strerror(EINVAL));
1203
1204         return 0;
1205 }
1206
1207 /* Traffic manager shared shaper delete */
1208 static int
1209 pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
1210         uint32_t shared_shaper_id,
1211         struct rte_tm_error *error)
1212 {
1213         struct pmd_internals *p = dev->data->dev_private;
1214         struct tm_shared_shaper *ss;
1215
1216         /* Check existing */
1217         ss = tm_shared_shaper_search(dev, shared_shaper_id);
1218         if (ss == NULL)
1219                 return -rte_tm_error_set(error,
1220                         EINVAL,
1221                         RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1222                         NULL,
1223                         rte_strerror(EINVAL));
1224
1225         /* Check unused */
1226         if (ss->n_users)
1227                 return -rte_tm_error_set(error,
1228                         EBUSY,
1229                         RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1230                         NULL,
1231                         rte_strerror(EBUSY));
1232
1233         /* Remove from list */
1234         TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
1235         p->soft.tm.h.n_shared_shapers--;
1236         free(ss);
1237
1238         return 0;
1239 }
1240
1241 static int
1242 wred_profile_check(struct rte_eth_dev *dev,
1243         uint32_t wred_profile_id,
1244         struct rte_tm_wred_params *profile,
1245         struct rte_tm_error *error)
1246 {
1247         struct tm_wred_profile *wp;
1248         enum rte_tm_color color;
1249
1250         /* WRED profile ID must not be NONE. */
1251         if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
1252                 return -rte_tm_error_set(error,
1253                         EINVAL,
1254                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1255                         NULL,
1256                         rte_strerror(EINVAL));
1257
1258         /* WRED profile must not exist. */
1259         wp = tm_wred_profile_search(dev, wred_profile_id);
1260         if (wp)
1261                 return -rte_tm_error_set(error,
1262                         EEXIST,
1263                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1264                         NULL,
1265                         rte_strerror(EEXIST));
1266
1267         /* Profile must not be NULL. */
1268         if (profile == NULL)
1269                 return -rte_tm_error_set(error,
1270                         EINVAL,
1271                         RTE_TM_ERROR_TYPE_WRED_PROFILE,
1272                         NULL,
1273                         rte_strerror(EINVAL));
1274
1275         /* min_th <= max_th, max_th > 0  */
1276         for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
1277                 uint16_t min_th = profile->red_params[color].min_th;
1278                 uint16_t max_th = profile->red_params[color].max_th;
1279
1280                 if (min_th > max_th || max_th == 0)
1281                         return -rte_tm_error_set(error,
1282                                 EINVAL,
1283                                 RTE_TM_ERROR_TYPE_WRED_PROFILE,
1284                                 NULL,
1285                                 rte_strerror(EINVAL));
1286         }
1287
1288         return 0;
1289 }
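/*
 * Illustrative thresholds that satisfy the per-color checks above (values are
 * only an example): GREEN min_th/max_th = 32/64, YELLOW = 16/32, RED = 8/16;
 * in each case min_th <= max_th and max_th > 0.
 */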
1290
1291 /* Traffic manager WRED profile add */
1292 static int
1293 pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
1294         uint32_t wred_profile_id,
1295         struct rte_tm_wred_params *profile,
1296         struct rte_tm_error *error)
1297 {
1298         struct pmd_internals *p = dev->data->dev_private;
1299         struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
1300         struct tm_wred_profile *wp;
1301         int status;
1302
1303         /* Check input params */
1304         status = wred_profile_check(dev, wred_profile_id, profile, error);
1305         if (status)
1306                 return status;
1307
1308         /* Memory allocation */
1309         wp = calloc(1, sizeof(struct tm_wred_profile));
1310         if (wp == NULL)
1311                 return -rte_tm_error_set(error,
1312                         ENOMEM,
1313                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1314                         NULL,
1315                         rte_strerror(ENOMEM));
1316
1317         /* Fill in */
1318         wp->wred_profile_id = wred_profile_id;
1319         memcpy(&wp->params, profile, sizeof(wp->params));
1320
1321         /* Add to list */
1322         TAILQ_INSERT_TAIL(wpl, wp, node);
1323         p->soft.tm.h.n_wred_profiles++;
1324
1325         return 0;
1326 }
1327
1328 /* Traffic manager WRED profile delete */
1329 static int
1330 pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
1331         uint32_t wred_profile_id,
1332         struct rte_tm_error *error)
1333 {
1334         struct pmd_internals *p = dev->data->dev_private;
1335         struct tm_wred_profile *wp;
1336
1337         /* Check existing */
1338         wp = tm_wred_profile_search(dev, wred_profile_id);
1339         if (wp == NULL)
1340                 return -rte_tm_error_set(error,
1341                         EINVAL,
1342                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1343                         NULL,
1344                         rte_strerror(EINVAL));
1345
1346         /* Check unused */
1347         if (wp->n_users)
1348                 return -rte_tm_error_set(error,
1349                         EBUSY,
1350                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1351                         NULL,
1352                         rte_strerror(EBUSY));
1353
1354         /* Remove from list */
1355         TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
1356         p->soft.tm.h.n_wred_profiles--;
1357         free(wp);
1358
1359         return 0;
1360 }
1361
1362 static int
1363 node_add_check_port(struct rte_eth_dev *dev,
1364         uint32_t node_id,
1365         uint32_t parent_node_id __rte_unused,
1366         uint32_t priority,
1367         uint32_t weight,
1368         uint32_t level_id __rte_unused,
1369         struct rte_tm_node_params *params,
1370         struct rte_tm_error *error)
1371 {
1372         struct pmd_internals *p = dev->data->dev_private;
1373         struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
1374                 params->shaper_profile_id);
1375
1376         /* node type: non-leaf */
1377         if (node_id < p->params.soft.tm.nb_queues)
1378                 return -rte_tm_error_set(error,
1379                         EINVAL,
1380                         RTE_TM_ERROR_TYPE_NODE_ID,
1381                         NULL,
1382                         rte_strerror(EINVAL));
1383
1384         /* Priority must be 0 */
1385         if (priority != 0)
1386                 return -rte_tm_error_set(error,
1387                         EINVAL,
1388                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1389                         NULL,
1390                         rte_strerror(EINVAL));
1391
1392         /* Weight must be 1 */
1393         if (weight != 1)
1394                 return -rte_tm_error_set(error,
1395                         EINVAL,
1396                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1397                         NULL,
1398                         rte_strerror(EINVAL));
1399
1400         /* Shaper must be valid.
1401          * Shaper profile peak rate must fit the configured port rate.
1402          */
1403         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1404                 sp == NULL ||
1405                 sp->params.peak.rate > p->params.soft.tm.rate)
1406                 return -rte_tm_error_set(error,
1407                         EINVAL,
1408                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1409                         NULL,
1410                         rte_strerror(EINVAL));
1411
1412         /* No shared shapers */
1413         if (params->n_shared_shapers != 0)
1414                 return -rte_tm_error_set(error,
1415                         EINVAL,
1416                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1417                         NULL,
1418                         rte_strerror(EINVAL));
1419
1420         /* Number of SP priorities must be 1 */
1421         if (params->nonleaf.n_sp_priorities != 1)
1422                 return -rte_tm_error_set(error,
1423                         EINVAL,
1424                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1425                         NULL,
1426                         rte_strerror(EINVAL));
1427
1428         /* Stats */
1429         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1430                 return -rte_tm_error_set(error,
1431                         EINVAL,
1432                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1433                         NULL,
1434                         rte_strerror(EINVAL));
1435
1436         return 0;
1437 }
1438
1439 static int
1440 node_add_check_subport(struct rte_eth_dev *dev,
1441         uint32_t node_id,
1442         uint32_t parent_node_id __rte_unused,
1443         uint32_t priority,
1444         uint32_t weight,
1445         uint32_t level_id __rte_unused,
1446         struct rte_tm_node_params *params,
1447         struct rte_tm_error *error)
1448 {
1449         struct pmd_internals *p = dev->data->dev_private;
1450
1451         /* node type: non-leaf */
1452         if (node_id < p->params.soft.tm.nb_queues)
1453                 return -rte_tm_error_set(error,
1454                         EINVAL,
1455                         RTE_TM_ERROR_TYPE_NODE_ID,
1456                         NULL,
1457                         rte_strerror(EINVAL));
1458
1459         /* Priority must be 0 */
1460         if (priority != 0)
1461                 return -rte_tm_error_set(error,
1462                         EINVAL,
1463                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1464                         NULL,
1465                         rte_strerror(EINVAL));
1466
1467         /* Weight must be 1 */
1468         if (weight != 1)
1469                 return -rte_tm_error_set(error,
1470                         EINVAL,
1471                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1472                         NULL,
1473                         rte_strerror(EINVAL));
1474
1475         /* Shaper must be valid */
1476         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1477                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1478                 return -rte_tm_error_set(error,
1479                         EINVAL,
1480                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1481                         NULL,
1482                         rte_strerror(EINVAL));
1483
1484         /* No shared shapers */
1485         if (params->n_shared_shapers != 0)
1486                 return -rte_tm_error_set(error,
1487                         EINVAL,
1488                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1489                         NULL,
1490                         rte_strerror(EINVAL));
1491
1492         /* Number of SP priorities must be 1 */
1493         if (params->nonleaf.n_sp_priorities != 1)
1494                 return -rte_tm_error_set(error,
1495                         EINVAL,
1496                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1497                         NULL,
1498                         rte_strerror(EINVAL));
1499
1500         /* Stats */
1501         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1502                 return -rte_tm_error_set(error,
1503                         EINVAL,
1504                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1505                         NULL,
1506                         rte_strerror(EINVAL));
1507
1508         return 0;
1509 }
1510
1511 static int
1512 node_add_check_pipe(struct rte_eth_dev *dev,
1513         uint32_t node_id,
1514         uint32_t parent_node_id __rte_unused,
1515         uint32_t priority,
1516         uint32_t weight __rte_unused,
1517         uint32_t level_id __rte_unused,
1518         struct rte_tm_node_params *params,
1519         struct rte_tm_error *error)
1520 {
1521         struct pmd_internals *p = dev->data->dev_private;
1522
1523         /* node type: non-leaf */
1524         if (node_id < p->params.soft.tm.nb_queues)
1525                 return -rte_tm_error_set(error,
1526                         EINVAL,
1527                         RTE_TM_ERROR_TYPE_NODE_ID,
1528                         NULL,
1529                         rte_strerror(EINVAL));
1530
1531         /* Priority must be 0 */
1532         if (priority != 0)
1533                 return -rte_tm_error_set(error,
1534                         EINVAL,
1535                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1536                         NULL,
1537                         rte_strerror(EINVAL));
1538
1539         /* Shaper must be valid */
1540         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1541                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1542                 return -rte_tm_error_set(error,
1543                         EINVAL,
1544                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1545                         NULL,
1546                         rte_strerror(EINVAL));
1547
1548         /* No shared shapers */
1549         if (params->n_shared_shapers != 0)
1550                 return -rte_tm_error_set(error,
1551                         EINVAL,
1552                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1553                         NULL,
1554                         rte_strerror(EINVAL));
1555
1556         /* Number of SP priorities must be 4 */
1557         if (params->nonleaf.n_sp_priorities !=
1558                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1559                 return -rte_tm_error_set(error,
1560                         EINVAL,
1561                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1562                         NULL,
1563                         rte_strerror(EINVAL));
1564
1565         /* WFQ mode must be byte mode */
1566         if (params->nonleaf.wfq_weight_mode != NULL &&
1567                 (params->nonleaf.wfq_weight_mode[0] == 0 ||
1568                 params->nonleaf.wfq_weight_mode[1] == 0 ||
1569                 params->nonleaf.wfq_weight_mode[2] == 0 ||
1570                 params->nonleaf.wfq_weight_mode[3] == 0))
1571                 return -rte_tm_error_set(error,
1572                         EINVAL,
1573                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
1574                         NULL,
1575                         rte_strerror(EINVAL));
1576
1577         /* Stats */
1578         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1579                 return -rte_tm_error_set(error,
1580                         EINVAL,
1581                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1582                         NULL,
1583                         rte_strerror(EINVAL));
1584
1585         return 0;
1586 }
1587
1588 static int
1589 node_add_check_tc(struct rte_eth_dev *dev,
1590         uint32_t node_id,
1591         uint32_t parent_node_id __rte_unused,
1592         uint32_t priority __rte_unused,
1593         uint32_t weight,
1594         uint32_t level_id __rte_unused,
1595         struct rte_tm_node_params *params,
1596         struct rte_tm_error *error)
1597 {
1598         struct pmd_internals *p = dev->data->dev_private;
1599
1600         /* node type: non-leaf */
1601         if (node_id < p->params.soft.tm.nb_queues)
1602                 return -rte_tm_error_set(error,
1603                         EINVAL,
1604                         RTE_TM_ERROR_TYPE_NODE_ID,
1605                         NULL,
1606                         rte_strerror(EINVAL));
1607
1608         /* Weight must be 1 */
1609         if (weight != 1)
1610                 return -rte_tm_error_set(error,
1611                         EINVAL,
1612                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1613                         NULL,
1614                         rte_strerror(EINVAL));
1615
1616         /* Shaper must be valid */
1617         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1618                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1619                 return -rte_tm_error_set(error,
1620                         EINVAL,
1621                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1622                         NULL,
1623                         rte_strerror(EINVAL));
1624
1625         /* Single valid shared shaper */
1626         if (params->n_shared_shapers > 1)
1627                 return -rte_tm_error_set(error,
1628                         EINVAL,
1629                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1630                         NULL,
1631                         rte_strerror(EINVAL));
1632
1633         if (params->n_shared_shapers == 1 &&
1634                 (params->shared_shaper_id == NULL ||
1635                 (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
1636                 return -rte_tm_error_set(error,
1637                         EINVAL,
1638                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
1639                         NULL,
1640                         rte_strerror(EINVAL));
1641
1642         /* Number of SP priorities must be 1 */
1643         if (params->nonleaf.n_sp_priorities != 1)
1644                 return -rte_tm_error_set(error,
1645                         EINVAL,
1646                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1647                         NULL,
1648                         rte_strerror(EINVAL));
1649
1650         /* Stats */
1651         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1652                 return -rte_tm_error_set(error,
1653                         EINVAL,
1654                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1655                         NULL,
1656                         rte_strerror(EINVAL));
1657
1658         return 0;
1659 }
1660
1661 static int
1662 node_add_check_queue(struct rte_eth_dev *dev,
1663         uint32_t node_id,
1664         uint32_t parent_node_id __rte_unused,
1665         uint32_t priority,
1666         uint32_t weight __rte_unused,
1667         uint32_t level_id __rte_unused,
1668         struct rte_tm_node_params *params,
1669         struct rte_tm_error *error)
1670 {
1671         struct pmd_internals *p = dev->data->dev_private;
1672
1673         /* node type: leaf */
1674         if (node_id >= p->params.soft.tm.nb_queues)
1675                 return -rte_tm_error_set(error,
1676                         EINVAL,
1677                         RTE_TM_ERROR_TYPE_NODE_ID,
1678                         NULL,
1679                         rte_strerror(EINVAL));
1680
1681         /* Priority must be 0 */
1682         if (priority != 0)
1683                 return -rte_tm_error_set(error,
1684                         EINVAL,
1685                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1686                         NULL,
1687                         rte_strerror(EINVAL));
1688
1689         /* No shaper */
1690         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1691                 return -rte_tm_error_set(error,
1692                         EINVAL,
1693                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1694                         NULL,
1695                         rte_strerror(EINVAL));
1696
1697         /* No shared shapers */
1698         if (params->n_shared_shapers != 0)
1699                 return -rte_tm_error_set(error,
1700                         EINVAL,
1701                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1702                         NULL,
1703                         rte_strerror(EINVAL));
1704
1705         /* Congestion management must not be head drop */
1706         if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
1707                 return -rte_tm_error_set(error,
1708                         EINVAL,
1709                         RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
1710                         NULL,
1711                         rte_strerror(EINVAL));
1712
1713         /* Congestion management set to WRED */
1714         if (params->leaf.cman == RTE_TM_CMAN_WRED) {
1715                 uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
1716                 struct tm_wred_profile *wp = tm_wred_profile_search(dev,
1717                         wred_profile_id);
1718
1719                 /* WRED profile (for private WRED context) must be valid */
1720                 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
1721                         wp == NULL)
1722                         return -rte_tm_error_set(error,
1723                                 EINVAL,
1724                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
1725                                 NULL,
1726                                 rte_strerror(EINVAL));
1727
1728                 /* No shared WRED contexts */
1729                 if (params->leaf.wred.n_shared_wred_contexts != 0)
1730                         return -rte_tm_error_set(error,
1731                                 EINVAL,
1732                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
1733                                 NULL,
1734                                 rte_strerror(EINVAL));
1735         }
1736
1737         /* Stats */
1738         if (params->stats_mask & ~STATS_MASK_QUEUE)
1739                 return -rte_tm_error_set(error,
1740                         EINVAL,
1741                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1742                         NULL,
1743                         rte_strerror(EINVAL));
1744
1745         return 0;
1746 }
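/*
 * Per-level constraints enforced by the node_add_check_*() helpers above,
 * on top of the generic checks performed in node_add_check() below:
 *
 *            node_id        priority  weight  shaper    shared shapers
 *   subport  >= nb_queues   0         1       required  none
 *   pipe     >= nb_queues   0         any     required  none
 *   tc       >= nb_queues   0..3      1       required  at most one
 *   queue    <  nb_queues   0         any     none      none
 *
 * Subport and TC nodes must also advertise a single SP priority, pipe nodes
 * must advertise RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE SP priorities with
 * byte-mode WFQ, and queue nodes may not request head-drop congestion
 * management.
 */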
1747
1748 static int
1749 node_add_check(struct rte_eth_dev *dev,
1750         uint32_t node_id,
1751         uint32_t parent_node_id,
1752         uint32_t priority,
1753         uint32_t weight,
1754         uint32_t level_id,
1755         struct rte_tm_node_params *params,
1756         struct rte_tm_error *error)
1757 {
1758         struct tm_node *pn;
1759         uint32_t level;
1760         int status;
1761
1762         /* node_id, parent_node_id:
1763          *    -node_id must not be RTE_TM_NODE_ID_NULL
1764          *    -node_id must not be in use
1765          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1766          *        -root node must not exist
1767          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1768          *        -parent_node_id must be valid
1769          */
1770         if (node_id == RTE_TM_NODE_ID_NULL)
1771                 return -rte_tm_error_set(error,
1772                         EINVAL,
1773                         RTE_TM_ERROR_TYPE_NODE_ID,
1774                         NULL,
1775                         rte_strerror(EINVAL));
1776
1777         if (tm_node_search(dev, node_id))
1778                 return -rte_tm_error_set(error,
1779                         EEXIST,
1780                         RTE_TM_ERROR_TYPE_NODE_ID,
1781                         NULL,
1782                         rte_strerror(EEXIST));
1783
1784         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
1785                 pn = NULL;
1786                 if (tm_root_node_present(dev))
1787                         return -rte_tm_error_set(error,
1788                                 EEXIST,
1789                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1790                                 NULL,
1791                                 rte_strerror(EEXIST));
1792         } else {
1793                 pn = tm_node_search(dev, parent_node_id);
1794                 if (pn == NULL)
1795                         return -rte_tm_error_set(error,
1796                                 EINVAL,
1797                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1798                                 NULL,
1799                                 rte_strerror(EINVAL));
1800         }
1801
1802         /* priority: must be 0 .. 3 */
1803         if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1804                 return -rte_tm_error_set(error,
1805                         EINVAL,
1806                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1807                         NULL,
1808                         rte_strerror(EINVAL));
1809
1810         /* weight: must be 1 .. 254 */
1811         if (weight == 0 || weight >= UINT8_MAX)
1812                 return -rte_tm_error_set(error,
1813                         EINVAL,
1814                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1815                         NULL,
1816                         rte_strerror(EINVAL));
1817
1818         /* level_id: if valid, then
1819          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1820          *        -level_id must be zero
1821          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1822          *        -level_id must be parent level ID plus one
1823          */
1824         level = (pn == NULL) ? 0 : pn->level + 1;
1825         if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
1826                 return -rte_tm_error_set(error,
1827                         EINVAL,
1828                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1829                         NULL,
1830                         rte_strerror(EINVAL));
1831
1832         /* params: must not be NULL */
1833         if (params == NULL)
1834                 return -rte_tm_error_set(error,
1835                         EINVAL,
1836                         RTE_TM_ERROR_TYPE_NODE_PARAMS,
1837                         NULL,
1838                         rte_strerror(EINVAL));
1839
1840         /* params: per level checks */
1841         switch (level) {
1842         case TM_NODE_LEVEL_PORT:
1843                 status = node_add_check_port(dev, node_id,
1844                         parent_node_id, priority, weight, level_id,
1845                         params, error);
1846                 if (status)
1847                         return status;
1848                 break;
1849
1850         case TM_NODE_LEVEL_SUBPORT:
1851                 status = node_add_check_subport(dev, node_id,
1852                         parent_node_id, priority, weight, level_id,
1853                         params, error);
1854                 if (status)
1855                         return status;
1856                 break;
1857
1858         case TM_NODE_LEVEL_PIPE:
1859                 status = node_add_check_pipe(dev, node_id,
1860                         parent_node_id, priority, weight, level_id,
1861                         params, error);
1862                 if (status)
1863                         return status;
1864                 break;
1865
1866         case TM_NODE_LEVEL_TC:
1867                 status = node_add_check_tc(dev, node_id,
1868                         parent_node_id, priority, weight, level_id,
1869                         params, error);
1870                 if (status)
1871                         return status;
1872                 break;
1873
1874         case TM_NODE_LEVEL_QUEUE:
1875                 status = node_add_check_queue(dev, node_id,
1876                         parent_node_id, priority, weight, level_id,
1877                         params, error);
1878                 if (status)
1879                         return status;
1880                 break;
1881
1882         default:
1883                 return -rte_tm_error_set(error,
1884                         EINVAL,
1885                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1886                         NULL,
1887                         rte_strerror(EINVAL));
1888         }
1889
1890         return 0;
1891 }
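/*
 * The node level is always inferred from the parent: the root sits at
 * level 0 (TM_NODE_LEVEL_PORT) and every child is one level deeper, which
 * yields the fixed port -> subport -> pipe -> tc -> queue chain handled by
 * the switch statement above. An explicit level_id is accepted only when it
 * equals this inferred level or is RTE_TM_NODE_LEVEL_ID_ANY.
 */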
1892
1893 /* Traffic manager node add */
1894 static int
1895 pmd_tm_node_add(struct rte_eth_dev *dev,
1896         uint32_t node_id,
1897         uint32_t parent_node_id,
1898         uint32_t priority,
1899         uint32_t weight,
1900         uint32_t level_id,
1901         struct rte_tm_node_params *params,
1902         struct rte_tm_error *error)
1903 {
1904         struct pmd_internals *p = dev->data->dev_private;
1905         struct tm_node_list *nl = &p->soft.tm.h.nodes;
1906         struct tm_node *n;
1907         uint32_t i;
1908         int status;
1909
1910         /* Checks */
1911         if (p->soft.tm.hierarchy_frozen)
1912                 return -rte_tm_error_set(error,
1913                         EBUSY,
1914                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1915                         NULL,
1916                         rte_strerror(EBUSY));
1917
1918         status = node_add_check(dev, node_id, parent_node_id, priority, weight,
1919                 level_id, params, error);
1920         if (status)
1921                 return status;
1922
1923         /* Memory allocation */
1924         n = calloc(1, sizeof(struct tm_node));
1925         if (n == NULL)
1926                 return -rte_tm_error_set(error,
1927                         ENOMEM,
1928                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1929                         NULL,
1930                         rte_strerror(ENOMEM));
1931
1932         /* Fill in */
1933         n->node_id = node_id;
1934         n->parent_node_id = parent_node_id;
1935         n->priority = priority;
1936         n->weight = weight;
1937
1938         if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1939                 n->parent_node = tm_node_search(dev, parent_node_id);
1940                 n->level = n->parent_node->level + 1;
1941         }
1942
1943         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1944                 n->shaper_profile = tm_shaper_profile_search(dev,
1945                         params->shaper_profile_id);
1946
1947         if (n->level == TM_NODE_LEVEL_QUEUE &&
1948                 params->leaf.cman == RTE_TM_CMAN_WRED)
1949                 n->wred_profile = tm_wred_profile_search(dev,
1950                         params->leaf.wred.wred_profile_id);
1951
1952         memcpy(&n->params, params, sizeof(n->params));
1953
1954         /* Add to list */
1955         TAILQ_INSERT_TAIL(nl, n, node);
1956         p->soft.tm.h.n_nodes++;
1957
1958         /* Update dependencies */
1959         if (n->parent_node)
1960                 n->parent_node->n_children++;
1961
1962         if (n->shaper_profile)
1963                 n->shaper_profile->n_users++;
1964
1965         for (i = 0; i < params->n_shared_shapers; i++) {
1966                 struct tm_shared_shaper *ss;
1967
1968                 ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
1969                 ss->n_users++;
1970         }
1971
1972         if (n->wred_profile)
1973                 n->wred_profile->n_users++;
1974
1975         p->soft.tm.h.n_tm_nodes[n->level]++;
1976
1977         return 0;
1978 }
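/*
 * Illustrative call sequence for populating the hierarchy through the
 * generic rte_tm API (a sketch only: port_id, the *_params structures and
 * the ROOT/SUBPORT0/PIPE0/TC0 node IDs are hypothetical; non-leaf IDs must
 * be >= nb_queues and leaf IDs < nb_queues):
 *
 *   rte_tm_node_add(port_id, ROOT, RTE_TM_NODE_ID_NULL, 0, 1,
 *           RTE_TM_NODE_LEVEL_ID_ANY, &port_params, &error);
 *   rte_tm_node_add(port_id, SUBPORT0, ROOT, 0, 1,
 *           RTE_TM_NODE_LEVEL_ID_ANY, &subport_params, &error);
 *   rte_tm_node_add(port_id, PIPE0, SUBPORT0, 0, 1,
 *           RTE_TM_NODE_LEVEL_ID_ANY, &pipe_params, &error);
 *   rte_tm_node_add(port_id, TC0, PIPE0, 0, 1,
 *           RTE_TM_NODE_LEVEL_ID_ANY, &tc_params, &error);
 *   rte_tm_node_add(port_id, 0, TC0, 0, 1,
 *           RTE_TM_NODE_LEVEL_ID_ANY, &queue_params, &error);
 *
 * All *_params except queue_params must reference a shaper profile created
 * beforehand with rte_tm_shaper_profile_add().
 */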
1979
1980 /* Traffic manager node delete */
1981 static int
1982 pmd_tm_node_delete(struct rte_eth_dev *dev,
1983         uint32_t node_id,
1984         struct rte_tm_error *error)
1985 {
1986         struct pmd_internals *p = dev->data->dev_private;
1987         struct tm_node *n;
1988         uint32_t i;
1989
1990         /* Check hierarchy changes are currently allowed */
1991         if (p->soft.tm.hierarchy_frozen)
1992                 return -rte_tm_error_set(error,
1993                         EBUSY,
1994                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1995                         NULL,
1996                         rte_strerror(EBUSY));
1997
1998         /* Check existing */
1999         n = tm_node_search(dev, node_id);
2000         if (n == NULL)
2001                 return -rte_tm_error_set(error,
2002                         EINVAL,
2003                         RTE_TM_ERROR_TYPE_NODE_ID,
2004                         NULL,
2005                         rte_strerror(EINVAL));
2006
2007         /* Check unused */
2008         if (n->n_children)
2009                 return -rte_tm_error_set(error,
2010                         EBUSY,
2011                         RTE_TM_ERROR_TYPE_NODE_ID,
2012                         NULL,
2013                         rte_strerror(EBUSY));
2014
2015         /* Update dependencies */
2016         p->soft.tm.h.n_tm_nodes[n->level]--;
2017
2018         if (n->wred_profile)
2019                 n->wred_profile->n_users--;
2020
2021         for (i = 0; i < n->params.n_shared_shapers; i++) {
2022                 struct tm_shared_shaper *ss;
2023
2024                 ss = tm_shared_shaper_search(dev,
2025                                 n->params.shared_shaper_id[i]);
2026                 ss->n_users--;
2027         }
2028
2029         if (n->shaper_profile)
2030                 n->shaper_profile->n_users--;
2031
2032         if (n->parent_node)
2033                 n->parent_node->n_children--;
2034
2035         /* Remove from list */
2036         TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
2037         p->soft.tm.h.n_nodes--;
2038         free(n);
2039
2040         return 0;
2041 }
2042
2043
2044 static void
2045 pipe_profile_build(struct rte_eth_dev *dev,
2046         struct tm_node *np,
2047         struct rte_sched_pipe_params *pp)
2048 {
2049         struct pmd_internals *p = dev->data->dev_private;
2050         struct tm_hierarchy *h = &p->soft.tm.h;
2051         struct tm_node_list *nl = &h->nodes;
2052         struct tm_node *nt, *nq;
2053
2054         memset(pp, 0, sizeof(*pp));
2055
2056         /* Pipe */
2057         pp->tb_rate = np->shaper_profile->params.peak.rate;
2058         pp->tb_size = np->shaper_profile->params.peak.size;
2059
2060         /* Traffic Class (TC) */
2061         pp->tc_period = PIPE_TC_PERIOD;
2062
2063 #ifdef RTE_SCHED_SUBPORT_TC_OV
2064         pp->tc_ov_weight = np->weight;
2065 #endif
2066
2067         TAILQ_FOREACH(nt, nl, node) {
2068                 uint32_t queue_id = 0;
2069
2070                 if (nt->level != TM_NODE_LEVEL_TC ||
2071                         nt->parent_node_id != np->node_id)
2072                         continue;
2073
2074                 pp->tc_rate[nt->priority] =
2075                         nt->shaper_profile->params.peak.rate;
2076
2077                 /* Queue */
2078                 TAILQ_FOREACH(nq, nl, node) {
2079                         uint32_t pipe_queue_id;
2080
2081                         if (nq->level != TM_NODE_LEVEL_QUEUE ||
2082                                 nq->parent_node_id != nt->node_id)
2083                                 continue;
2084
2085                         pipe_queue_id = nt->priority *
2086                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
2087                         pp->wrr_weights[pipe_queue_id] = nq->weight;
2088
2089                         queue_id++;
2090                 }
2091         }
2092 }
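/*
 * Queue placement within the pipe profile: each TC owns a contiguous group
 * of RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS entries in wrr_weights[], so a
 * queue lands at index (tc_priority * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS +
 * queue_id), where queue_id is the queue's position among its TC siblings
 * in creation order. With 4 queues per TC, the second queue (queue_id 1) of
 * TC 2 maps to wrr_weights[2 * 4 + 1] = wrr_weights[9].
 */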
2093
2094 static int
2095 pipe_profile_free_exists(struct rte_eth_dev *dev,
2096         uint32_t *pipe_profile_id)
2097 {
2098         struct pmd_internals *p = dev->data->dev_private;
2099         struct tm_params *t = &p->soft.tm.params;
2100
2101         if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
2102                 *pipe_profile_id = t->n_pipe_profiles;
2103                 return 1;
2104         }
2105
2106         return 0;
2107 }
2108
2109 static int
2110 pipe_profile_exists(struct rte_eth_dev *dev,
2111         struct rte_sched_pipe_params *pp,
2112         uint32_t *pipe_profile_id)
2113 {
2114         struct pmd_internals *p = dev->data->dev_private;
2115         struct tm_params *t = &p->soft.tm.params;
2116         uint32_t i;
2117
2118         for (i = 0; i < t->n_pipe_profiles; i++)
2119                 if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
2120                         if (pipe_profile_id)
2121                                 *pipe_profile_id = i;
2122                         return 1;
2123                 }
2124
2125         return 0;
2126 }
2127
2128 static void
2129 pipe_profile_install(struct rte_eth_dev *dev,
2130         struct rte_sched_pipe_params *pp,
2131         uint32_t pipe_profile_id)
2132 {
2133         struct pmd_internals *p = dev->data->dev_private;
2134         struct tm_params *t = &p->soft.tm.params;
2135
2136         memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
2137         t->n_pipe_profiles++;
2138 }
2139
2140 static void
2141 pipe_profile_mark(struct rte_eth_dev *dev,
2142         uint32_t subport_id,
2143         uint32_t pipe_id,
2144         uint32_t pipe_profile_id)
2145 {
2146         struct pmd_internals *p = dev->data->dev_private;
2147         struct tm_hierarchy *h = &p->soft.tm.h;
2148         struct tm_params *t = &p->soft.tm.params;
2149         uint32_t n_pipes_per_subport, pos;
2150
2151         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2152                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2153         pos = subport_id * n_pipes_per_subport + pipe_id;
2154
2155         t->pipe_to_profile[pos] = pipe_profile_id;
2156 }
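/*
 * pipe_to_profile[] is a flat array indexed by
 * (subport_id * n_pipes_per_subport + pipe_id); with 8 pipes per subport,
 * for example, pipe 3 of subport 2 is recorded at index 2 * 8 + 3 = 19.
 * pipe_profile_get() below reads the array back with the same formula.
 */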
2157
2158 static struct rte_sched_pipe_params *
2159 pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
2160 {
2161         struct pmd_internals *p = dev->data->dev_private;
2162         struct tm_hierarchy *h = &p->soft.tm.h;
2163         struct tm_params *t = &p->soft.tm.params;
2164         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2165                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2166
2167         uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
2168         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2169
2170         uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
2171         uint32_t pipe_profile_id = t->pipe_to_profile[pos];
2172
2173         return &t->pipe_profiles[pipe_profile_id];
2174 }
2175
2176 static int
2177 pipe_profiles_generate(struct rte_eth_dev *dev)
2178 {
2179         struct pmd_internals *p = dev->data->dev_private;
2180         struct tm_hierarchy *h = &p->soft.tm.h;
2181         struct tm_node_list *nl = &h->nodes;
2182         struct tm_node *ns, *np;
2183         uint32_t subport_id;
2184
2185         /* Objective: Fill in the following fields in struct tm_params:
2186          *    - pipe_profiles
2187          *    - n_pipe_profiles
2188          *    - pipe_to_profile
2189          */
2190
2191         subport_id = 0;
2192         TAILQ_FOREACH(ns, nl, node) {
2193                 uint32_t pipe_id;
2194
2195                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2196                         continue;
2197
2198                 pipe_id = 0;
2199                 TAILQ_FOREACH(np, nl, node) {
2200                         struct rte_sched_pipe_params pp;
2201                         uint32_t pos;
2202
2203                         if (np->level != TM_NODE_LEVEL_PIPE ||
2204                                 np->parent_node_id != ns->node_id)
2205                                 continue;
2206
2207                         pipe_profile_build(dev, np, &pp);
2208
2209                         if (!pipe_profile_exists(dev, &pp, &pos)) {
2210                                 if (!pipe_profile_free_exists(dev, &pos))
2211                                         return -1;
2212
2213                                 pipe_profile_install(dev, &pp, pos);
2214                         }
2215
2216                         pipe_profile_mark(dev, subport_id, pipe_id, pos);
2217
2218                         pipe_id++;
2219                 }
2220
2221                 subport_id++;
2222         }
2223
2224         return 0;
2225 }
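/*
 * pipe_profiles_generate() walks every (subport, pipe) pair in creation
 * order and de-duplicates the resulting rte_sched_pipe_params: an identical
 * profile (memcmp match) is reused, otherwise a new entry is installed as
 * long as fewer than RTE_SCHED_PIPE_PROFILES_PER_PORT profiles exist. Two
 * pipes configured with the same shapers and queue weights therefore share
 * a single profile slot.
 */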
2226
2227 static struct tm_wred_profile *
2228 tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
2229 {
2230         struct pmd_internals *p = dev->data->dev_private;
2231         struct tm_hierarchy *h = &p->soft.tm.h;
2232         struct tm_node_list *nl = &h->nodes;
2233         struct tm_node *nq;
2234
2235         TAILQ_FOREACH(nq, nl, node) {
2236                 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2237                         nq->parent_node->priority != tc_id)
2238                         continue;
2239
2240                 return nq->wred_profile;
2241         }
2242
2243         return NULL;
2244 }
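/*
 * Returning the WRED profile of the first matching queue is sufficient
 * here: hierarchy_commit_check() below requires every leaf node of a given
 * TC priority to reference the same WRED profile, so any queue of that
 * priority is representative.
 */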
2245
2246 #ifdef RTE_SCHED_RED
2247
2248 static void
2249 wred_profiles_set(struct rte_eth_dev *dev)
2250 {
2251         struct pmd_internals *p = dev->data->dev_private;
2252         struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
2253         uint32_t tc_id;
2254         enum rte_tm_color color;
2255
2256         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
2257                 for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
2258                         struct rte_red_params *dst =
2259                                 &pp->red_params[tc_id][color];
2260                         struct tm_wred_profile *src_wp =
2261                                 tm_tc_wred_profile_get(dev, tc_id);
2262                         struct rte_tm_red_params *src =
2263                                 &src_wp->params.red_params[color];
2264
2265                         memcpy(dst, src, sizeof(*dst));
2266                 }
2267 }
2268
2269 #else
2270
2271 #define wred_profiles_set(dev)
2272
2273 #endif
2274
2275 static struct tm_shared_shaper *
2276 tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
2277 {
2278         return (tc_node->params.n_shared_shapers) ?
2279                 tm_shared_shaper_search(dev,
2280                         tc_node->params.shared_shaper_id[0]) :
2281                 NULL;
2282 }
2283
2284 static struct tm_shared_shaper *
2285 tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
2286         struct tm_node *subport_node,
2287         uint32_t tc_id)
2288 {
2289         struct pmd_internals *p = dev->data->dev_private;
2290         struct tm_node_list *nl = &p->soft.tm.h.nodes;
2291         struct tm_node *n;
2292
2293         TAILQ_FOREACH(n, nl, node) {
2294                 if (n->level != TM_NODE_LEVEL_TC ||
2295                         n->parent_node->parent_node_id !=
2296                                 subport_node->node_id ||
2297                         n->priority != tc_id)
2298                         continue;
2299
2300                 return tm_tc_shared_shaper_get(dev, n);
2301         }
2302
2303         return NULL;
2304 }
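/*
 * Any TC node of priority tc_id under the given subport is representative:
 * the commit check below rejects hierarchies where pipes of the same
 * subport disagree on the shared shaper used for a given TC priority.
 */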
2305
2306 static int
2307 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2308 {
2309         struct pmd_internals *p = dev->data->dev_private;
2310         struct tm_hierarchy *h = &p->soft.tm.h;
2311         struct tm_node_list *nl = &h->nodes;
2312         struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2313         struct tm_wred_profile_list *wpl = &h->wred_profiles;
2314         struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2315         struct tm_shared_shaper *ss;
2316
2317         uint32_t n_pipes_per_subport;
2318
2319         /* Root node exists. */
2320         if (nr == NULL)
2321                 return -rte_tm_error_set(error,
2322                         EINVAL,
2323                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2324                         NULL,
2325                         rte_strerror(EINVAL));
2326
2327         /* There is at least one subport, max is not exceeded. */
2328         if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2329                 return -rte_tm_error_set(error,
2330                         EINVAL,
2331                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2332                         NULL,
2333                         rte_strerror(EINVAL));
2334
2335         /* There is at least one pipe. */
2336         if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2337                 return -rte_tm_error_set(error,
2338                         EINVAL,
2339                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2340                         NULL,
2341                         rte_strerror(EINVAL));
2342
2343         /* Number of pipes is the same for all subports. Maximum number of pipes
2344          * per subport is not exceeded.
2345          */
2346         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2347                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2348
2349         if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2350                 return -rte_tm_error_set(error,
2351                         EINVAL,
2352                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2353                         NULL,
2354                         rte_strerror(EINVAL));
2355
2356         TAILQ_FOREACH(ns, nl, node) {
2357                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2358                         continue;
2359
2360                 if (ns->n_children != n_pipes_per_subport)
2361                         return -rte_tm_error_set(error,
2362                                 EINVAL,
2363                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2364                                 NULL,
2365                                 rte_strerror(EINVAL));
2366         }
2367
2368         /* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
2369         TAILQ_FOREACH(np, nl, node) {
2370                 uint32_t mask = 0, mask_expected =
2371                         RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2372                                 uint32_t);
2373
2374                 if (np->level != TM_NODE_LEVEL_PIPE)
2375                         continue;
2376
2377                 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2378                         return -rte_tm_error_set(error,
2379                                 EINVAL,
2380                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2381                                 NULL,
2382                                 rte_strerror(EINVAL));
2383
2384                 TAILQ_FOREACH(nt, nl, node) {
2385                         if (nt->level != TM_NODE_LEVEL_TC ||
2386                                 nt->parent_node_id != np->node_id)
2387                                 continue;
2388
2389                         mask |= 1 << nt->priority;
2390                 }
2391
2392                 if (mask != mask_expected)
2393                         return -rte_tm_error_set(error,
2394                                 EINVAL,
2395                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2396                                 NULL,
2397                                 rte_strerror(EINVAL));
2398         }
2399
2400         /* Each TC has exactly 4 packet queues. */
2401         TAILQ_FOREACH(nt, nl, node) {
2402                 if (nt->level != TM_NODE_LEVEL_TC)
2403                         continue;
2404
2405                 if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
2406                         return -rte_tm_error_set(error,
2407                                 EINVAL,
2408                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2409                                 NULL,
2410                                 rte_strerror(EINVAL));
2411         }
2412
2413         /**
2414          * Shared shapers:
2415          *    -For each TC #i, all pipes in the same subport use the same
2416          *     shared shaper (or no shared shaper) for their TC#i.
2417          *    -Each shared shaper needs to have at least one user. All its
2418          *     users have to be TC nodes with the same priority and the same
2419          *     subport.
2420          */
2421         TAILQ_FOREACH(ns, nl, node) {
2422                 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2423                 uint32_t id;
2424
2425                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2426                         continue;
2427
2428                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2429                         s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2430
2431                 TAILQ_FOREACH(nt, nl, node) {
2432                         struct tm_shared_shaper *subport_ss, *tc_ss;
2433
2434                         if (nt->level != TM_NODE_LEVEL_TC ||
2435                                 nt->parent_node->parent_node_id !=
2436                                         ns->node_id)
2437                                 continue;
2438
2439                         subport_ss = s[nt->priority];
2440                         tc_ss = tm_tc_shared_shaper_get(dev, nt);
2441
2442                         if (subport_ss == NULL && tc_ss == NULL)
2443                                 continue;
2444
2445                         if ((subport_ss == NULL && tc_ss != NULL) ||
2446                                 (subport_ss != NULL && tc_ss == NULL) ||
2447                                 subport_ss->shared_shaper_id !=
2448                                         tc_ss->shared_shaper_id)
2449                                 return -rte_tm_error_set(error,
2450                                         EINVAL,
2451                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2452                                         NULL,
2453                                         rte_strerror(EINVAL));
2454                 }
2455         }
2456
2457         TAILQ_FOREACH(ss, ssl, node) {
2458                 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2459                 uint32_t n_users = 0;
2460
2461                 if (nt_any != NULL)
2462                         TAILQ_FOREACH(nt, nl, node) {
2463                                 if (nt->level != TM_NODE_LEVEL_TC ||
2464                                         nt->priority != nt_any->priority ||
2465                                         nt->parent_node->parent_node_id !=
2466                                         nt_any->parent_node->parent_node_id)
2467                                         continue;
2468
2469                                 n_users++;
2470                         }
2471
2472                 if (ss->n_users == 0 || ss->n_users != n_users)
2473                         return -rte_tm_error_set(error,
2474                                 EINVAL,
2475                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2476                                 NULL,
2477                                 rte_strerror(EINVAL));
2478         }
2479
2480         /* Not too many pipe profiles. */
2481         if (pipe_profiles_generate(dev))
2482                 return -rte_tm_error_set(error,
2483                         EINVAL,
2484                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2485                         NULL,
2486                         rte_strerror(EINVAL));
2487
2488         /**
2489          * WRED (when used, i.e. at least one WRED profile defined):
2490          *    -Each WRED profile must have at least one user.
2491          *    -All leaf nodes must have their private WRED context enabled.
2492          *    -For each TC #i, all leaf nodes must use the same WRED profile
2493          *     for their private WRED context.
2494          */
2495         if (h->n_wred_profiles) {
2496                 struct tm_wred_profile *wp;
2497                 struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2498                 uint32_t id;
2499
2500                 TAILQ_FOREACH(wp, wpl, node)
2501                         if (wp->n_users == 0)
2502                                 return -rte_tm_error_set(error,
2503                                         EINVAL,
2504                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2505                                         NULL,
2506                                         rte_strerror(EINVAL));
2507
2508                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
2509                         w[id] = tm_tc_wred_profile_get(dev, id);
2510
2511                         if (w[id] == NULL)
2512                                 return -rte_tm_error_set(error,
2513                                         EINVAL,
2514                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2515                                         NULL,
2516                                         rte_strerror(EINVAL));
2517                 }
2518
2519                 TAILQ_FOREACH(nq, nl, node) {
2520                         uint32_t id;
2521
2522                         if (nq->level != TM_NODE_LEVEL_QUEUE)
2523                                 continue;
2524
2525                         id = nq->parent_node->priority;
2526
2527                         if (nq->wred_profile == NULL ||
2528                                 nq->wred_profile->wred_profile_id !=
2529                                         w[id]->wred_profile_id)
2530                                 return -rte_tm_error_set(error,
2531                                         EINVAL,
2532                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2533                                         NULL,
2534                                         rte_strerror(EINVAL));
2535                 }
2536         }
2537
2538         return 0;
2539 }
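/*
 * Shape of a hierarchy that passes the checks above, e.g. with 2 subports
 * and 8 pipes per subport:
 *
 *   port (root, level 0)
 *     +-- subport 0..1               1 .. TM_MAX_SUBPORTS subports
 *          +-- pipe 0..7             same pipe count for every subport
 *               +-- tc 0..3          exactly one TC per priority
 *                    +-- queue 0..3  RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS
 *
 * which amounts to 2 * 8 * 4 * 4 = 256 leaf (queue) nodes.
 */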
2540
2541 static void
2542 hierarchy_blueprints_create(struct rte_eth_dev *dev)
2543 {
2544         struct pmd_internals *p = dev->data->dev_private;
2545         struct tm_params *t = &p->soft.tm.params;
2546         struct tm_hierarchy *h = &p->soft.tm.h;
2547
2548         struct tm_node_list *nl = &h->nodes;
2549         struct tm_node *root = tm_root_node_present(dev), *n;
2550
2551         uint32_t subport_id;
2552
2553         t->port_params = (struct rte_sched_port_params) {
2554                 .name = dev->data->name,
2555                 .socket = dev->data->numa_node,
2556                 .rate = root->shaper_profile->params.peak.rate,
2557                 .mtu = dev->data->mtu,
2558                 .frame_overhead =
2559                         root->shaper_profile->params.pkt_length_adjust,
2560                 .n_subports_per_port = root->n_children,
2561                 .n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2562                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
2563                 .qsize = {p->params.soft.tm.qsize[0],
2564                         p->params.soft.tm.qsize[1],
2565                         p->params.soft.tm.qsize[2],
2566                         p->params.soft.tm.qsize[3],
2567                 },
2568                 .pipe_profiles = t->pipe_profiles,
2569                 .n_pipe_profiles = t->n_pipe_profiles,
2570         };
2571
2572         wred_profiles_set(dev);
2573
2574         subport_id = 0;
2575         TAILQ_FOREACH(n, nl, node) {
2576                 uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2577                 uint32_t i;
2578
2579                 if (n->level != TM_NODE_LEVEL_SUBPORT)
2580                         continue;
2581
2582                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
2583                         struct tm_shared_shaper *ss;
2584                         struct tm_shaper_profile *sp;
2585
2586                         ss = tm_subport_tc_shared_shaper_get(dev, n, i);
2587                         sp = (ss) ? tm_shaper_profile_search(dev,
2588                                 ss->shaper_profile_id) :
2589                                 n->shaper_profile;
2590                         tc_rate[i] = sp->params.peak.rate;
2591                 }
2592
2593                 t->subport_params[subport_id] =
2594                         (struct rte_sched_subport_params) {
2595                                 .tb_rate = n->shaper_profile->params.peak.rate,
2596                                 .tb_size = n->shaper_profile->params.peak.size,
2597
2598                                 .tc_rate = {tc_rate[0],
2599                                         tc_rate[1],
2600                                         tc_rate[2],
2601                                         tc_rate[3],
2602                         },
2603                         .tc_period = SUBPORT_TC_PERIOD,
2604                 };
2605
2606                 subport_id++;
2607         }
2608 }
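/*
 * Blueprint mapping: the root shaper supplies the port rate and frame
 * overhead, each subport shaper supplies the subport token bucket, and the
 * per-TC subport rates come from the TC shared shaper when one is used
 * (otherwise they default to the subport shaper rate). Pipe-level settings
 * were already captured as pipe profiles by pipe_profiles_generate().
 */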
2609
2610 /* Traffic manager hierarchy commit */
2611 static int
2612 pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
2613         int clear_on_fail,
2614         struct rte_tm_error *error)
2615 {
2616         struct pmd_internals *p = dev->data->dev_private;
2617         int status;
2618
2619         /* Checks */
2620         if (p->soft.tm.hierarchy_frozen)
2621                 return -rte_tm_error_set(error,
2622                         EBUSY,
2623                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2624                         NULL,
2625                         rte_strerror(EBUSY));
2626
2627         status = hierarchy_commit_check(dev, error);
2628         if (status) {
2629                 if (clear_on_fail) {
2630                         tm_hierarchy_uninit(p);
2631                         tm_hierarchy_init(p);
2632                 }
2633
2634                 return status;
2635         }
2636
2637         /* Create blueprints */
2638         hierarchy_blueprints_create(dev);
2639
2640         /* Freeze hierarchy */
2641         p->soft.tm.hierarchy_frozen = 1;
2642
2643         return 0;
2644 }
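/*
 * Typical application flow, as a sketch (port_id is a hypothetical
 * variable):
 *
 *   struct rte_tm_error error;
 *
 *   if (rte_tm_hierarchy_commit(port_id, 1, &error) != 0)
 *           printf("TM commit failed: %s\n", error.message);
 *
 * With clear_on_fail set, a failed commit wipes the pending hierarchy so it
 * can be rebuilt from scratch; after a successful commit the hierarchy is
 * frozen and further node add/delete requests fail with EBUSY.
 */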
2645
2646 #ifdef RTE_SCHED_SUBPORT_TC_OV
2647
2648 static int
2649 update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
2650 {
2651         struct pmd_internals *p = dev->data->dev_private;
2652         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2653
2654         struct tm_node *ns = np->parent_node;
2655         uint32_t subport_id = tm_node_subport_id(dev, ns);
2656
2657         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2658         struct rte_sched_pipe_params profile1;
2659         uint32_t pipe_profile_id;
2660
2661         /* Derive new pipe profile. */
2662         memcpy(&profile1, profile0, sizeof(profile1));
2663         profile1.tc_ov_weight = (uint8_t)weight;
2664
2665         /* Since the implementation does not allow adding more pipe profiles
2666          * after port configuration, the pipe configuration can be successfully
2667          * updated only if the new profile is also part of the existing set of
2668          * pipe profiles.
2669          */
2670         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2671                 return -1;
2672
2673         /* Update the pipe profile used by the current pipe. */
2674         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2675                 (int32_t)pipe_profile_id))
2676                 return -1;
2677
2678         /* Commit changes. */
2679         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2680         np->weight = weight;
2681
2682         return 0;
2683 }
2684
2685 #endif
2686
2687 static int
2688 update_queue_weight(struct rte_eth_dev *dev,
2689         struct tm_node *nq, uint32_t weight)
2690 {
2691         struct pmd_internals *p = dev->data->dev_private;
2692         uint32_t queue_id = tm_node_queue_id(dev, nq);
2693
2694         struct tm_node *nt = nq->parent_node;
2695         uint32_t tc_id = tm_node_tc_id(dev, nt);
2696
2697         struct tm_node *np = nt->parent_node;
2698         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2699
2700         struct tm_node *ns = np->parent_node;
2701         uint32_t subport_id = tm_node_subport_id(dev, ns);
2702
2703         uint32_t pipe_queue_id =
2704                 tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
2705
2706         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2707         struct rte_sched_pipe_params profile1;
2708         uint32_t pipe_profile_id;
2709
2710         /* Derive new pipe profile. */
2711         memcpy(&profile1, profile0, sizeof(profile1));
2712         profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;
2713
2714         /* Since the implementation does not allow adding more pipe profiles
2715          * after port configuration, the pipe configuration can be successfully
2716          * updated only if the new profile is also part of the existing set
2717          * of pipe profiles.
2718          */
2719         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2720                 return -1;
2721
2722         /* Update the pipe profile used by the current pipe. */
2723         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2724                 (int32_t)pipe_profile_id))
2725                 return -1;
2726
2727         /* Commit changes. */
2728         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2729         nq->weight = weight;
2730
2731         return 0;
2732 }
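/*
 * The runtime update helpers (update_pipe_weight, update_queue_weight and
 * the update_*_rate functions below) all follow the same pattern: derive a
 * new configuration from the current one and push it through rte_sched.
 * Because rte_sched accepts no new pipe profiles after port configuration,
 * a pipe-level change succeeds only if the modified profile is already part
 * of the set frozen at commit time; otherwise the helper returns -1 and the
 * node is left untouched. Subport rate updates go through
 * rte_sched_subport_config() directly and carry no such restriction.
 */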
2733
2734 /* Traffic manager node parent update */
2735 static int
2736 pmd_tm_node_parent_update(struct rte_eth_dev *dev,
2737         uint32_t node_id,
2738         uint32_t parent_node_id,
2739         uint32_t priority,
2740         uint32_t weight,
2741         struct rte_tm_error *error)
2742 {
2743         struct tm_node *n;
2744
2745         /* Port must be started and TM used. */
2746         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
2747                 return -rte_tm_error_set(error,
2748                         EBUSY,
2749                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2750                         NULL,
2751                         rte_strerror(EBUSY));
2752
2753         /* Node must be valid */
2754         n = tm_node_search(dev, node_id);
2755         if (n == NULL)
2756                 return -rte_tm_error_set(error,
2757                         EINVAL,
2758                         RTE_TM_ERROR_TYPE_NODE_ID,
2759                         NULL,
2760                         rte_strerror(EINVAL));
2761
2762         /* Parent node must be the same */
2763         if (n->parent_node_id != parent_node_id)
2764                 return -rte_tm_error_set(error,
2765                         EINVAL,
2766                         RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
2767                         NULL,
2768                         rte_strerror(EINVAL));
2769
2770         /* Priority must be the same */
2771         if (n->priority != priority)
2772                 return -rte_tm_error_set(error,
2773                         EINVAL,
2774                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
2775                         NULL,
2776                         rte_strerror(EINVAL));
2777
2778         /* weight: must be 1 .. 254 */
2779         if (weight == 0 || weight >= UINT8_MAX)
2780                 return -rte_tm_error_set(error,
2781                         EINVAL,
2782                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2783                         NULL,
2784                         rte_strerror(EINVAL));
2785
2786         switch (n->level) {
2787         case TM_NODE_LEVEL_PORT:
2788                 return -rte_tm_error_set(error,
2789                         EINVAL,
2790                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2791                         NULL,
2792                         rte_strerror(EINVAL));
2793                 /* fall-through */
2794         case TM_NODE_LEVEL_SUBPORT:
2795                 return -rte_tm_error_set(error,
2796                         EINVAL,
2797                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2798                         NULL,
2799                         rte_strerror(EINVAL));
2800                 /* fall-through */
2801         case TM_NODE_LEVEL_PIPE:
2802 #ifdef RTE_SCHED_SUBPORT_TC_OV
2803                 if (update_pipe_weight(dev, n, weight))
2804                         return -rte_tm_error_set(error,
2805                                 EINVAL,
2806                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2807                                 NULL,
2808                                 rte_strerror(EINVAL));
2809                 return 0;
2810 #else
2811                 return -rte_tm_error_set(error,
2812                         EINVAL,
2813                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2814                         NULL,
2815                         rte_strerror(EINVAL));
2816 #endif
2817                 /* fall-through */
2818         case TM_NODE_LEVEL_TC:
2819                 return -rte_tm_error_set(error,
2820                         EINVAL,
2821                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2822                         NULL,
2823                         rte_strerror(EINVAL));
2824                 /* fall-through */
2825         case TM_NODE_LEVEL_QUEUE:
2826                 /* fall-through */
2827         default:
2828                 if (update_queue_weight(dev, n, weight))
2829                         return -rte_tm_error_set(error,
2830                                 EINVAL,
2831                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2832                                 NULL,
2833                                 rte_strerror(EINVAL));
2834                 return 0;
2835         }
2836 }
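/*
 * Despite its name, pmd_tm_node_parent_update() cannot move a node: the
 * parent and priority passed in must match the existing ones, so only the
 * weight may change, and only for pipe nodes (TC oversubscription weight,
 * when RTE_SCHED_SUBPORT_TC_OV is enabled) and queue nodes (WRR weight).
 * Sketch of the supported use (node and weight values are hypothetical):
 *
 *   rte_tm_node_parent_update(port_id, queue_node_id,
 *           current_parent_id, 0, new_weight, &error);
 */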
2837
2838 static int
2839 update_subport_rate(struct rte_eth_dev *dev,
2840         struct tm_node *ns,
2841         struct tm_shaper_profile *sp)
2842 {
2843         struct pmd_internals *p = dev->data->dev_private;
2844         uint32_t subport_id = tm_node_subport_id(dev, ns);
2845
2846         struct rte_sched_subport_params subport_params;
2847
2848         /* Derive new subport configuration. */
2849         memcpy(&subport_params,
2850                 &p->soft.tm.params.subport_params[subport_id],
2851                 sizeof(subport_params));
2852         subport_params.tb_rate = sp->params.peak.rate;
2853         subport_params.tb_size = sp->params.peak.size;
2854
2855         /* Update the subport configuration. */
2856         if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
2857                 &subport_params))
2858                 return -1;
2859
2860         /* Commit changes. */
2861         ns->shaper_profile->n_users--;
2862
2863         ns->shaper_profile = sp;
2864         ns->params.shaper_profile_id = sp->shaper_profile_id;
2865         sp->n_users++;
2866
2867         memcpy(&p->soft.tm.params.subport_params[subport_id],
2868                 &subport_params,
2869                 sizeof(subport_params));
2870
2871         return 0;
2872 }
2873
2874 static int
2875 update_pipe_rate(struct rte_eth_dev *dev,
2876         struct tm_node *np,
2877         struct tm_shaper_profile *sp)
2878 {
2879         struct pmd_internals *p = dev->data->dev_private;
2880         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2881
2882         struct tm_node *ns = np->parent_node;
2883         uint32_t subport_id = tm_node_subport_id(dev, ns);
2884
2885         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2886         struct rte_sched_pipe_params profile1;
2887         uint32_t pipe_profile_id;
2888
2889         /* Derive new pipe profile. */
2890         memcpy(&profile1, profile0, sizeof(profile1));
2891         profile1.tb_rate = sp->params.peak.rate;
2892         profile1.tb_size = sp->params.peak.size;
2893
2894         /* Since the implementation does not allow adding more pipe profiles
2895          * after port configuration, the pipe configuration can be successfully
2896          * updated only if the new profile is also part of the existing set of
2897          * pipe profiles.
2898          */
2899         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2900                 return -1;
2901
2902         /* Update the pipe profile used by the current pipe. */
2903         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2904                 (int32_t)pipe_profile_id))
2905                 return -1;
2906
2907         /* Commit changes. */
2908         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2909         np->shaper_profile->n_users--;
2910         np->shaper_profile = sp;
2911         np->params.shaper_profile_id = sp->shaper_profile_id;
2912         sp->n_users++;
2913
2914         return 0;
2915 }
2916
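/* Apply a new shaper profile to a traffic class node by switching its pipe
 * to an existing pipe profile whose tc_rate[] entry for this TC equals the
 * profile's peak rate. As with pipe rate updates, the matching profile must
 * already be part of the existing set of pipe profiles.
 */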
2917 static int
2918 update_tc_rate(struct rte_eth_dev *dev,
2919         struct tm_node *nt,
2920         struct tm_shaper_profile *sp)
2921 {
2922         struct pmd_internals *p = dev->data->dev_private;
2923         uint32_t tc_id = tm_node_tc_id(dev, nt);
2924
2925         struct tm_node *np = nt->parent_node;
2926         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2927
2928         struct tm_node *ns = np->parent_node;
2929         uint32_t subport_id = tm_node_subport_id(dev, ns);
2930
2931         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2932         struct rte_sched_pipe_params profile1;
2933         uint32_t pipe_profile_id;
2934
2935         /* Derive new pipe profile. */
2936         memcpy(&profile1, profile0, sizeof(profile1));
2937         profile1.tc_rate[tc_id] = sp->params.peak.rate;
2938
2939         /* Since the implementation does not allow adding more pipe profiles after
2940          * port configuration, the pipe configuration can be successfully
2941          * updated only if the new profile is also part of the existing set of
2942          * pipe profiles.
2943          */
2944         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2945                 return -1;
2946
2947         /* Update the pipe profile used by the current pipe. */
2948         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2949                 (int32_t)pipe_profile_id))
2950                 return -1;
2951
2952         /* Commit changes. */
2953         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2954         nt->shaper_profile->n_users--;
2955         nt->shaper_profile = sp;
2956         nt->params.shaper_profile_id = sp->shaper_profile_id;
2957         sp->n_users++;
2958
2959         return 0;
2960 }
2961
2962 /* Traffic manager node shaper update */
2963 static int
2964 pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
2965         uint32_t node_id,
2966         uint32_t shaper_profile_id,
2967         struct rte_tm_error *error)
2968 {
2969         struct tm_node *n;
2970         struct tm_shaper_profile *sp;
2971
2972         /* Port must be started and TM used. */
2973         if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
2974                 return -rte_tm_error_set(error,
2975                         EBUSY,
2976                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2977                         NULL,
2978                         rte_strerror(EBUSY));
2979
2980         /* Node must be valid */
2981         n = tm_node_search(dev, node_id);
2982         if (n == NULL)
2983                 return -rte_tm_error_set(error,
2984                         EINVAL,
2985                         RTE_TM_ERROR_TYPE_NODE_ID,
2986                         NULL,
2987                         rte_strerror(EINVAL));
2988
2989         /* Shaper profile must be valid. */
2990         sp = tm_shaper_profile_search(dev, shaper_profile_id);
2991         if (sp == NULL)
2992                 return -rte_tm_error_set(error,
2993                         EINVAL,
2994                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
2995                         NULL,
2996                         rte_strerror(EINVAL));
2997
2998         switch (n->level) {
2999         case TM_NODE_LEVEL_PORT:
3000                 return -rte_tm_error_set(error,
3001                         EINVAL,
3002                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3003                         NULL,
3004                         rte_strerror(EINVAL));
3005
3006         case TM_NODE_LEVEL_SUBPORT:
3007                 if (update_subport_rate(dev, n, sp))
3008                         return -rte_tm_error_set(error,
3009                                 EINVAL,
3010                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3011                                 NULL,
3012                                 rte_strerror(EINVAL));
3013                 return 0;
3014
3015         case TM_NODE_LEVEL_PIPE:
3016                 if (update_pipe_rate(dev, n, sp))
3017                         return -rte_tm_error_set(error,
3018                                 EINVAL,
3019                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3020                                 NULL,
3021                                 rte_strerror(EINVAL));
3022                 return 0;
3023
3024         case TM_NODE_LEVEL_TC:
3025                 if (update_tc_rate(dev, n, sp))
3026                         return -rte_tm_error_set(error,
3027                                 EINVAL,
3028                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3029                                 NULL,
3030                                 rte_strerror(EINVAL));
3031                 return 0;
3032
3033         case TM_NODE_LEVEL_QUEUE:
3034                 /* fall-through */
3035         default:
3036                 return -rte_tm_error_set(error,
3037                         EINVAL,
3038                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3039                         NULL,
3040                         rte_strerror(EINVAL));
3041         }
3042 }
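/*
 * Usage sketch (not part of the driver; port_id, pipe_node_id and
 * new_profile_id are hypothetical application values): this callback is
 * reached through the generic ethdev traffic management API, e.g.:
 *
 *     struct rte_tm_error err;
 *
 *     if (rte_tm_node_shaper_update(port_id, pipe_node_id,
 *             new_profile_id, &err))
 *         printf("shaper update failed: %s\n", err.message);
 *
 * The profile is expected to have been created earlier with
 * rte_tm_shaper_profile_add().
 */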
3043
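/* Flatten (subport, pipe, tc, queue) coordinates into the scheduler's linear
 * queue index. The hierarchy is assumed symmetric (every subport has the same
 * number of pipes), so:
 *
 *   qid = ((subport * pipes_per_subport + pipe)
 *              * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + tc)
 *              * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue
 *
 * For example, with 4 pipes per subport, 4 TCs per pipe and 4 queues per TC,
 * (subport 1, pipe 2, tc 3, queue 0) maps to ((1 * 4 + 2) * 4 + 3) * 4 = 108.
 */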
3044 static inline uint32_t
3045 tm_port_queue_id(struct rte_eth_dev *dev,
3046         uint32_t port_subport_id,
3047         uint32_t subport_pipe_id,
3048         uint32_t pipe_tc_id,
3049         uint32_t tc_queue_id)
3050 {
3051         struct pmd_internals *p = dev->data->dev_private;
3052         struct tm_hierarchy *h = &p->soft.tm.h;
3053         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
3054                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3055
3056         uint32_t port_pipe_id =
3057                 port_subport_id * n_pipes_per_subport + subport_pipe_id;
3058         uint32_t port_tc_id =
3059                 port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
3060         uint32_t port_queue_id =
3061                 port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;
3062
3063         return port_queue_id;
3064 }
3065
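/* The read_*_stats() helpers below translate rte_sched counters into rte_tm
 * node statistics: packet/byte counts exclude dropped traffic, all drops are
 * accounted under the RTE_TM_GREEN color (per-color drop accounting is not
 * tracked), and the accumulated totals are optionally copied to the caller
 * and/or cleared. Queue nodes additionally report occupancy and use
 * STATS_MASK_QUEUE instead of STATS_MASK_DEFAULT. read_port_stats()
 * aggregates the subport counters over every subport of the port.
 */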
3066 static int
3067 read_port_stats(struct rte_eth_dev *dev,
3068         struct tm_node *nr,
3069         struct rte_tm_node_stats *stats,
3070         uint64_t *stats_mask,
3071         int clear)
3072 {
3073         struct pmd_internals *p = dev->data->dev_private;
3074         struct tm_hierarchy *h = &p->soft.tm.h;
3075         uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3076         uint32_t subport_id;
3077
3078         for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
3079                 struct rte_sched_subport_stats s;
3080                 uint32_t tc_ov, id;
3081
3082                 /* Stats read */
3083                 int status = rte_sched_subport_read_stats(
3084                         p->soft.tm.sched,
3085                         subport_id,
3086                         &s,
3087                         &tc_ov);
3088                 if (status)
3089                         return status;
3090
3091                 /* Stats accumulate */
3092                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
3093                         nr->stats.n_pkts +=
3094                                 s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
3095                         nr->stats.n_bytes +=
3096                                 s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
3097                         nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
3098                                 s.n_pkts_tc_dropped[id];
3099                         nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3100                                 s.n_bytes_tc_dropped[id];
3101                 }
3102         }
3103
3104         /* Stats copy */
3105         if (stats)
3106                 memcpy(stats, &nr->stats, sizeof(*stats));
3107
3108         if (stats_mask)
3109                 *stats_mask = STATS_MASK_DEFAULT;
3110
3111         /* Stats clear */
3112         if (clear)
3113                 memset(&nr->stats, 0, sizeof(nr->stats));
3114
3115         return 0;
3116 }
3117
3118 static int
3119 read_subport_stats(struct rte_eth_dev *dev,
3120         struct tm_node *ns,
3121         struct rte_tm_node_stats *stats,
3122         uint64_t *stats_mask,
3123         int clear)
3124 {
3125         struct pmd_internals *p = dev->data->dev_private;
3126         uint32_t subport_id = tm_node_subport_id(dev, ns);
3127         struct rte_sched_subport_stats s;
3128         uint32_t tc_ov, tc_id;
3129
3130         /* Stats read */
3131         int status = rte_sched_subport_read_stats(
3132                 p->soft.tm.sched,
3133                 subport_id,
3134                 &s,
3135                 &tc_ov);
3136         if (status)
3137                 return status;
3138
3139         /* Stats accumulate */
3140         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
3141                 ns->stats.n_pkts +=
3142                         s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
3143                 ns->stats.n_bytes +=
3144                         s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
3145                 ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
3146                         s.n_pkts_tc_dropped[tc_id];
3147                 ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3148                         s.n_bytes_tc_dropped[tc_id];
3149         }
3150
3151         /* Stats copy */
3152         if (stats)
3153                 memcpy(stats, &ns->stats, sizeof(*stats));
3154
3155         if (stats_mask)
3156                 *stats_mask = STATS_MASK_DEFAULT;
3157
3158         /* Stats clear */
3159         if (clear)
3160                 memset(&ns->stats, 0, sizeof(ns->stats));
3161
3162         return 0;
3163 }
3164
3165 static int
3166 read_pipe_stats(struct rte_eth_dev *dev,
3167         struct tm_node *np,
3168         struct rte_tm_node_stats *stats,
3169         uint64_t *stats_mask,
3170         int clear)
3171 {
3172         struct pmd_internals *p = dev->data->dev_private;
3173
3174         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3175
3176         struct tm_node *ns = np->parent_node;
3177         uint32_t subport_id = tm_node_subport_id(dev, ns);
3178
3179         uint32_t i;
3180
3181         /* Stats read */
3182         for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
3183                 struct rte_sched_queue_stats s;
3184                 uint16_t qlen;
3185
3186                 uint32_t qid = tm_port_queue_id(dev,
3187                         subport_id,
3188                         pipe_id,
3189                         i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
3190                         i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
3191
3192                 int status = rte_sched_queue_read_stats(
3193                         p->soft.tm.sched,
3194                         qid,
3195                         &s,
3196                         &qlen);
3197                 if (status)
3198                         return status;
3199
3200                 /* Stats accumulate */
3201                 np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3202                 np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3203                 np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3204                 np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3205                         s.n_bytes_dropped;
3206                 np->stats.leaf.n_pkts_queued = qlen;
3207         }
3208
3209         /* Stats copy */
3210         if (stats)
3211                 memcpy(stats, &np->stats, sizeof(*stats));
3212
3213         if (stats_mask)
3214                 *stats_mask = STATS_MASK_DEFAULT;
3215
3216         /* Stats clear */
3217         if (clear)
3218                 memset(&np->stats, 0, sizeof(np->stats));
3219
3220         return 0;
3221 }
3222
3223 static int
3224 read_tc_stats(struct rte_eth_dev *dev,
3225         struct tm_node *nt,
3226         struct rte_tm_node_stats *stats,
3227         uint64_t *stats_mask,
3228         int clear)
3229 {
3230         struct pmd_internals *p = dev->data->dev_private;
3231
3232         uint32_t tc_id = tm_node_tc_id(dev, nt);
3233
3234         struct tm_node *np = nt->parent_node;
3235         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3236
3237         struct tm_node *ns = np->parent_node;
3238         uint32_t subport_id = tm_node_subport_id(dev, ns);
3239
3240         uint32_t i;
3241
3242         /* Stats read */
3243         for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
3244                 struct rte_sched_queue_stats s;
3245                 uint16_t qlen;
3246
3247                 uint32_t qid = tm_port_queue_id(dev,
3248                         subport_id,
3249                         pipe_id,
3250                         tc_id,
3251                         i);
3252
3253                 int status = rte_sched_queue_read_stats(
3254                         p->soft.tm.sched,
3255                         qid,
3256                         &s,
3257                         &qlen);
3258                 if (status)
3259                         return status;
3260
3261                 /* Stats accumulate */
3262                 nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3263                 nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3264                 nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3265                 nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3266                         s.n_bytes_dropped;
3267                 nt->stats.leaf.n_pkts_queued = qlen;
3268         }
3269
3270         /* Stats copy */
3271         if (stats)
3272                 memcpy(stats, &nt->stats, sizeof(*stats));
3273
3274         if (stats_mask)
3275                 *stats_mask = STATS_MASK_DEFAULT;
3276
3277         /* Stats clear */
3278         if (clear)
3279                 memset(&nt->stats, 0, sizeof(nt->stats));
3280
3281         return 0;
3282 }
3283
3284 static int
3285 read_queue_stats(struct rte_eth_dev *dev,
3286         struct tm_node *nq,
3287         struct rte_tm_node_stats *stats,
3288         uint64_t *stats_mask,
3289         int clear)
3290 {
3291         struct pmd_internals *p = dev->data->dev_private;
3292         struct rte_sched_queue_stats s;
3293         uint16_t qlen;
3294
3295         uint32_t queue_id = tm_node_queue_id(dev, nq);
3296
3297         struct tm_node *nt = nq->parent_node;
3298         uint32_t tc_id = tm_node_tc_id(dev, nt);
3299
3300         struct tm_node *np = nt->parent_node;
3301         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3302
3303         struct tm_node *ns = np->parent_node;
3304         uint32_t subport_id = tm_node_subport_id(dev, ns);
3305
3306         /* Stats read */
3307         uint32_t qid = tm_port_queue_id(dev,
3308                 subport_id,
3309                 pipe_id,
3310                 tc_id,
3311                 queue_id);
3312
3313         int status = rte_sched_queue_read_stats(
3314                 p->soft.tm.sched,
3315                 qid,
3316                 &s,
3317                 &qlen);
3318         if (status)
3319                 return status;
3320
3321         /* Stats accumulate */
3322         nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3323         nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3324         nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3325         nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3326                 s.n_bytes_dropped;
3327         nq->stats.leaf.n_pkts_queued = qlen;
3328
3329         /* Stats copy */
3330         if (stats)
3331                 memcpy(stats, &nq->stats, sizeof(*stats));
3332
3333         if (stats_mask)
3334                 *stats_mask = STATS_MASK_QUEUE;
3335
3336         /* Stats clear */
3337         if (clear)
3338                 memset(&nq->stats, 0, sizeof(nq->stats));
3339
3340         return 0;
3341 }
3342
3343 /* Traffic manager: read stats counters for a specific node */
3344 static int
3345 pmd_tm_node_stats_read(struct rte_eth_dev *dev,
3346         uint32_t node_id,
3347         struct rte_tm_node_stats *stats,
3348         uint64_t *stats_mask,
3349         int clear,
3350         struct rte_tm_error *error)
3351 {
3352         struct tm_node *n;
3353
3354         /* Port must be started and TM used. */
3355         if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
3356                 return -rte_tm_error_set(error,
3357                         EBUSY,
3358                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3359                         NULL,
3360                         rte_strerror(EBUSY));
3361
3362         /* Node must be valid */
3363         n = tm_node_search(dev, node_id);
3364         if (n == NULL)
3365                 return -rte_tm_error_set(error,
3366                         EINVAL,
3367                         RTE_TM_ERROR_TYPE_NODE_ID,
3368                         NULL,
3369                         rte_strerror(EINVAL));
3370
3371         switch (n->level) {
3372         case TM_NODE_LEVEL_PORT:
3373                 if (read_port_stats(dev, n, stats, stats_mask, clear))
3374                         return -rte_tm_error_set(error,
3375                                 EINVAL,
3376                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3377                                 NULL,
3378                                 rte_strerror(EINVAL));
3379                 return 0;
3380
3381         case TM_NODE_LEVEL_SUBPORT:
3382                 if (read_subport_stats(dev, n, stats, stats_mask, clear))
3383                         return -rte_tm_error_set(error,
3384                                 EINVAL,
3385                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3386                                 NULL,
3387                                 rte_strerror(EINVAL));
3388                 return 0;
3389
3390         case TM_NODE_LEVEL_PIPE:
3391                 if (read_pipe_stats(dev, n, stats, stats_mask, clear))
3392                         return -rte_tm_error_set(error,
3393                                 EINVAL,
3394                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3395                                 NULL,
3396                                 rte_strerror(EINVAL));
3397                 return 0;
3398
3399         case TM_NODE_LEVEL_TC:
3400                 if (read_tc_stats(dev, n, stats, stats_mask, clear))
3401                         return -rte_tm_error_set(error,
3402                                 EINVAL,
3403                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3404                                 NULL,
3405                                 rte_strerror(EINVAL));
3406                 return 0;
3407
3408         case TM_NODE_LEVEL_QUEUE:
3409         default:
3410                 if (read_queue_stats(dev, n, stats, stats_mask, clear))
3411                         return -rte_tm_error_set(error,
3412                                 EINVAL,
3413                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3414                                 NULL,
3415                                 rte_strerror(EINVAL));
3416                 return 0;
3417         }
3418 }
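/*
 * Usage sketch (application side; port_id and queue_node_id are hypothetical):
 * node counters are read through the generic API, with the fifth argument
 * requesting clear-on-read:
 *
 *     struct rte_tm_node_stats stats;
 *     uint64_t mask;
 *     struct rte_tm_error err;
 *
 *     if (rte_tm_node_stats_read(port_id, queue_node_id, &stats, &mask,
 *             1, &err) == 0)
 *         printf("queued pkts: %" PRIu64 "\n", stats.leaf.n_pkts_queued);
 */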
3419
3420 const struct rte_tm_ops pmd_tm_ops = {
3421         .node_type_get = pmd_tm_node_type_get,
3422         .capabilities_get = pmd_tm_capabilities_get,
3423         .level_capabilities_get = pmd_tm_level_capabilities_get,
3424         .node_capabilities_get = pmd_tm_node_capabilities_get,
3425
3426         .wred_profile_add = pmd_tm_wred_profile_add,
3427         .wred_profile_delete = pmd_tm_wred_profile_delete,
3428         .shared_wred_context_add_update = NULL,
3429         .shared_wred_context_delete = NULL,
3430
3431         .shaper_profile_add = pmd_tm_shaper_profile_add,
3432         .shaper_profile_delete = pmd_tm_shaper_profile_delete,
3433         .shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
3434         .shared_shaper_delete = pmd_tm_shared_shaper_delete,
3435
3436         .node_add = pmd_tm_node_add,
3437         .node_delete = pmd_tm_node_delete,
3438         .node_suspend = NULL,
3439         .node_resume = NULL,
3440         .hierarchy_commit = pmd_tm_hierarchy_commit,
3441
3442         .node_parent_update = pmd_tm_node_parent_update,
3443         .node_shaper_update = pmd_tm_node_shaper_update,
3444         .node_shared_shaper_update = NULL,
3445         .node_stats_update = NULL,
3446         .node_wfq_weight_mode_update = NULL,
3447         .node_cman_update = NULL,
3448         .node_wred_context_update = NULL,
3449         .node_shared_wred_context_update = NULL,
3450
3451         .node_stats_read = pmd_tm_node_stats_read,
3452 };
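/*
 * The ethdev layer hands this table back to applications through the rte_tm_*
 * API (via the driver's tm_ops_get callback). Entries left NULL, such as
 * node_suspend/node_resume or the WRED context updates, are reported to the
 * caller as unsupported (ENOSYS) by the generic TM layer.
 */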