net/softnic: add traffic manager object
[dpdk.git] drivers/net/softnic/rte_eth_softnic_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdlib.h>
7 #include <string.h>
8
9 #include <rte_malloc.h>
10 #include <rte_string_fns.h>
11
12 #include "rte_eth_softnic_internals.h"
13 #include "rte_eth_softnic.h"
14
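/* Default traffic class rate enforcement periods (in milliseconds) used for
 * the subport and pipe scheduler profiles. (Descriptive comment added here;
 * the values themselves come from the original source.)
 */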
15 #define SUBPORT_TC_PERIOD       10
16 #define PIPE_TC_PERIOD          40
17
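/* Initialize the per-device list of traffic manager ports. */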
18 int
19 softnic_tmgr_init(struct pmd_internals *p)
20 {
21         TAILQ_INIT(&p->tmgr_port_list);
22
23         return 0;
24 }
25
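/* Release every traffic manager port still present on the list. */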
26 void
27 softnic_tmgr_free(struct pmd_internals *p)
28 {
29         for ( ; ; ) {
30                 struct softnic_tmgr_port *tmgr_port;
31
32                 tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
33                 if (tmgr_port == NULL)
34                         break;
35
36                 TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
37                 free(tmgr_port);
38         }
39 }
40
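/* Look up a traffic manager port by name; returns NULL if not found. */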
41 struct softnic_tmgr_port *
42 softnic_tmgr_port_find(struct pmd_internals *p,
43         const char *name)
44 {
45         struct softnic_tmgr_port *tmgr_port;
46
47         if (name == NULL)
48                 return NULL;
49
50         TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
51                 if (strcmp(tmgr_port->name, name) == 0)
52                         return tmgr_port;
53
54         return NULL;
55 }
56
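/* Create a named traffic manager port that wraps an existing rte_sched port
 * and add it to the per-device list.
 */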
57 struct softnic_tmgr_port *
58 softnic_tmgr_port_create(struct pmd_internals *p,
59         const char *name,
60         struct rte_sched_port *sched)
61 {
62         struct softnic_tmgr_port *tmgr_port;
63
64         /* Check input params */
65         if (name == NULL ||
66                 softnic_tmgr_port_find(p, name) ||
67                 sched == NULL)
68                 return NULL;
69
70         /* Resource */
71
72         /* Node allocation */
73         tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
74         if (tmgr_port == NULL)
75                 return NULL;
76
77         /* Node fill in */
78         strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
79         tmgr_port->s = sched;
80
81         /* Node add to list */
82         TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);
83
84         return tmgr_port;
85 }
86
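/* Reset the TM hierarchy and initialize its object lists. */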
87 static void
88 tm_hierarchy_init(struct pmd_internals *p)
89 {
90         memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
91
92         /* Initialize shaper profile list */
93         TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
94
95         /* Initialize shared shaper list */
96         TAILQ_INIT(&p->soft.tm.h.shared_shapers);
97
98         /* Initialize wred profile list */
99         TAILQ_INIT(&p->soft.tm.h.wred_profiles);
100
101         /* Initialize TM node list */
102         TAILQ_INIT(&p->soft.tm.h.nodes);
103 }
104
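/* Free all hierarchy objects: nodes, WRED profiles, shared shapers and
 * shaper profiles.
 */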
105 static void
106 tm_hierarchy_uninit(struct pmd_internals *p)
107 {
108         /* Remove all nodes */
109         for ( ; ; ) {
110                 struct tm_node *tm_node;
111
112                 tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
113                 if (tm_node == NULL)
114                         break;
115
116                 TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
117                 free(tm_node);
118         }
119
120         /* Remove all WRED profiles */
121         for ( ; ; ) {
122                 struct tm_wred_profile *wred_profile;
123
124                 wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
125                 if (wred_profile == NULL)
126                         break;
127
128                 TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
129                 free(wred_profile);
130         }
131
132         /* Remove all shared shapers */
133         for ( ; ; ) {
134                 struct tm_shared_shaper *shared_shaper;
135
136                 shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
137                 if (shared_shaper == NULL)
138                         break;
139
140                 TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
141                 free(shared_shaper);
142         }
143
144         /* Remove all shaper profiles */
145         for ( ; ; ) {
146                 struct tm_shaper_profile *shaper_profile;
147
148                 shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
149                 if (shaper_profile == NULL)
150                         break;
151
152                 TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
153                         shaper_profile, node);
154                 free(shaper_profile);
155         }
156
157         memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
158 }
159
160 int
161 tm_init(struct pmd_internals *p)
162 {
163         tm_hierarchy_init(p);
164
165         return 0;
166 }
167
168 void
169 tm_free(struct pmd_internals *p)
170 {
171         tm_hierarchy_uninit(p);
172 }
173
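/* Instantiate the rte_sched port described by the frozen hierarchy:
 * configure the port, then each subport and each pipe that has a valid
 * profile, and register the result as the "TMGR" traffic manager port.
 */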
174 int
175 tm_start(struct pmd_internals *p)
176 {
177         struct softnic_tmgr_port *tmgr_port;
178         struct tm_params *t = &p->soft.tm.params;
179         struct rte_sched_port *sched;
180         uint32_t n_subports, subport_id;
181         int status;
182
183         /* Is hierarchy frozen? */
184         if (p->soft.tm.hierarchy_frozen == 0)
185                 return -1;
186
187         /* Port */
188         sched = rte_sched_port_config(&t->port_params);
189         if (sched == NULL)
190                 return -1;
191
192         /* Subport */
193         n_subports = t->port_params.n_subports_per_port;
194         for (subport_id = 0; subport_id < n_subports; subport_id++) {
195                 uint32_t n_pipes_per_subport =
196                         t->port_params.n_pipes_per_subport;
197                 uint32_t pipe_id;
198
199                 status = rte_sched_subport_config(sched,
200                         subport_id,
201                         &t->subport_params[subport_id]);
202                 if (status) {
203                         rte_sched_port_free(sched);
204                         return -1;
205                 }
206
207                 /* Pipe */
208                 n_pipes_per_subport = t->port_params.n_pipes_per_subport;
209                 for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
210                         int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
211                                 pipe_id;
212                         int profile_id = t->pipe_to_profile[pos];
213
214                         if (profile_id < 0)
215                                 continue;
216
217                         status = rte_sched_pipe_config(sched,
218                                 subport_id,
219                                 pipe_id,
220                                 profile_id);
221                         if (status) {
222                                 rte_sched_port_free(sched);
223                                 return -1;
224                         }
225                 }
226         }
227
228         tmgr_port = softnic_tmgr_port_create(p, "TMGR", sched);
229         if (tmgr_port == NULL) {
230                 rte_sched_port_free(sched);
231                 return -1;
232         }
233
234         /* Commit */
235         p->soft.tm.sched = sched;
236
237         return 0;
238 }
239
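/* Free the rte_sched port (if any) and unfreeze the hierarchy. */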
240 void
241 tm_stop(struct pmd_internals *p)
242 {
243         if (p->soft.tm.sched) {
244                 rte_sched_port_free(p->soft.tm.sched);
245                 p->soft.tm.sched = NULL;
246         }
247         /* Unfreeze hierarchy */
248         p->soft.tm.hierarchy_frozen = 0;
249 }
250
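/* Hierarchy object lookups: the helpers below search the per-device lists
 * by object ID (shaper profile, shared shaper, WRED profile, node).
 */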
251 static struct tm_shaper_profile *
252 tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
253 {
254         struct pmd_internals *p = dev->data->dev_private;
255         struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
256         struct tm_shaper_profile *sp;
257
258         TAILQ_FOREACH(sp, spl, node)
259                 if (shaper_profile_id == sp->shaper_profile_id)
260                         return sp;
261
262         return NULL;
263 }
264
265 static struct tm_shared_shaper *
266 tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
267 {
268         struct pmd_internals *p = dev->data->dev_private;
269         struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
270         struct tm_shared_shaper *ss;
271
272         TAILQ_FOREACH(ss, ssl, node)
273                 if (shared_shaper_id == ss->shared_shaper_id)
274                         return ss;
275
276         return NULL;
277 }
278
279 static struct tm_wred_profile *
280 tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
281 {
282         struct pmd_internals *p = dev->data->dev_private;
283         struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
284         struct tm_wred_profile *wp;
285
286         TAILQ_FOREACH(wp, wpl, node)
287                 if (wred_profile_id == wp->wred_profile_id)
288                         return wp;
289
290         return NULL;
291 }
292
293 static struct tm_node *
294 tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
295 {
296         struct pmd_internals *p = dev->data->dev_private;
297         struct tm_node_list *nl = &p->soft.tm.h.nodes;
298         struct tm_node *n;
299
300         TAILQ_FOREACH(n, nl, node)
301                 if (n->node_id == node_id)
302                         return n;
303
304         return NULL;
305 }
306
307 static struct tm_node *
308 tm_root_node_present(struct rte_eth_dev *dev)
309 {
310         struct pmd_internals *p = dev->data->dev_private;
311         struct tm_node_list *nl = &p->soft.tm.h.nodes;
312         struct tm_node *n;
313
314         TAILQ_FOREACH(n, nl, node)
315                 if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
316                         return n;
317
318         return NULL;
319 }
320
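/* The helpers below derive rte_sched indices (subport, pipe, TC, queue) for
 * a node from its position among sibling nodes in the node list or, for
 * traffic classes, from the node priority.
 */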
321 static uint32_t
322 tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
323 {
324         struct pmd_internals *p = dev->data->dev_private;
325         struct tm_node_list *nl = &p->soft.tm.h.nodes;
326         struct tm_node *ns;
327         uint32_t subport_id;
328
329         subport_id = 0;
330         TAILQ_FOREACH(ns, nl, node) {
331                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
332                         continue;
333
334                 if (ns->node_id == subport_node->node_id)
335                         return subport_id;
336
337                 subport_id++;
338         }
339
340         return UINT32_MAX;
341 }
342
343 static uint32_t
344 tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
345 {
346         struct pmd_internals *p = dev->data->dev_private;
347         struct tm_node_list *nl = &p->soft.tm.h.nodes;
348         struct tm_node *np;
349         uint32_t pipe_id;
350
351         pipe_id = 0;
352         TAILQ_FOREACH(np, nl, node) {
353                 if (np->level != TM_NODE_LEVEL_PIPE ||
354                         np->parent_node_id != pipe_node->parent_node_id)
355                         continue;
356
357                 if (np->node_id == pipe_node->node_id)
358                         return pipe_id;
359
360                 pipe_id++;
361         }
362
363         return UINT32_MAX;
364 }
365
366 static uint32_t
367 tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
368 {
369         return tc_node->priority;
370 }
371
372 static uint32_t
373 tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
374 {
375         struct pmd_internals *p = dev->data->dev_private;
376         struct tm_node_list *nl = &p->soft.tm.h.nodes;
377         struct tm_node *nq;
378         uint32_t queue_id;
379
380         queue_id = 0;
381         TAILQ_FOREACH(nq, nl, node) {
382                 if (nq->level != TM_NODE_LEVEL_QUEUE ||
383                         nq->parent_node_id != queue_node->parent_node_id)
384                         continue;
385
386                 if (nq->node_id == queue_node->node_id)
387                         return queue_id;
388
389                 queue_id++;
390         }
391
392         return UINT32_MAX;
393 }
394
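/* Maximum number of nodes per hierarchy level, derived from the configured
 * number of scheduler queues.
 */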
395 static uint32_t
396 tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
397 {
398         struct pmd_internals *p = dev->data->dev_private;
399         uint32_t n_queues_max = p->params.tm.n_queues;
400         uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
401         uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
402         uint32_t n_subports_max = n_pipes_max;
403         uint32_t n_root_max = 1;
404
405         switch (level) {
406         case TM_NODE_LEVEL_PORT:
407                 return n_root_max;
408         case TM_NODE_LEVEL_SUBPORT:
409                 return n_subports_max;
410         case TM_NODE_LEVEL_PIPE:
411                 return n_pipes_max;
412         case TM_NODE_LEVEL_TC:
413                 return n_tc_max;
414         case TM_NODE_LEVEL_QUEUE:
415         default:
416                 return n_queues_max;
417         }
418 }
419
420 /* Traffic manager node type get */
421 static int
422 pmd_tm_node_type_get(struct rte_eth_dev *dev,
423         uint32_t node_id,
424         int *is_leaf,
425         struct rte_tm_error *error)
426 {
427         struct pmd_internals *p = dev->data->dev_private;
428
429         if (is_leaf == NULL)
430                 return -rte_tm_error_set(error,
431                    EINVAL,
432                    RTE_TM_ERROR_TYPE_UNSPECIFIED,
433                    NULL,
434                    rte_strerror(EINVAL));
435
436         if (node_id == RTE_TM_NODE_ID_NULL ||
437                 (tm_node_search(dev, node_id) == NULL))
438                 return -rte_tm_error_set(error,
439                    EINVAL,
440                    RTE_TM_ERROR_TYPE_NODE_ID,
441                    NULL,
442                    rte_strerror(EINVAL));
443
444         *is_leaf = node_id < p->params.tm.n_queues;
445
446         return 0;
447 }
448
449 #ifdef RTE_SCHED_RED
450 #define WRED_SUPPORTED                                          1
451 #else
452 #define WRED_SUPPORTED                                          0
453 #endif
454
455 #define STATS_MASK_DEFAULT                                      \
456         (RTE_TM_STATS_N_PKTS |                                  \
457         RTE_TM_STATS_N_BYTES |                                  \
458         RTE_TM_STATS_N_PKTS_GREEN_DROPPED |                     \
459         RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
460
461 #define STATS_MASK_QUEUE                                                \
462         (STATS_MASK_DEFAULT |                                   \
463         RTE_TM_STATS_N_PKTS_QUEUED)
464
465 static const struct rte_tm_capabilities tm_cap = {
466         .n_nodes_max = UINT32_MAX,
467         .n_levels_max = TM_NODE_LEVEL_MAX,
468
469         .non_leaf_nodes_identical = 0,
470         .leaf_nodes_identical = 1,
471
472         .shaper_n_max = UINT32_MAX,
473         .shaper_private_n_max = UINT32_MAX,
474         .shaper_private_dual_rate_n_max = 0,
475         .shaper_private_rate_min = 1,
476         .shaper_private_rate_max = UINT32_MAX,
477
478         .shaper_shared_n_max = UINT32_MAX,
479         .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
480         .shaper_shared_n_shapers_per_node_max = 1,
481         .shaper_shared_dual_rate_n_max = 0,
482         .shaper_shared_rate_min = 1,
483         .shaper_shared_rate_max = UINT32_MAX,
484
485         .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
486         .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
487
488         .sched_n_children_max = UINT32_MAX,
489         .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
490         .sched_wfq_n_children_per_group_max = UINT32_MAX,
491         .sched_wfq_n_groups_max = 1,
492         .sched_wfq_weight_max = UINT32_MAX,
493
494         .cman_wred_packet_mode_supported = WRED_SUPPORTED,
495         .cman_wred_byte_mode_supported = 0,
496         .cman_head_drop_supported = 0,
497         .cman_wred_context_n_max = 0,
498         .cman_wred_context_private_n_max = 0,
499         .cman_wred_context_shared_n_max = 0,
500         .cman_wred_context_shared_n_nodes_per_context_max = 0,
501         .cman_wred_context_shared_n_contexts_per_node_max = 0,
502
503         .mark_vlan_dei_supported = {0, 0, 0},
504         .mark_ip_ecn_tcp_supported = {0, 0, 0},
505         .mark_ip_ecn_sctp_supported = {0, 0, 0},
506         .mark_ip_dscp_supported = {0, 0, 0},
507
508         .dynamic_update_mask = 0,
509
510         .stats_mask = STATS_MASK_QUEUE,
511 };
512
513 /* Traffic manager capabilities get */
514 static int
515 pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
516         struct rte_tm_capabilities *cap,
517         struct rte_tm_error *error)
518 {
519         if (cap == NULL)
520                 return -rte_tm_error_set(error,
521                    EINVAL,
522                    RTE_TM_ERROR_TYPE_CAPABILITIES,
523                    NULL,
524                    rte_strerror(EINVAL));
525
526         memcpy(cap, &tm_cap, sizeof(*cap));
527
528         cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
529                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
530                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
531                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
532                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
533
534         cap->shaper_private_n_max =
535                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
536                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
537                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
538                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);
539
540         cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
541                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);
542
543         cap->shaper_n_max = cap->shaper_private_n_max +
544                 cap->shaper_shared_n_max;
545
546         cap->shaper_shared_n_nodes_per_shaper_max =
547                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);
548
549         cap->sched_n_children_max = RTE_MAX(
550                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
551                 (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);
552
553         cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
554
555         if (WRED_SUPPORTED)
556                 cap->cman_wred_context_private_n_max =
557                         tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
558
559         cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
560                 cap->cman_wred_context_shared_n_max;
561
562         return 0;
563 }
564
565 static const struct rte_tm_level_capabilities tm_level_cap[] = {
566         [TM_NODE_LEVEL_PORT] = {
567                 .n_nodes_max = 1,
568                 .n_nodes_nonleaf_max = 1,
569                 .n_nodes_leaf_max = 0,
570                 .non_leaf_nodes_identical = 1,
571                 .leaf_nodes_identical = 0,
572
573                 {.nonleaf = {
574                         .shaper_private_supported = 1,
575                         .shaper_private_dual_rate_supported = 0,
576                         .shaper_private_rate_min = 1,
577                         .shaper_private_rate_max = UINT32_MAX,
578                         .shaper_shared_n_max = 0,
579
580                         .sched_n_children_max = UINT32_MAX,
581                         .sched_sp_n_priorities_max = 1,
582                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
583                         .sched_wfq_n_groups_max = 1,
584                         .sched_wfq_weight_max = 1,
585
586                         .stats_mask = STATS_MASK_DEFAULT,
587                 } },
588         },
589
590         [TM_NODE_LEVEL_SUBPORT] = {
591                 .n_nodes_max = UINT32_MAX,
592                 .n_nodes_nonleaf_max = UINT32_MAX,
593                 .n_nodes_leaf_max = 0,
594                 .non_leaf_nodes_identical = 1,
595                 .leaf_nodes_identical = 0,
596
597                 {.nonleaf = {
598                         .shaper_private_supported = 1,
599                         .shaper_private_dual_rate_supported = 0,
600                         .shaper_private_rate_min = 1,
601                         .shaper_private_rate_max = UINT32_MAX,
602                         .shaper_shared_n_max = 0,
603
604                         .sched_n_children_max = UINT32_MAX,
605                         .sched_sp_n_priorities_max = 1,
606                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
607                         .sched_wfq_n_groups_max = 1,
608 #ifdef RTE_SCHED_SUBPORT_TC_OV
609                         .sched_wfq_weight_max = UINT32_MAX,
610 #else
611                         .sched_wfq_weight_max = 1,
612 #endif
613                         .stats_mask = STATS_MASK_DEFAULT,
614                 } },
615         },
616
617         [TM_NODE_LEVEL_PIPE] = {
618                 .n_nodes_max = UINT32_MAX,
619                 .n_nodes_nonleaf_max = UINT32_MAX,
620                 .n_nodes_leaf_max = 0,
621                 .non_leaf_nodes_identical = 1,
622                 .leaf_nodes_identical = 0,
623
624                 {.nonleaf = {
625                         .shaper_private_supported = 1,
626                         .shaper_private_dual_rate_supported = 0,
627                         .shaper_private_rate_min = 1,
628                         .shaper_private_rate_max = UINT32_MAX,
629                         .shaper_shared_n_max = 0,
630
631                         .sched_n_children_max =
632                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
633                         .sched_sp_n_priorities_max =
634                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
635                         .sched_wfq_n_children_per_group_max = 1,
636                         .sched_wfq_n_groups_max = 0,
637                         .sched_wfq_weight_max = 1,
638
639                         .stats_mask = STATS_MASK_DEFAULT,
640                 } },
641         },
642
643         [TM_NODE_LEVEL_TC] = {
644                 .n_nodes_max = UINT32_MAX,
645                 .n_nodes_nonleaf_max = UINT32_MAX,
646                 .n_nodes_leaf_max = 0,
647                 .non_leaf_nodes_identical = 1,
648                 .leaf_nodes_identical = 0,
649
650                 {.nonleaf = {
651                         .shaper_private_supported = 1,
652                         .shaper_private_dual_rate_supported = 0,
653                         .shaper_private_rate_min = 1,
654                         .shaper_private_rate_max = UINT32_MAX,
655                         .shaper_shared_n_max = 1,
656
657                         .sched_n_children_max =
658                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
659                         .sched_sp_n_priorities_max = 1,
660                         .sched_wfq_n_children_per_group_max =
661                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
662                         .sched_wfq_n_groups_max = 1,
663                         .sched_wfq_weight_max = UINT32_MAX,
664
665                         .stats_mask = STATS_MASK_DEFAULT,
666                 } },
667         },
668
669         [TM_NODE_LEVEL_QUEUE] = {
670                 .n_nodes_max = UINT32_MAX,
671                 .n_nodes_nonleaf_max = 0,
672                 .n_nodes_leaf_max = UINT32_MAX,
673                 .non_leaf_nodes_identical = 0,
674                 .leaf_nodes_identical = 1,
675
676                 {.leaf = {
677                         .shaper_private_supported = 0,
678                         .shaper_private_dual_rate_supported = 0,
679                         .shaper_private_rate_min = 0,
680                         .shaper_private_rate_max = 0,
681                         .shaper_shared_n_max = 0,
682
683                         .cman_head_drop_supported = 0,
684                         .cman_wred_packet_mode_supported = WRED_SUPPORTED,
685                         .cman_wred_byte_mode_supported = 0,
686                         .cman_wred_context_private_supported = WRED_SUPPORTED,
687                         .cman_wred_context_shared_n_max = 0,
688
689                         .stats_mask = STATS_MASK_QUEUE,
690                 } },
691         },
692 };
693
694 /* Traffic manager level capabilities get */
695 static int
696 pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
697         uint32_t level_id,
698         struct rte_tm_level_capabilities *cap,
699         struct rte_tm_error *error)
700 {
701         if (cap == NULL)
702                 return -rte_tm_error_set(error,
703                    EINVAL,
704                    RTE_TM_ERROR_TYPE_CAPABILITIES,
705                    NULL,
706                    rte_strerror(EINVAL));
707
708         if (level_id >= TM_NODE_LEVEL_MAX)
709                 return -rte_tm_error_set(error,
710                    EINVAL,
711                    RTE_TM_ERROR_TYPE_LEVEL_ID,
712                    NULL,
713                    rte_strerror(EINVAL));
714
715         memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));
716
717         switch (level_id) {
718         case TM_NODE_LEVEL_PORT:
719                 cap->nonleaf.sched_n_children_max =
720                         tm_level_get_max_nodes(dev,
721                                 TM_NODE_LEVEL_SUBPORT);
722                 cap->nonleaf.sched_wfq_n_children_per_group_max =
723                         cap->nonleaf.sched_n_children_max;
724                 break;
725
726         case TM_NODE_LEVEL_SUBPORT:
727                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
728                         TM_NODE_LEVEL_SUBPORT);
729                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
730                 cap->nonleaf.sched_n_children_max =
731                         tm_level_get_max_nodes(dev,
732                                 TM_NODE_LEVEL_PIPE);
733                 cap->nonleaf.sched_wfq_n_children_per_group_max =
734                         cap->nonleaf.sched_n_children_max;
735                 break;
736
737         case TM_NODE_LEVEL_PIPE:
738                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
739                         TM_NODE_LEVEL_PIPE);
740                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
741                 break;
742
743         case TM_NODE_LEVEL_TC:
744                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
745                         TM_NODE_LEVEL_TC);
746                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
747                 break;
748
749         case TM_NODE_LEVEL_QUEUE:
750         default:
751                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
752                         TM_NODE_LEVEL_QUEUE);
753                 cap->n_nodes_leaf_max = cap->n_nodes_max;
754                 break;
755         }
756
757         return 0;
758 }
759
760 static const struct rte_tm_node_capabilities tm_node_cap[] = {
761         [TM_NODE_LEVEL_PORT] = {
762                 .shaper_private_supported = 1,
763                 .shaper_private_dual_rate_supported = 0,
764                 .shaper_private_rate_min = 1,
765                 .shaper_private_rate_max = UINT32_MAX,
766                 .shaper_shared_n_max = 0,
767
768                 {.nonleaf = {
769                         .sched_n_children_max = UINT32_MAX,
770                         .sched_sp_n_priorities_max = 1,
771                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
772                         .sched_wfq_n_groups_max = 1,
773                         .sched_wfq_weight_max = 1,
774                 } },
775
776                 .stats_mask = STATS_MASK_DEFAULT,
777         },
778
779         [TM_NODE_LEVEL_SUBPORT] = {
780                 .shaper_private_supported = 1,
781                 .shaper_private_dual_rate_supported = 0,
782                 .shaper_private_rate_min = 1,
783                 .shaper_private_rate_max = UINT32_MAX,
784                 .shaper_shared_n_max = 0,
785
786                 {.nonleaf = {
787                         .sched_n_children_max = UINT32_MAX,
788                         .sched_sp_n_priorities_max = 1,
789                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
790                         .sched_wfq_n_groups_max = 1,
791                         .sched_wfq_weight_max = UINT32_MAX,
792                 } },
793
794                 .stats_mask = STATS_MASK_DEFAULT,
795         },
796
797         [TM_NODE_LEVEL_PIPE] = {
798                 .shaper_private_supported = 1,
799                 .shaper_private_dual_rate_supported = 0,
800                 .shaper_private_rate_min = 1,
801                 .shaper_private_rate_max = UINT32_MAX,
802                 .shaper_shared_n_max = 0,
803
804                 {.nonleaf = {
805                         .sched_n_children_max =
806                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
807                         .sched_sp_n_priorities_max =
808                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
809                         .sched_wfq_n_children_per_group_max = 1,
810                         .sched_wfq_n_groups_max = 0,
811                         .sched_wfq_weight_max = 1,
812                 } },
813
814                 .stats_mask = STATS_MASK_DEFAULT,
815         },
816
817         [TM_NODE_LEVEL_TC] = {
818                 .shaper_private_supported = 1,
819                 .shaper_private_dual_rate_supported = 0,
820                 .shaper_private_rate_min = 1,
821                 .shaper_private_rate_max = UINT32_MAX,
822                 .shaper_shared_n_max = 1,
823
824                 {.nonleaf = {
825                         .sched_n_children_max =
826                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
827                         .sched_sp_n_priorities_max = 1,
828                         .sched_wfq_n_children_per_group_max =
829                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
830                         .sched_wfq_n_groups_max = 1,
831                         .sched_wfq_weight_max = UINT32_MAX,
832                 } },
833
834                 .stats_mask = STATS_MASK_DEFAULT,
835         },
836
837         [TM_NODE_LEVEL_QUEUE] = {
838                 .shaper_private_supported = 0,
839                 .shaper_private_dual_rate_supported = 0,
840                 .shaper_private_rate_min = 0,
841                 .shaper_private_rate_max = 0,
842                 .shaper_shared_n_max = 0,
843
844
845                 {.leaf = {
846                         .cman_head_drop_supported = 0,
847                         .cman_wred_packet_mode_supported = WRED_SUPPORTED,
848                         .cman_wred_byte_mode_supported = 0,
849                         .cman_wred_context_private_supported = WRED_SUPPORTED,
850                         .cman_wred_context_shared_n_max = 0,
851                 } },
852
853                 .stats_mask = STATS_MASK_QUEUE,
854         },
855 };
856
857 /* Traffic manager node capabilities get */
858 static int
859 pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
860         uint32_t node_id,
861         struct rte_tm_node_capabilities *cap,
862         struct rte_tm_error *error)
863 {
864         struct tm_node *tm_node;
865
866         if (cap == NULL)
867                 return -rte_tm_error_set(error,
868                    EINVAL,
869                    RTE_TM_ERROR_TYPE_CAPABILITIES,
870                    NULL,
871                    rte_strerror(EINVAL));
872
873         tm_node = tm_node_search(dev, node_id);
874         if (tm_node == NULL)
875                 return -rte_tm_error_set(error,
876                    EINVAL,
877                    RTE_TM_ERROR_TYPE_NODE_ID,
878                    NULL,
879                    rte_strerror(EINVAL));
880
881         memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));
882
883         switch (tm_node->level) {
884         case TM_NODE_LEVEL_PORT:
885                 cap->nonleaf.sched_n_children_max =
886                         tm_level_get_max_nodes(dev,
887                                 TM_NODE_LEVEL_SUBPORT);
888                 cap->nonleaf.sched_wfq_n_children_per_group_max =
889                         cap->nonleaf.sched_n_children_max;
890                 break;
891
892         case TM_NODE_LEVEL_SUBPORT:
893                 cap->nonleaf.sched_n_children_max =
894                         tm_level_get_max_nodes(dev,
895                                 TM_NODE_LEVEL_PIPE);
896                 cap->nonleaf.sched_wfq_n_children_per_group_max =
897                         cap->nonleaf.sched_n_children_max;
898                 break;
899
900         case TM_NODE_LEVEL_PIPE:
901         case TM_NODE_LEVEL_TC:
902         case TM_NODE_LEVEL_QUEUE:
903         default:
904                 break;
905         }
906
907         return 0;
908 }
909
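/* Validate a shaper profile: single rate only, non-zero 32-bit peak rate and
 * size, and a fixed 24-byte packet length adjustment.
 */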
910 static int
911 shaper_profile_check(struct rte_eth_dev *dev,
912         uint32_t shaper_profile_id,
913         struct rte_tm_shaper_params *profile,
914         struct rte_tm_error *error)
915 {
916         struct tm_shaper_profile *sp;
917
918         /* Shaper profile ID must not be NONE. */
919         if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
920                 return -rte_tm_error_set(error,
921                         EINVAL,
922                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
923                         NULL,
924                         rte_strerror(EINVAL));
925
926         /* Shaper profile must not exist. */
927         sp = tm_shaper_profile_search(dev, shaper_profile_id);
928         if (sp)
929                 return -rte_tm_error_set(error,
930                         EEXIST,
931                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
932                         NULL,
933                         rte_strerror(EEXIST));
934
935         /* Profile must not be NULL. */
936         if (profile == NULL)
937                 return -rte_tm_error_set(error,
938                         EINVAL,
939                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
940                         NULL,
941                         rte_strerror(EINVAL));
942
943         /* Peak rate: non-zero, 32-bit */
944         if (profile->peak.rate == 0 ||
945                 profile->peak.rate >= UINT32_MAX)
946                 return -rte_tm_error_set(error,
947                         EINVAL,
948                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
949                         NULL,
950                         rte_strerror(EINVAL));
951
952         /* Peak size: non-zero, 32-bit */
953         if (profile->peak.size == 0 ||
954                 profile->peak.size >= UINT32_MAX)
955                 return -rte_tm_error_set(error,
956                         EINVAL,
957                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
958                         NULL,
959                         rte_strerror(EINVAL));
960
961         /* Dual-rate profiles are not supported. */
962         if (profile->committed.rate != 0)
963                 return -rte_tm_error_set(error,
964                         EINVAL,
965                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
966                         NULL,
967                         rte_strerror(EINVAL));
968
969         /* Packet length adjust: 24 bytes */
970         if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
971                 return -rte_tm_error_set(error,
972                         EINVAL,
973                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
974                         NULL,
975                         rte_strerror(EINVAL));
976
977         return 0;
978 }
979
980 /* Traffic manager shaper profile add */
981 static int
982 pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
983         uint32_t shaper_profile_id,
984         struct rte_tm_shaper_params *profile,
985         struct rte_tm_error *error)
986 {
987         struct pmd_internals *p = dev->data->dev_private;
988         struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
989         struct tm_shaper_profile *sp;
990         int status;
991
992         /* Check input params */
993         status = shaper_profile_check(dev, shaper_profile_id, profile, error);
994         if (status)
995                 return status;
996
997         /* Memory allocation */
998         sp = calloc(1, sizeof(struct tm_shaper_profile));
999         if (sp == NULL)
1000                 return -rte_tm_error_set(error,
1001                         ENOMEM,
1002                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1003                         NULL,
1004                         rte_strerror(ENOMEM));
1005
1006         /* Fill in */
1007         sp->shaper_profile_id = shaper_profile_id;
1008         memcpy(&sp->params, profile, sizeof(sp->params));
1009
1010         /* Add to list */
1011         TAILQ_INSERT_TAIL(spl, sp, node);
1012         p->soft.tm.h.n_shaper_profiles++;
1013
1014         return 0;
1015 }
1016
1017 /* Traffic manager shaper profile delete */
1018 static int
1019 pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
1020         uint32_t shaper_profile_id,
1021         struct rte_tm_error *error)
1022 {
1023         struct pmd_internals *p = dev->data->dev_private;
1024         struct tm_shaper_profile *sp;
1025
1026         /* Check existing */
1027         sp = tm_shaper_profile_search(dev, shaper_profile_id);
1028         if (sp == NULL)
1029                 return -rte_tm_error_set(error,
1030                         EINVAL,
1031                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1032                         NULL,
1033                         rte_strerror(EINVAL));
1034
1035         /* Check unused */
1036         if (sp->n_users)
1037                 return -rte_tm_error_set(error,
1038                         EBUSY,
1039                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1040                         NULL,
1041                         rte_strerror(EBUSY));
1042
1043         /* Remove from list */
1044         TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
1045         p->soft.tm.h.n_shaper_profiles--;
1046         free(sp);
1047
1048         return 0;
1049 }
1050
1051 static struct tm_node *
1052 tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
1053         struct tm_shared_shaper *ss)
1054 {
1055         struct pmd_internals *p = dev->data->dev_private;
1056         struct tm_node_list *nl = &p->soft.tm.h.nodes;
1057         struct tm_node *n;
1058
1059         /* Find the TC node that uses this shared shaper (one per subport TC) */
1060         TAILQ_FOREACH(n, nl, node) {
1061                 if (n->level != TM_NODE_LEVEL_TC ||
1062                         n->params.n_shared_shapers == 0 ||
1063                         n->params.shared_shaper_id[0] != ss->shared_shaper_id)
1064                         continue;
1065
1066                 return n;
1067         }
1068
1069         return NULL;
1070 }
1071
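/* Apply a new shaper profile to the subport TC rate corresponding to the
 * given TC node, then update the shared shaper and profile user counts.
 */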
1072 static int
1073 update_subport_tc_rate(struct rte_eth_dev *dev,
1074         struct tm_node *nt,
1075         struct tm_shared_shaper *ss,
1076         struct tm_shaper_profile *sp_new)
1077 {
1078         struct pmd_internals *p = dev->data->dev_private;
1079         uint32_t tc_id = tm_node_tc_id(dev, nt);
1080
1081         struct tm_node *np = nt->parent_node;
1082
1083         struct tm_node *ns = np->parent_node;
1084         uint32_t subport_id = tm_node_subport_id(dev, ns);
1085
1086         struct rte_sched_subport_params subport_params;
1087
1088         struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
1089                 ss->shaper_profile_id);
1090
1091         /* Derive new subport configuration. */
1092         memcpy(&subport_params,
1093                 &p->soft.tm.params.subport_params[subport_id],
1094                 sizeof(subport_params));
1095         subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
1096
1097         /* Update the subport configuration. */
1098         if (rte_sched_subport_config(p->soft.tm.sched,
1099                 subport_id, &subport_params))
1100                 return -1;
1101
1102         /* Commit changes. */
1103         sp_old->n_users--;
1104
1105         ss->shaper_profile_id = sp_new->shaper_profile_id;
1106         sp_new->n_users++;
1107
1108         memcpy(&p->soft.tm.params.subport_params[subport_id],
1109                 &subport_params,
1110                 sizeof(subport_params));
1111
1112         return 0;
1113 }
1114
1115 /* Traffic manager shared shaper add/update */
1116 static int
1117 pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
1118         uint32_t shared_shaper_id,
1119         uint32_t shaper_profile_id,
1120         struct rte_tm_error *error)
1121 {
1122         struct pmd_internals *p = dev->data->dev_private;
1123         struct tm_shared_shaper *ss;
1124         struct tm_shaper_profile *sp;
1125         struct tm_node *nt;
1126
1127         /* Shaper profile must be valid. */
1128         sp = tm_shaper_profile_search(dev, shaper_profile_id);
1129         if (sp == NULL)
1130                 return -rte_tm_error_set(error,
1131                         EINVAL,
1132                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1133                         NULL,
1134                         rte_strerror(EINVAL));
1135
1136         /**
1137          * Add new shared shaper
1138          */
1139         ss = tm_shared_shaper_search(dev, shared_shaper_id);
1140         if (ss == NULL) {
1141                 struct tm_shared_shaper_list *ssl =
1142                         &p->soft.tm.h.shared_shapers;
1143
1144                 /* Hierarchy must not be frozen */
1145                 if (p->soft.tm.hierarchy_frozen)
1146                         return -rte_tm_error_set(error,
1147                                 EBUSY,
1148                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1149                                 NULL,
1150                                 rte_strerror(EBUSY));
1151
1152                 /* Memory allocation */
1153                 ss = calloc(1, sizeof(struct tm_shared_shaper));
1154                 if (ss == NULL)
1155                         return -rte_tm_error_set(error,
1156                                 ENOMEM,
1157                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1158                                 NULL,
1159                                 rte_strerror(ENOMEM));
1160
1161                 /* Fill in */
1162                 ss->shared_shaper_id = shared_shaper_id;
1163                 ss->shaper_profile_id = shaper_profile_id;
1164
1165                 /* Add to list */
1166                 TAILQ_INSERT_TAIL(ssl, ss, node);
1167                 p->soft.tm.h.n_shared_shapers++;
1168
1169                 return 0;
1170         }
1171
1172         /**
1173          * Update existing shared shaper
1174          */
1175         /* Hierarchy must be frozen (run-time update) */
1176         if (p->soft.tm.hierarchy_frozen == 0)
1177                 return -rte_tm_error_set(error,
1178                         EBUSY,
1179                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1180                         NULL,
1181                         rte_strerror(EBUSY));
1182
1183
1184         /* Propagate change; fail if no TC node uses this shared shaper. */
1185         nt = tm_shared_shaper_get_tc(dev, ss);
1186         if (nt == NULL || update_subport_tc_rate(dev, nt, ss, sp))
1187                 return -rte_tm_error_set(error,
1188                         EINVAL,
1189                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1190                         NULL,
1191                         rte_strerror(EINVAL));
1192
1193         return 0;
1194 }
1195
1196 /* Traffic manager shared shaper delete */
1197 static int
1198 pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
1199         uint32_t shared_shaper_id,
1200         struct rte_tm_error *error)
1201 {
1202         struct pmd_internals *p = dev->data->dev_private;
1203         struct tm_shared_shaper *ss;
1204
1205         /* Check existing */
1206         ss = tm_shared_shaper_search(dev, shared_shaper_id);
1207         if (ss == NULL)
1208                 return -rte_tm_error_set(error,
1209                         EINVAL,
1210                         RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1211                         NULL,
1212                         rte_strerror(EINVAL));
1213
1214         /* Check unused */
1215         if (ss->n_users)
1216                 return -rte_tm_error_set(error,
1217                         EBUSY,
1218                         RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1219                         NULL,
1220                         rte_strerror(EBUSY));
1221
1222         /* Remove from list */
1223         TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
1224         p->soft.tm.h.n_shared_shapers--;
1225         free(ss);
1226
1227         return 0;
1228 }
1229
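/* Validate a WRED profile: packet mode only, with per-color thresholds that
 * satisfy min_th <= max_th, max_th > 0 and fit in 16 bits.
 */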
1230 static int
1231 wred_profile_check(struct rte_eth_dev *dev,
1232         uint32_t wred_profile_id,
1233         struct rte_tm_wred_params *profile,
1234         struct rte_tm_error *error)
1235 {
1236         struct tm_wred_profile *wp;
1237         enum rte_tm_color color;
1238
1239         /* WRED profile ID must not be NONE. */
1240         if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
1241                 return -rte_tm_error_set(error,
1242                         EINVAL,
1243                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1244                         NULL,
1245                         rte_strerror(EINVAL));
1246
1247         /* WRED profile must not exist. */
1248         wp = tm_wred_profile_search(dev, wred_profile_id);
1249         if (wp)
1250                 return -rte_tm_error_set(error,
1251                         EEXIST,
1252                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1253                         NULL,
1254                         rte_strerror(EEXIST));
1255
1256         /* Profile must not be NULL. */
1257         if (profile == NULL)
1258                 return -rte_tm_error_set(error,
1259                         EINVAL,
1260                         RTE_TM_ERROR_TYPE_WRED_PROFILE,
1261                         NULL,
1262                         rte_strerror(EINVAL));
1263
1264         /* WRED profile must be in packet mode */
1265         if (profile->packet_mode == 0)
1266                 return -rte_tm_error_set(error,
1267                         ENOTSUP,
1268                         RTE_TM_ERROR_TYPE_WRED_PROFILE,
1269                         NULL,
1270                         rte_strerror(ENOTSUP));
1271
1272         /* min_th <= max_th, max_th > 0, both thresholds fit in 16 bits */
1273         for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
1274                 uint32_t min_th = profile->red_params[color].min_th;
1275                 uint32_t max_th = profile->red_params[color].max_th;
1276
1277                 if (min_th > max_th ||
1278                         max_th == 0 ||
1279                         min_th > UINT16_MAX ||
1280                         max_th > UINT16_MAX)
1281                         return -rte_tm_error_set(error,
1282                                 EINVAL,
1283                                 RTE_TM_ERROR_TYPE_WRED_PROFILE,
1284                                 NULL,
1285                                 rte_strerror(EINVAL));
1286         }
1287
1288         return 0;
1289 }
1290
1291 /* Traffic manager WRED profile add */
1292 static int
1293 pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
1294         uint32_t wred_profile_id,
1295         struct rte_tm_wred_params *profile,
1296         struct rte_tm_error *error)
1297 {
1298         struct pmd_internals *p = dev->data->dev_private;
1299         struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
1300         struct tm_wred_profile *wp;
1301         int status;
1302
1303         /* Check input params */
1304         status = wred_profile_check(dev, wred_profile_id, profile, error);
1305         if (status)
1306                 return status;
1307
1308         /* Memory allocation */
1309         wp = calloc(1, sizeof(struct tm_wred_profile));
1310         if (wp == NULL)
1311                 return -rte_tm_error_set(error,
1312                         ENOMEM,
1313                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1314                         NULL,
1315                         rte_strerror(ENOMEM));
1316
1317         /* Fill in */
1318         wp->wred_profile_id = wred_profile_id;
1319         memcpy(&wp->params, profile, sizeof(wp->params));
1320
1321         /* Add to list */
1322         TAILQ_INSERT_TAIL(wpl, wp, node);
1323         p->soft.tm.h.n_wred_profiles++;
1324
1325         return 0;
1326 }
1327
1328 /* Traffic manager WRED profile delete */
1329 static int
1330 pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
1331         uint32_t wred_profile_id,
1332         struct rte_tm_error *error)
1333 {
1334         struct pmd_internals *p = dev->data->dev_private;
1335         struct tm_wred_profile *wp;
1336
1337         /* Check existing */
1338         wp = tm_wred_profile_search(dev, wred_profile_id);
1339         if (wp == NULL)
1340                 return -rte_tm_error_set(error,
1341                         EINVAL,
1342                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1343                         NULL,
1344                         rte_strerror(EINVAL));
1345
1346         /* Check unused */
1347         if (wp->n_users)
1348                 return -rte_tm_error_set(error,
1349                         EBUSY,
1350                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1351                         NULL,
1352                         rte_strerror(EBUSY));
1353
1354         /* Remove from list */
1355         TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
1356         p->soft.tm.h.n_wred_profiles--;
1357         free(wp);
1358
1359         return 0;
1360 }
1361
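/* Per-level validation helpers for node add: each one enforces the node ID,
 * priority, weight, shaper, shared shaper and stats mask rules specific to
 * its hierarchy level.
 */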
1362 static int
1363 node_add_check_port(struct rte_eth_dev *dev,
1364         uint32_t node_id,
1365         uint32_t parent_node_id __rte_unused,
1366         uint32_t priority,
1367         uint32_t weight,
1368         uint32_t level_id __rte_unused,
1369         struct rte_tm_node_params *params,
1370         struct rte_tm_error *error)
1371 {
1372         struct pmd_internals *p = dev->data->dev_private;
1373         struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
1374                 params->shaper_profile_id);
1375
1376         /* node type: non-leaf */
1377         if (node_id < p->params.tm.n_queues)
1378                 return -rte_tm_error_set(error,
1379                         EINVAL,
1380                         RTE_TM_ERROR_TYPE_NODE_ID,
1381                         NULL,
1382                         rte_strerror(EINVAL));
1383
1384         /* Priority must be 0 */
1385         if (priority != 0)
1386                 return -rte_tm_error_set(error,
1387                         EINVAL,
1388                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1389                         NULL,
1390                         rte_strerror(EINVAL));
1391
1392         /* Weight must be 1 */
1393         if (weight != 1)
1394                 return -rte_tm_error_set(error,
1395                         EINVAL,
1396                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1397                         NULL,
1398                         rte_strerror(EINVAL));
1399
1400         /* Shaper must be valid */
1401         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1402                 sp == NULL)
1403                 return -rte_tm_error_set(error,
1404                         EINVAL,
1405                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1406                         NULL,
1407                         rte_strerror(EINVAL));
1408
1409         /* No shared shapers */
1410         if (params->n_shared_shapers != 0)
1411                 return -rte_tm_error_set(error,
1412                         EINVAL,
1413                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1414                         NULL,
1415                         rte_strerror(EINVAL));
1416
1417         /* Number of SP priorities must be 1 */
1418         if (params->nonleaf.n_sp_priorities != 1)
1419                 return -rte_tm_error_set(error,
1420                         EINVAL,
1421                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1422                         NULL,
1423                         rte_strerror(EINVAL));
1424
1425         /* Stats */
1426         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1427                 return -rte_tm_error_set(error,
1428                         EINVAL,
1429                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1430                         NULL,
1431                         rte_strerror(EINVAL));
1432
1433         return 0;
1434 }
1435
1436 static int
1437 node_add_check_subport(struct rte_eth_dev *dev,
1438         uint32_t node_id,
1439         uint32_t parent_node_id __rte_unused,
1440         uint32_t priority,
1441         uint32_t weight,
1442         uint32_t level_id __rte_unused,
1443         struct rte_tm_node_params *params,
1444         struct rte_tm_error *error)
1445 {
1446         struct pmd_internals *p = dev->data->dev_private;
1447
1448         /* node type: non-leaf */
1449         if (node_id < p->params.tm.n_queues)
1450                 return -rte_tm_error_set(error,
1451                         EINVAL,
1452                         RTE_TM_ERROR_TYPE_NODE_ID,
1453                         NULL,
1454                         rte_strerror(EINVAL));
1455
1456         /* Priority must be 0 */
1457         if (priority != 0)
1458                 return -rte_tm_error_set(error,
1459                         EINVAL,
1460                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1461                         NULL,
1462                         rte_strerror(EINVAL));
1463
1464         /* Weight must be 1 */
1465         if (weight != 1)
1466                 return -rte_tm_error_set(error,
1467                         EINVAL,
1468                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1469                         NULL,
1470                         rte_strerror(EINVAL));
1471
1472         /* Shaper must be valid */
1473         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1474                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1475                 return -rte_tm_error_set(error,
1476                         EINVAL,
1477                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1478                         NULL,
1479                         rte_strerror(EINVAL));
1480
1481         /* No shared shapers */
1482         if (params->n_shared_shapers != 0)
1483                 return -rte_tm_error_set(error,
1484                         EINVAL,
1485                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1486                         NULL,
1487                         rte_strerror(EINVAL));
1488
1489         /* Number of SP priorities must be 1 */
1490         if (params->nonleaf.n_sp_priorities != 1)
1491                 return -rte_tm_error_set(error,
1492                         EINVAL,
1493                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1494                         NULL,
1495                         rte_strerror(EINVAL));
1496
1497         /* Stats */
1498         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1499                 return -rte_tm_error_set(error,
1500                         EINVAL,
1501                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1502                         NULL,
1503                         rte_strerror(EINVAL));
1504
1505         return 0;
1506 }
1507
1508 static int
1509 node_add_check_pipe(struct rte_eth_dev *dev,
1510         uint32_t node_id,
1511         uint32_t parent_node_id __rte_unused,
1512         uint32_t priority,
1513         uint32_t weight __rte_unused,
1514         uint32_t level_id __rte_unused,
1515         struct rte_tm_node_params *params,
1516         struct rte_tm_error *error)
1517 {
1518         struct pmd_internals *p = dev->data->dev_private;
1519
1520         /* node type: non-leaf */
1521         if (node_id < p->params.tm.n_queues)
1522                 return -rte_tm_error_set(error,
1523                         EINVAL,
1524                         RTE_TM_ERROR_TYPE_NODE_ID,
1525                         NULL,
1526                         rte_strerror(EINVAL));
1527
1528         /* Priority must be 0 */
1529         if (priority != 0)
1530                 return -rte_tm_error_set(error,
1531                         EINVAL,
1532                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1533                         NULL,
1534                         rte_strerror(EINVAL));
1535
1536         /* Shaper must be valid */
1537         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1538                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1539                 return -rte_tm_error_set(error,
1540                         EINVAL,
1541                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1542                         NULL,
1543                         rte_strerror(EINVAL));
1544
1545         /* No shared shapers */
1546         if (params->n_shared_shapers != 0)
1547                 return -rte_tm_error_set(error,
1548                         EINVAL,
1549                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1550                         NULL,
1551                         rte_strerror(EINVAL));
1552
1553         /* Number of SP priorities must be 4 */
1554         if (params->nonleaf.n_sp_priorities !=
1555                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1556                 return -rte_tm_error_set(error,
1557                         EINVAL,
1558                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1559                         NULL,
1560                         rte_strerror(EINVAL));
1561
1562         /* WFQ mode must be byte mode */
1563         if (params->nonleaf.wfq_weight_mode != NULL &&
1564                 params->nonleaf.wfq_weight_mode[0] != 0 &&
1565                 params->nonleaf.wfq_weight_mode[1] != 0 &&
1566                 params->nonleaf.wfq_weight_mode[2] != 0 &&
1567                 params->nonleaf.wfq_weight_mode[3] != 0)
1568                 return -rte_tm_error_set(error,
1569                         EINVAL,
1570                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
1571                         NULL,
1572                         rte_strerror(EINVAL));
1573
1574         /* Stats */
1575         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1576                 return -rte_tm_error_set(error,
1577                         EINVAL,
1578                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1579                         NULL,
1580                         rte_strerror(EINVAL));
1581
1582         return 0;
1583 }
1584
1585 static int
1586 node_add_check_tc(struct rte_eth_dev *dev,
1587         uint32_t node_id,
1588         uint32_t parent_node_id __rte_unused,
1589         uint32_t priority __rte_unused,
1590         uint32_t weight,
1591         uint32_t level_id __rte_unused,
1592         struct rte_tm_node_params *params,
1593         struct rte_tm_error *error)
1594 {
1595         struct pmd_internals *p = dev->data->dev_private;
1596
1597         /* node type: non-leaf */
1598         if (node_id < p->params.tm.n_queues)
1599                 return -rte_tm_error_set(error,
1600                         EINVAL,
1601                         RTE_TM_ERROR_TYPE_NODE_ID,
1602                         NULL,
1603                         rte_strerror(EINVAL));
1604
1605         /* Weight must be 1 */
1606         if (weight != 1)
1607                 return -rte_tm_error_set(error,
1608                         EINVAL,
1609                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1610                         NULL,
1611                         rte_strerror(EINVAL));
1612
1613         /* Shaper must be valid */
1614         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1615                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1616                 return -rte_tm_error_set(error,
1617                         EINVAL,
1618                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1619                         NULL,
1620                         rte_strerror(EINVAL));
1621
1622         /* Single valid shared shaper */
1623         if (params->n_shared_shapers > 1)
1624                 return -rte_tm_error_set(error,
1625                         EINVAL,
1626                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1627                         NULL,
1628                         rte_strerror(EINVAL));
1629
1630         if (params->n_shared_shapers == 1 &&
1631                 (params->shared_shaper_id == NULL ||
1632                 (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
1633                 return -rte_tm_error_set(error,
1634                         EINVAL,
1635                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
1636                         NULL,
1637                         rte_strerror(EINVAL));
1638
1639         /* Number of priorities must be 1 */
1640         if (params->nonleaf.n_sp_priorities != 1)
1641                 return -rte_tm_error_set(error,
1642                         EINVAL,
1643                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1644                         NULL,
1645                         rte_strerror(EINVAL));
1646
1647         /* Stats */
1648         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1649                 return -rte_tm_error_set(error,
1650                         EINVAL,
1651                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1652                         NULL,
1653                         rte_strerror(EINVAL));
1654
1655         return 0;
1656 }
1657
1658 static int
1659 node_add_check_queue(struct rte_eth_dev *dev,
1660         uint32_t node_id,
1661         uint32_t parent_node_id __rte_unused,
1662         uint32_t priority,
1663         uint32_t weight __rte_unused,
1664         uint32_t level_id __rte_unused,
1665         struct rte_tm_node_params *params,
1666         struct rte_tm_error *error)
1667 {
1668         struct pmd_internals *p = dev->data->dev_private;
1669
1670         /* node type: leaf */
1671         if (node_id >= p->params.tm.n_queues)
1672                 return -rte_tm_error_set(error,
1673                         EINVAL,
1674                         RTE_TM_ERROR_TYPE_NODE_ID,
1675                         NULL,
1676                         rte_strerror(EINVAL));
1677
1678         /* Priority must be 0 */
1679         if (priority != 0)
1680                 return -rte_tm_error_set(error,
1681                         EINVAL,
1682                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1683                         NULL,
1684                         rte_strerror(EINVAL));
1685
1686         /* No shaper */
1687         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1688                 return -rte_tm_error_set(error,
1689                         EINVAL,
1690                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1691                         NULL,
1692                         rte_strerror(EINVAL));
1693
1694         /* No shared shapers */
1695         if (params->n_shared_shapers != 0)
1696                 return -rte_tm_error_set(error,
1697                         EINVAL,
1698                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1699                         NULL,
1700                         rte_strerror(EINVAL));
1701
1702         /* Congestion management must not be head drop */
1703         if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
1704                 return -rte_tm_error_set(error,
1705                         EINVAL,
1706                         RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
1707                         NULL,
1708                         rte_strerror(EINVAL));
1709
1710         /* Congestion management set to WRED */
1711         if (params->leaf.cman == RTE_TM_CMAN_WRED) {
1712                 uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
1713                 struct tm_wred_profile *wp = tm_wred_profile_search(dev,
1714                         wred_profile_id);
1715
1716                 /* WRED profile (for private WRED context) must be valid */
1717                 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
1718                         wp == NULL)
1719                         return -rte_tm_error_set(error,
1720                                 EINVAL,
1721                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
1722                                 NULL,
1723                                 rte_strerror(EINVAL));
1724
1725                 /* No shared WRED contexts */
1726                 if (params->leaf.wred.n_shared_wred_contexts != 0)
1727                         return -rte_tm_error_set(error,
1728                                 EINVAL,
1729                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
1730                                 NULL,
1731                                 rte_strerror(EINVAL));
1732         }
1733
1734         /* Stats */
1735         if (params->stats_mask & ~STATS_MASK_QUEUE)
1736                 return -rte_tm_error_set(error,
1737                         EINVAL,
1738                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1739                         NULL,
1740                         rte_strerror(EINVAL));
1741
1742         return 0;
1743 }
1744
1745 static int
1746 node_add_check(struct rte_eth_dev *dev,
1747         uint32_t node_id,
1748         uint32_t parent_node_id,
1749         uint32_t priority,
1750         uint32_t weight,
1751         uint32_t level_id,
1752         struct rte_tm_node_params *params,
1753         struct rte_tm_error *error)
1754 {
1755         struct tm_node *pn;
1756         uint32_t level;
1757         int status;
1758
1759         /* node_id, parent_node_id:
1760          *    -node_id must not be RTE_TM_NODE_ID_NULL
1761          *    -node_id must not be in use
1762          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1763          *        -root node must not exist
1764          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1765          *        -parent_node_id must be valid
1766          */
1767         if (node_id == RTE_TM_NODE_ID_NULL)
1768                 return -rte_tm_error_set(error,
1769                         EINVAL,
1770                         RTE_TM_ERROR_TYPE_NODE_ID,
1771                         NULL,
1772                         rte_strerror(EINVAL));
1773
1774         if (tm_node_search(dev, node_id))
1775                 return -rte_tm_error_set(error,
1776                         EEXIST,
1777                         RTE_TM_ERROR_TYPE_NODE_ID,
1778                         NULL,
1779                         rte_strerror(EEXIST));
1780
1781         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
1782                 pn = NULL;
1783                 if (tm_root_node_present(dev))
1784                         return -rte_tm_error_set(error,
1785                                 EEXIST,
1786                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1787                                 NULL,
1788                                 rte_strerror(EEXIST));
1789         } else {
1790                 pn = tm_node_search(dev, parent_node_id);
1791                 if (pn == NULL)
1792                         return -rte_tm_error_set(error,
1793                                 EINVAL,
1794                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1795                                 NULL,
1796                                 rte_strerror(EINVAL));
1797         }
1798
1799         /* priority: must be 0 .. 3 */
1800         if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1801                 return -rte_tm_error_set(error,
1802                         EINVAL,
1803                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1804                         NULL,
1805                         rte_strerror(EINVAL));
1806
1807         /* weight: must be 1 .. 255 */
1808         if (weight == 0 || weight > UINT8_MAX)
1809                 return -rte_tm_error_set(error,
1810                         EINVAL,
1811                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1812                         NULL,
1813                         rte_strerror(EINVAL));
1814
1815         /* level_id: if valid, then
1816          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1817          *        -level_id must be zero
1818          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1819          *        -level_id must be parent level ID plus one
1820          */
1821         level = (pn == NULL) ? 0 : pn->level + 1;
1822         if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
1823                 return -rte_tm_error_set(error,
1824                         EINVAL,
1825                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1826                         NULL,
1827                         rte_strerror(EINVAL));
1828
1829         /* params: must not be NULL */
1830         if (params == NULL)
1831                 return -rte_tm_error_set(error,
1832                         EINVAL,
1833                         RTE_TM_ERROR_TYPE_NODE_PARAMS,
1834                         NULL,
1835                         rte_strerror(EINVAL));
1836
1837         /* params: per level checks */
1838         switch (level) {
1839         case TM_NODE_LEVEL_PORT:
1840                 status = node_add_check_port(dev, node_id,
1841                         parent_node_id, priority, weight, level_id,
1842                         params, error);
1843                 if (status)
1844                         return status;
1845                 break;
1846
1847         case TM_NODE_LEVEL_SUBPORT:
1848                 status = node_add_check_subport(dev, node_id,
1849                         parent_node_id, priority, weight, level_id,
1850                         params, error);
1851                 if (status)
1852                         return status;
1853                 break;
1854
1855         case TM_NODE_LEVEL_PIPE:
1856                 status = node_add_check_pipe(dev, node_id,
1857                         parent_node_id, priority, weight, level_id,
1858                         params, error);
1859                 if (status)
1860                         return status;
1861                 break;
1862
1863         case TM_NODE_LEVEL_TC:
1864                 status = node_add_check_tc(dev, node_id,
1865                         parent_node_id, priority, weight, level_id,
1866                         params, error);
1867                 if (status)
1868                         return status;
1869                 break;
1870
1871         case TM_NODE_LEVEL_QUEUE:
1872                 status = node_add_check_queue(dev, node_id,
1873                         parent_node_id, priority, weight, level_id,
1874                         params, error);
1875                 if (status)
1876                         return status;
1877                 break;
1878
1879         default:
1880                 return -rte_tm_error_set(error,
1881                         EINVAL,
1882                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1883                         NULL,
1884                         rte_strerror(EINVAL));
1885         }
1886
1887         return 0;
1888 }
1889
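/* Example (illustrative sketch, not part of the driver): how an application
 * could add nodes that satisfy the per-level checks above through the generic
 * rte_tm API. port_id, ROOT_ID, TC_ID, QUEUE_ID, PORT_SHAPER_ID and
 * QUEUE_WEIGHT are application-chosen placeholders; error handling is omitted.
 *
 *   struct rte_tm_node_params np;
 *   struct rte_tm_error err;
 *
 *   // Root (port level): non-leaf node ID (i.e. >= number of queues),
 *   // no parent, priority 0, weight 1, valid private shaper profile.
 *   memset(&np, 0, sizeof(np));
 *   np.shaper_profile_id = PORT_SHAPER_ID;
 *   np.nonleaf.n_sp_priorities = 1;
 *   rte_tm_node_add(port_id, ROOT_ID, RTE_TM_NODE_ID_NULL,
 *           0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *
 *   // Subport, pipe and TC nodes follow the same pattern, each one using
 *   // the node added at the previous level as its parent; TC nodes use
 *   // their priority (0 .. 3) to select the traffic class.
 *
 *   // Queue (leaf level): node ID < number of queues, no private shaper,
 *   // any congestion management mode except head drop.
 *   memset(&np, 0, sizeof(np));
 *   np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
 *   np.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
 *   rte_tm_node_add(port_id, QUEUE_ID, TC_ID,
 *           0, QUEUE_WEIGHT, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 */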
1890 /* Traffic manager node add */
1891 static int
1892 pmd_tm_node_add(struct rte_eth_dev *dev,
1893         uint32_t node_id,
1894         uint32_t parent_node_id,
1895         uint32_t priority,
1896         uint32_t weight,
1897         uint32_t level_id,
1898         struct rte_tm_node_params *params,
1899         struct rte_tm_error *error)
1900 {
1901         struct pmd_internals *p = dev->data->dev_private;
1902         struct tm_node_list *nl = &p->soft.tm.h.nodes;
1903         struct tm_node *n;
1904         uint32_t i;
1905         int status;
1906
1907         /* Checks */
1908         if (p->soft.tm.hierarchy_frozen)
1909                 return -rte_tm_error_set(error,
1910                         EBUSY,
1911                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1912                         NULL,
1913                         rte_strerror(EBUSY));
1914
1915         status = node_add_check(dev, node_id, parent_node_id, priority, weight,
1916                 level_id, params, error);
1917         if (status)
1918                 return status;
1919
1920         /* Memory allocation */
1921         n = calloc(1, sizeof(struct tm_node));
1922         if (n == NULL)
1923                 return -rte_tm_error_set(error,
1924                         ENOMEM,
1925                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1926                         NULL,
1927                         rte_strerror(ENOMEM));
1928
1929         /* Fill in */
1930         n->node_id = node_id;
1931         n->parent_node_id = parent_node_id;
1932         n->priority = priority;
1933         n->weight = weight;
1934
1935         if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1936                 n->parent_node = tm_node_search(dev, parent_node_id);
1937                 n->level = n->parent_node->level + 1;
1938         }
1939
1940         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1941                 n->shaper_profile = tm_shaper_profile_search(dev,
1942                         params->shaper_profile_id);
1943
1944         if (n->level == TM_NODE_LEVEL_QUEUE &&
1945                 params->leaf.cman == RTE_TM_CMAN_WRED)
1946                 n->wred_profile = tm_wred_profile_search(dev,
1947                         params->leaf.wred.wred_profile_id);
1948
1949         memcpy(&n->params, params, sizeof(n->params));
1950
1951         /* Add to list */
1952         TAILQ_INSERT_TAIL(nl, n, node);
1953         p->soft.tm.h.n_nodes++;
1954
1955         /* Update dependencies */
1956         if (n->parent_node)
1957                 n->parent_node->n_children++;
1958
1959         if (n->shaper_profile)
1960                 n->shaper_profile->n_users++;
1961
1962         for (i = 0; i < params->n_shared_shapers; i++) {
1963                 struct tm_shared_shaper *ss;
1964
1965                 ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
1966                 ss->n_users++;
1967         }
1968
1969         if (n->wred_profile)
1970                 n->wred_profile->n_users++;
1971
1972         p->soft.tm.h.n_tm_nodes[n->level]++;
1973
1974         return 0;
1975 }
1976
1977 /* Traffic manager node delete */
1978 static int
1979 pmd_tm_node_delete(struct rte_eth_dev *dev,
1980         uint32_t node_id,
1981         struct rte_tm_error *error)
1982 {
1983         struct pmd_internals *p = dev->data->dev_private;
1984         struct tm_node *n;
1985         uint32_t i;
1986
1987         /* Check hierarchy changes are currently allowed */
1988         if (p->soft.tm.hierarchy_frozen)
1989                 return -rte_tm_error_set(error,
1990                         EBUSY,
1991                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1992                         NULL,
1993                         rte_strerror(EBUSY));
1994
1995         /* Check existing */
1996         n = tm_node_search(dev, node_id);
1997         if (n == NULL)
1998                 return -rte_tm_error_set(error,
1999                         EINVAL,
2000                         RTE_TM_ERROR_TYPE_NODE_ID,
2001                         NULL,
2002                         rte_strerror(EINVAL));
2003
2004         /* Check unused */
2005         if (n->n_children)
2006                 return -rte_tm_error_set(error,
2007                         EBUSY,
2008                         RTE_TM_ERROR_TYPE_NODE_ID,
2009                         NULL,
2010                         rte_strerror(EBUSY));
2011
2012         /* Update dependencies */
2013         p->soft.tm.h.n_tm_nodes[n->level]--;
2014
2015         if (n->wred_profile)
2016                 n->wred_profile->n_users--;
2017
2018         for (i = 0; i < n->params.n_shared_shapers; i++) {
2019                 struct tm_shared_shaper *ss;
2020
2021                 ss = tm_shared_shaper_search(dev,
2022                                 n->params.shared_shaper_id[i]);
2023                 ss->n_users--;
2024         }
2025
2026         if (n->shaper_profile)
2027                 n->shaper_profile->n_users--;
2028
2029         if (n->parent_node)
2030                 n->parent_node->n_children--;
2031
2032         /* Remove from list */
2033         TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
2034         p->soft.tm.h.n_nodes--;
2035         free(n);
2036
2037         return 0;
2038 }
2039
2040
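/* Flatten the subtree rooted at pipe node np into a struct
 * rte_sched_pipe_params: token bucket rate/size from the pipe shaper profile,
 * per-TC rates from the child TC shaper profiles, WRR weights from the queue
 * node weights and, when TC oversubscription is enabled, the TC
 * oversubscription weight from the pipe node weight.
 */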
2041 static void
2042 pipe_profile_build(struct rte_eth_dev *dev,
2043         struct tm_node *np,
2044         struct rte_sched_pipe_params *pp)
2045 {
2046         struct pmd_internals *p = dev->data->dev_private;
2047         struct tm_hierarchy *h = &p->soft.tm.h;
2048         struct tm_node_list *nl = &h->nodes;
2049         struct tm_node *nt, *nq;
2050
2051         memset(pp, 0, sizeof(*pp));
2052
2053         /* Pipe */
2054         pp->tb_rate = np->shaper_profile->params.peak.rate;
2055         pp->tb_size = np->shaper_profile->params.peak.size;
2056
2057         /* Traffic Class (TC) */
2058         pp->tc_period = PIPE_TC_PERIOD;
2059
2060 #ifdef RTE_SCHED_SUBPORT_TC_OV
2061         pp->tc_ov_weight = np->weight;
2062 #endif
2063
2064         TAILQ_FOREACH(nt, nl, node) {
2065                 uint32_t queue_id = 0;
2066
2067                 if (nt->level != TM_NODE_LEVEL_TC ||
2068                         nt->parent_node_id != np->node_id)
2069                         continue;
2070
2071                 pp->tc_rate[nt->priority] =
2072                         nt->shaper_profile->params.peak.rate;
2073
2074                 /* Queue */
2075                 TAILQ_FOREACH(nq, nl, node) {
2076                         uint32_t pipe_queue_id;
2077
2078                         if (nq->level != TM_NODE_LEVEL_QUEUE ||
2079                                 nq->parent_node_id != nt->node_id)
2080                                 continue;
2081
2082                         pipe_queue_id = nt->priority *
2083                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
2084                         pp->wrr_weights[pipe_queue_id] = nq->weight;
2085
2086                         queue_id++;
2087                 }
2088         }
2089 }
2090
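/* Return 1 and write the index of the next free pipe profile slot to
 * *pipe_profile_id when the profile table is not full yet, 0 otherwise.
 */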
2091 static int
2092 pipe_profile_free_exists(struct rte_eth_dev *dev,
2093         uint32_t *pipe_profile_id)
2094 {
2095         struct pmd_internals *p = dev->data->dev_private;
2096         struct tm_params *t = &p->soft.tm.params;
2097
2098         if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
2099                 *pipe_profile_id = t->n_pipe_profiles;
2100                 return 1;
2101         }
2102
2103         return 0;
2104 }
2105
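/* Return 1 when an identical pipe profile is already installed, writing its
 * index to *pipe_profile_id (when non-NULL), 0 otherwise.
 */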
2106 static int
2107 pipe_profile_exists(struct rte_eth_dev *dev,
2108         struct rte_sched_pipe_params *pp,
2109         uint32_t *pipe_profile_id)
2110 {
2111         struct pmd_internals *p = dev->data->dev_private;
2112         struct tm_params *t = &p->soft.tm.params;
2113         uint32_t i;
2114
2115         for (i = 0; i < t->n_pipe_profiles; i++)
2116                 if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
2117                         if (pipe_profile_id)
2118                                 *pipe_profile_id = i;
2119                         return 1;
2120                 }
2121
2122         return 0;
2123 }
2124
2125 static void
2126 pipe_profile_install(struct rte_eth_dev *dev,
2127         struct rte_sched_pipe_params *pp,
2128         uint32_t pipe_profile_id)
2129 {
2130         struct pmd_internals *p = dev->data->dev_private;
2131         struct tm_params *t = &p->soft.tm.params;
2132
2133         memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
2134         t->n_pipe_profiles++;
2135 }
2136
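/* Record in the pipe_to_profile translation table which pipe profile the
 * given (subport_id, pipe_id) position uses.
 */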
2137 static void
2138 pipe_profile_mark(struct rte_eth_dev *dev,
2139         uint32_t subport_id,
2140         uint32_t pipe_id,
2141         uint32_t pipe_profile_id)
2142 {
2143         struct pmd_internals *p = dev->data->dev_private;
2144         struct tm_hierarchy *h = &p->soft.tm.h;
2145         struct tm_params *t = &p->soft.tm.params;
2146         uint32_t n_pipes_per_subport, pos;
2147
2148         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2149                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2150         pos = subport_id * n_pipes_per_subport + pipe_id;
2151
2152         t->pipe_to_profile[pos] = pipe_profile_id;
2153 }
2154
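/* Return the installed pipe profile currently assigned to pipe node np. */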
2155 static struct rte_sched_pipe_params *
2156 pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
2157 {
2158         struct pmd_internals *p = dev->data->dev_private;
2159         struct tm_hierarchy *h = &p->soft.tm.h;
2160         struct tm_params *t = &p->soft.tm.params;
2161         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2162                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2163
2164         uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
2165         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2166
2167         uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
2168         uint32_t pipe_profile_id = t->pipe_to_profile[pos];
2169
2170         return &t->pipe_profiles[pipe_profile_id];
2171 }
2172
2173 static int
2174 pipe_profiles_generate(struct rte_eth_dev *dev)
2175 {
2176         struct pmd_internals *p = dev->data->dev_private;
2177         struct tm_hierarchy *h = &p->soft.tm.h;
2178         struct tm_node_list *nl = &h->nodes;
2179         struct tm_node *ns, *np;
2180         uint32_t subport_id;
2181
2182         /* Objective: Fill in the following fields in struct tm_params:
2183          *    - pipe_profiles
2184          *    - n_pipe_profiles
2185          *    - pipe_to_profile
2186          */
2187
2188         subport_id = 0;
2189         TAILQ_FOREACH(ns, nl, node) {
2190                 uint32_t pipe_id;
2191
2192                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2193                         continue;
2194
2195                 pipe_id = 0;
2196                 TAILQ_FOREACH(np, nl, node) {
2197                         struct rte_sched_pipe_params pp;
2198                         uint32_t pos;
2199
2200                         if (np->level != TM_NODE_LEVEL_PIPE ||
2201                                 np->parent_node_id != ns->node_id)
2202                                 continue;
2203
2204                         pipe_profile_build(dev, np, &pp);
2205
2206                         if (!pipe_profile_exists(dev, &pp, &pos)) {
2207                                 if (!pipe_profile_free_exists(dev, &pos))
2208                                         return -1;
2209
2210                                 pipe_profile_install(dev, &pp, pos);
2211                         }
2212
2213                         pipe_profile_mark(dev, subport_id, pipe_id, pos);
2214
2215                         pipe_id++;
2216                 }
2217
2218                 subport_id++;
2219         }
2220
2221         return 0;
2222 }
2223
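/* Return the WRED profile used by the queues of traffic class tc_id. Any
 * matching queue is representative, as the hierarchy commit check requires
 * all queues of a given TC to use the same WRED profile.
 */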
2224 static struct tm_wred_profile *
2225 tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
2226 {
2227         struct pmd_internals *p = dev->data->dev_private;
2228         struct tm_hierarchy *h = &p->soft.tm.h;
2229         struct tm_node_list *nl = &h->nodes;
2230         struct tm_node *nq;
2231
2232         TAILQ_FOREACH(nq, nl, node) {
2233                 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2234                         nq->parent_node->priority != tc_id)
2235                         continue;
2236
2237                 return nq->wred_profile;
2238         }
2239
2240         return NULL;
2241 }
2242
2243 #ifdef RTE_SCHED_RED
2244
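/* Copy the per-TC, per-color RED thresholds from the TM WRED profiles into
 * the rte_sched port parameters.
 */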
2245 static void
2246 wred_profiles_set(struct rte_eth_dev *dev)
2247 {
2248         struct pmd_internals *p = dev->data->dev_private;
2249         struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
2250         uint32_t tc_id;
2251         enum rte_tm_color color;
2252
2253         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
2254                 for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
2255                         struct rte_red_params *dst =
2256                                 &pp->red_params[tc_id][color];
2257                         struct tm_wred_profile *src_wp =
2258                                 tm_tc_wred_profile_get(dev, tc_id);
2259                         struct rte_tm_red_params *src =
2260                                 &src_wp->params.red_params[color];
2261
2262                         memcpy(dst, src, sizeof(*dst));
2263                 }
2264 }
2265
2266 #else
2267
2268 #define wred_profiles_set(dev)
2269
2270 #endif
2271
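/* Return the single shared shaper of TC node tc_node, or NULL when the node
 * has none.
 */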
2272 static struct tm_shared_shaper *
2273 tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
2274 {
2275         return (tc_node->params.n_shared_shapers) ?
2276                 tm_shared_shaper_search(dev,
2277                         tc_node->params.shared_shaper_id[0]) :
2278                 NULL;
2279 }
2280
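/* Return the shared shaper used by the TC nodes of priority tc_id within the
 * given subport, or NULL when they use none.
 */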
2281 static struct tm_shared_shaper *
2282 tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
2283         struct tm_node *subport_node,
2284         uint32_t tc_id)
2285 {
2286         struct pmd_internals *p = dev->data->dev_private;
2287         struct tm_node_list *nl = &p->soft.tm.h.nodes;
2288         struct tm_node *n;
2289
2290         TAILQ_FOREACH(n, nl, node) {
2291                 if (n->level != TM_NODE_LEVEL_TC ||
2292                         n->parent_node->parent_node_id !=
2293                                 subport_node->node_id ||
2294                         n->priority != tc_id)
2295                         continue;
2296
2297                 return tm_tc_shared_shaper_get(dev, n);
2298         }
2299
2300         return NULL;
2301 }
2302
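/* Validate the complete hierarchy before it is frozen: root and subport
 * fan-out, equal pipe count per subport, one TC per priority per pipe, the
 * full set of queues per TC, consistent shared shaper and WRED profile usage,
 * and a pipe profile set that fits within the librte_sched limits.
 */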
2303 static int
2304 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2305 {
2306         struct pmd_internals *p = dev->data->dev_private;
2307         struct tm_hierarchy *h = &p->soft.tm.h;
2308         struct tm_node_list *nl = &h->nodes;
2309         struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2310         struct tm_wred_profile_list *wpl = &h->wred_profiles;
2311         struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2312         struct tm_shared_shaper *ss;
2313
2314         uint32_t n_pipes_per_subport;
2315
2316         /* Root node exists. */
2317         if (nr == NULL)
2318                 return -rte_tm_error_set(error,
2319                         EINVAL,
2320                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2321                         NULL,
2322                         rte_strerror(EINVAL));
2323
2324         /* There is at least one subport, max is not exceeded. */
2325         if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2326                 return -rte_tm_error_set(error,
2327                         EINVAL,
2328                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2329                         NULL,
2330                         rte_strerror(EINVAL));
2331
2332         /* There is at least one pipe. */
2333         if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2334                 return -rte_tm_error_set(error,
2335                         EINVAL,
2336                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2337                         NULL,
2338                         rte_strerror(EINVAL));
2339
2340         /* Number of pipes is the same for all subports. Maximum number of pipes
2341          * per subport is not exceeded.
2342          */
2343         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2344                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2345
2346         if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2347                 return -rte_tm_error_set(error,
2348                         EINVAL,
2349                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2350                         NULL,
2351                         rte_strerror(EINVAL));
2352
2353         TAILQ_FOREACH(ns, nl, node) {
2354                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2355                         continue;
2356
2357                 if (ns->n_children != n_pipes_per_subport)
2358                         return -rte_tm_error_set(error,
2359                                 EINVAL,
2360                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2361                                 NULL,
2362                                 rte_strerror(EINVAL));
2363         }
2364
2365         /* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
2366         TAILQ_FOREACH(np, nl, node) {
2367                 uint32_t mask = 0, mask_expected =
2368                         RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2369                                 uint32_t);
2370
2371                 if (np->level != TM_NODE_LEVEL_PIPE)
2372                         continue;
2373
2374                 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2375                         return -rte_tm_error_set(error,
2376                                 EINVAL,
2377                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2378                                 NULL,
2379                                 rte_strerror(EINVAL));
2380
2381                 TAILQ_FOREACH(nt, nl, node) {
2382                         if (nt->level != TM_NODE_LEVEL_TC ||
2383                                 nt->parent_node_id != np->node_id)
2384                                 continue;
2385
2386                         mask |= 1 << nt->priority;
2387                 }
2388
2389                 if (mask != mask_expected)
2390                         return -rte_tm_error_set(error,
2391                                 EINVAL,
2392                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2393                                 NULL,
2394                                 rte_strerror(EINVAL));
2395         }
2396
2397         /* Each TC has exactly 4 packet queues. */
2398         TAILQ_FOREACH(nt, nl, node) {
2399                 if (nt->level != TM_NODE_LEVEL_TC)
2400                         continue;
2401
2402                 if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
2403                         return -rte_tm_error_set(error,
2404                                 EINVAL,
2405                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2406                                 NULL,
2407                                 rte_strerror(EINVAL));
2408         }
2409
2410         /**
2411          * Shared shapers:
2412          *    -For each TC #i, all pipes in the same subport use the same
2413          *     shared shaper (or no shared shaper) for their TC#i.
2414          *    -Each shared shaper needs to have at least one user. All its
2415          *     users have to be TC nodes with the same priority and the same
2416          *     subport.
2417          */
2418         TAILQ_FOREACH(ns, nl, node) {
2419                 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2420                 uint32_t id;
2421
2422                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2423                         continue;
2424
2425                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2426                         s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2427
2428                 TAILQ_FOREACH(nt, nl, node) {
2429                         struct tm_shared_shaper *subport_ss, *tc_ss;
2430
2431                         if (nt->level != TM_NODE_LEVEL_TC ||
2432                                 nt->parent_node->parent_node_id !=
2433                                         ns->node_id)
2434                                 continue;
2435
2436                         subport_ss = s[nt->priority];
2437                         tc_ss = tm_tc_shared_shaper_get(dev, nt);
2438
2439                         if (subport_ss == NULL && tc_ss == NULL)
2440                                 continue;
2441
2442                         if ((subport_ss == NULL && tc_ss != NULL) ||
2443                                 (subport_ss != NULL && tc_ss == NULL) ||
2444                                 subport_ss->shared_shaper_id !=
2445                                         tc_ss->shared_shaper_id)
2446                                 return -rte_tm_error_set(error,
2447                                         EINVAL,
2448                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2449                                         NULL,
2450                                         rte_strerror(EINVAL));
2451                 }
2452         }
2453
2454         TAILQ_FOREACH(ss, ssl, node) {
2455                 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2456                 uint32_t n_users = 0;
2457
2458                 if (nt_any != NULL)
2459                         TAILQ_FOREACH(nt, nl, node) {
2460                                 if (nt->level != TM_NODE_LEVEL_TC ||
2461                                         nt->priority != nt_any->priority ||
2462                                         nt->parent_node->parent_node_id !=
2463                                         nt_any->parent_node->parent_node_id)
2464                                         continue;
2465
2466                                 n_users++;
2467                         }
2468
2469                 if (ss->n_users == 0 || ss->n_users != n_users)
2470                         return -rte_tm_error_set(error,
2471                                 EINVAL,
2472                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2473                                 NULL,
2474                                 rte_strerror(EINVAL));
2475         }
2476
2477         /* Not too many pipe profiles. */
2478         if (pipe_profiles_generate(dev))
2479                 return -rte_tm_error_set(error,
2480                         EINVAL,
2481                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2482                         NULL,
2483                         rte_strerror(EINVAL));
2484
2485         /**
2486          * WRED (when used, i.e. at least one WRED profile defined):
2487          *    -Each WRED profile must have at least one user.
2488          *    -All leaf nodes must have their private WRED context enabled.
2489          *    -For each TC #i, all leaf nodes must use the same WRED profile
2490          *     for their private WRED context.
2491          */
2492         if (h->n_wred_profiles) {
2493                 struct tm_wred_profile *wp;
2494                 struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2495                 uint32_t id;
2496
2497                 TAILQ_FOREACH(wp, wpl, node)
2498                         if (wp->n_users == 0)
2499                                 return -rte_tm_error_set(error,
2500                                         EINVAL,
2501                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2502                                         NULL,
2503                                         rte_strerror(EINVAL));
2504
2505                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
2506                         w[id] = tm_tc_wred_profile_get(dev, id);
2507
2508                         if (w[id] == NULL)
2509                                 return -rte_tm_error_set(error,
2510                                         EINVAL,
2511                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2512                                         NULL,
2513                                         rte_strerror(EINVAL));
2514                 }
2515
2516                 TAILQ_FOREACH(nq, nl, node) {
2517                         uint32_t id;
2518
2519                         if (nq->level != TM_NODE_LEVEL_QUEUE)
2520                                 continue;
2521
2522                         id = nq->parent_node->priority;
2523
2524                         if (nq->wred_profile == NULL ||
2525                                 nq->wred_profile->wred_profile_id !=
2526                                         w[id]->wred_profile_id)
2527                                 return -rte_tm_error_set(error,
2528                                         EINVAL,
2529                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2530                                         NULL,
2531                                         rte_strerror(EINVAL));
2532                 }
2533         }
2534
2535         return 0;
2536 }
2537
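/* Translate the frozen hierarchy into the rte_sched blueprints: port-level
 * parameters from the root shaper profile and device configuration, plus one
 * subport parameter set per subport node, with per-TC rates taken from the
 * subport-level shared shapers (or from the subport shaper profile when a TC
 * has none).
 */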
2538 static void
2539 hierarchy_blueprints_create(struct rte_eth_dev *dev)
2540 {
2541         struct pmd_internals *p = dev->data->dev_private;
2542         struct tm_params *t = &p->soft.tm.params;
2543         struct tm_hierarchy *h = &p->soft.tm.h;
2544
2545         struct tm_node_list *nl = &h->nodes;
2546         struct tm_node *root = tm_root_node_present(dev), *n;
2547
2548         uint32_t subport_id;
2549
2550         t->port_params = (struct rte_sched_port_params) {
2551                 .name = dev->data->name,
2552                 .socket = dev->data->numa_node,
2553                 .rate = root->shaper_profile->params.peak.rate,
2554                 .mtu = dev->data->mtu,
2555                 .frame_overhead =
2556                         root->shaper_profile->params.pkt_length_adjust,
2557                 .n_subports_per_port = root->n_children,
2558                 .n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2559                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
2560                 .qsize = {p->params.tm.qsize[0],
2561                         p->params.tm.qsize[1],
2562                         p->params.tm.qsize[2],
2563                         p->params.tm.qsize[3],
2564                 },
2565                 .pipe_profiles = t->pipe_profiles,
2566                 .n_pipe_profiles = t->n_pipe_profiles,
2567         };
2568
2569         wred_profiles_set(dev);
2570
2571         subport_id = 0;
2572         TAILQ_FOREACH(n, nl, node) {
2573                 uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2574                 uint32_t i;
2575
2576                 if (n->level != TM_NODE_LEVEL_SUBPORT)
2577                         continue;
2578
2579                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
2580                         struct tm_shared_shaper *ss;
2581                         struct tm_shaper_profile *sp;
2582
2583                         ss = tm_subport_tc_shared_shaper_get(dev, n, i);
2584                         sp = (ss) ? tm_shaper_profile_search(dev,
2585                                 ss->shaper_profile_id) :
2586                                 n->shaper_profile;
2587                         tc_rate[i] = sp->params.peak.rate;
2588                 }
2589
2590                 t->subport_params[subport_id] =
2591                         (struct rte_sched_subport_params) {
2592                                 .tb_rate = n->shaper_profile->params.peak.rate,
2593                                 .tb_size = n->shaper_profile->params.peak.size,
2594
2595                                 .tc_rate = {tc_rate[0],
2596                                         tc_rate[1],
2597                                         tc_rate[2],
2598                                         tc_rate[3],
2599                                 },
2600                                 .tc_period = SUBPORT_TC_PERIOD,
2601                 };
2602
2603                 subport_id++;
2604         }
2605 }
2606
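/* Example (illustrative sketch, not part of the driver): once all profiles
 * and nodes are in place, the application freezes the hierarchy through the
 * generic API; port_id is an application-chosen placeholder:
 *
 *   struct rte_tm_error err;
 *
 *   if (rte_tm_hierarchy_commit(port_id, 1, &err))
 *           printf("hierarchy commit failed: %s\n", err.message);
 *
 * Passing clear_on_fail = 1 maps to the tm_hierarchy_uninit()/
 * tm_hierarchy_init() rollback path below.
 */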
2607 /* Traffic manager hierarchy commit */
2608 static int
2609 pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
2610         int clear_on_fail,
2611         struct rte_tm_error *error)
2612 {
2613         struct pmd_internals *p = dev->data->dev_private;
2614         int status;
2615
2616         /* Checks */
2617         if (p->soft.tm.hierarchy_frozen)
2618                 return -rte_tm_error_set(error,
2619                         EBUSY,
2620                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2621                         NULL,
2622                         rte_strerror(EBUSY));
2623
2624         status = hierarchy_commit_check(dev, error);
2625         if (status) {
2626                 if (clear_on_fail) {
2627                         tm_hierarchy_uninit(p);
2628                         tm_hierarchy_init(p);
2629                 }
2630
2631                 return status;
2632         }
2633
2634         /* Create blueprints */
2635         hierarchy_blueprints_create(dev);
2636
2637         /* Freeze hierarchy */
2638         p->soft.tm.hierarchy_frozen = 1;
2639
2640         return 0;
2641 }
2642
2643 #ifdef RTE_SCHED_SUBPORT_TC_OV
2644
2645 static int
2646 update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
2647 {
2648         struct pmd_internals *p = dev->data->dev_private;
2649         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2650
2651         struct tm_node *ns = np->parent_node;
2652         uint32_t subport_id = tm_node_subport_id(dev, ns);
2653
2654         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2655         struct rte_sched_pipe_params profile1;
2656         uint32_t pipe_profile_id;
2657
2658         /* Derive new pipe profile. */
2659         memcpy(&profile1, profile0, sizeof(profile1));
2660         profile1.tc_ov_weight = (uint8_t)weight;
2661
2662         /* Since the implementation does not allow adding more pipe profiles
2663          * after port configuration, the pipe configuration can be successfully
2664          * updated only if the new profile is also part of the existing set of
2665          * pipe profiles.
2666          */
2667         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2668                 return -1;
2669
2670         /* Update the pipe profile used by the current pipe. */
2671         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2672                 (int32_t)pipe_profile_id))
2673                 return -1;
2674
2675         /* Commit changes. */
2676         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2677         np->weight = weight;
2678
2679         return 0;
2680 }
2681
2682 #endif
2683
2684 static int
2685 update_queue_weight(struct rte_eth_dev *dev,
2686         struct tm_node *nq, uint32_t weight)
2687 {
2688         struct pmd_internals *p = dev->data->dev_private;
2689         uint32_t queue_id = tm_node_queue_id(dev, nq);
2690
2691         struct tm_node *nt = nq->parent_node;
2692         uint32_t tc_id = tm_node_tc_id(dev, nt);
2693
2694         struct tm_node *np = nt->parent_node;
2695         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2696
2697         struct tm_node *ns = np->parent_node;
2698         uint32_t subport_id = tm_node_subport_id(dev, ns);
2699
2700         uint32_t pipe_queue_id =
2701                 tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
2702
2703         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2704         struct rte_sched_pipe_params profile1;
2705         uint32_t pipe_profile_id;
2706
2707         /* Derive new pipe profile. */
2708         memcpy(&profile1, profile0, sizeof(profile1));
2709         profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;
2710
2711         /* Since the implementation does not allow adding more pipe profiles
2712          * after port configuration, the pipe configuration can be successfully
2713          * updated only if the new profile is also part of the existing set
2714          * of pipe profiles.
2715          */
2716         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2717                 return -1;
2718
2719         /* Update the pipe profile used by the current pipe. */
2720         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2721                 (int32_t)pipe_profile_id))
2722                 return -1;
2723
2724         /* Commit changes. */
2725         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2726         nq->weight = weight;
2727
2728         return 0;
2729 }
2730
2731 /* Traffic manager node parent update */
2732 static int
2733 pmd_tm_node_parent_update(struct rte_eth_dev *dev,
2734         uint32_t node_id,
2735         uint32_t parent_node_id,
2736         uint32_t priority,
2737         uint32_t weight,
2738         struct rte_tm_error *error)
2739 {
2740         struct tm_node *n;
2741
2742         /* Port must be started and TM used. */
2743         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
2744                 return -rte_tm_error_set(error,
2745                         EBUSY,
2746                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2747                         NULL,
2748                         rte_strerror(EBUSY));
2749
2750         /* Node must be valid */
2751         n = tm_node_search(dev, node_id);
2752         if (n == NULL)
2753                 return -rte_tm_error_set(error,
2754                         EINVAL,
2755                         RTE_TM_ERROR_TYPE_NODE_ID,
2756                         NULL,
2757                         rte_strerror(EINVAL));
2758
2759         /* Parent node must be the same */
2760         if (n->parent_node_id != parent_node_id)
2761                 return -rte_tm_error_set(error,
2762                         EINVAL,
2763                         RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
2764                         NULL,
2765                         rte_strerror(EINVAL));
2766
2767         /* Priority must be the same */
2768         if (n->priority != priority)
2769                 return -rte_tm_error_set(error,
2770                         EINVAL,
2771                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
2772                         NULL,
2773                         rte_strerror(EINVAL));
2774
2775         /* weight: must be 1 .. 255 */
2776         if (weight == 0 || weight > UINT8_MAX)
2777                 return -rte_tm_error_set(error,
2778                         EINVAL,
2779                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2780                         NULL,
2781                         rte_strerror(EINVAL));
2782
2783         switch (n->level) {
2784         case TM_NODE_LEVEL_PORT:
2785                 return -rte_tm_error_set(error,
2786                         EINVAL,
2787                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2788                         NULL,
2789                         rte_strerror(EINVAL));
2790                 /* fall-through */
2791         case TM_NODE_LEVEL_SUBPORT:
2792                 return -rte_tm_error_set(error,
2793                         EINVAL,
2794                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2795                         NULL,
2796                         rte_strerror(EINVAL));
2797                 /* fall-through */
2798         case TM_NODE_LEVEL_PIPE:
2799 #ifdef RTE_SCHED_SUBPORT_TC_OV
2800                 if (update_pipe_weight(dev, n, weight))
2801                         return -rte_tm_error_set(error,
2802                                 EINVAL,
2803                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2804                                 NULL,
2805                                 rte_strerror(EINVAL));
2806                 return 0;
2807 #else
2808                 return -rte_tm_error_set(error,
2809                         EINVAL,
2810                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2811                         NULL,
2812                         rte_strerror(EINVAL));
2813 #endif
2814                 /* fall-through */
2815         case TM_NODE_LEVEL_TC:
2816                 return -rte_tm_error_set(error,
2817                         EINVAL,
2818                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2819                         NULL,
2820                         rte_strerror(EINVAL));
2821                 /* fall-through */
2822         case TM_NODE_LEVEL_QUEUE:
2823                 /* fall-through */
2824         default:
2825                 if (update_queue_weight(dev, n, weight))
2826                         return -rte_tm_error_set(error,
2827                                 EINVAL,
2828                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2829                                 NULL,
2830                                 rte_strerror(EINVAL));
2831                 return 0;
2832         }
2833 }
2834
2835 static int
2836 update_subport_rate(struct rte_eth_dev *dev,
2837         struct tm_node *ns,
2838         struct tm_shaper_profile *sp)
2839 {
2840         struct pmd_internals *p = dev->data->dev_private;
2841         uint32_t subport_id = tm_node_subport_id(dev, ns);
2842
2843         struct rte_sched_subport_params subport_params;
2844
2845         /* Derive new subport configuration. */
2846         memcpy(&subport_params,
2847                 &p->soft.tm.params.subport_params[subport_id],
2848                 sizeof(subport_params));
2849         subport_params.tb_rate = sp->params.peak.rate;
2850         subport_params.tb_size = sp->params.peak.size;
2851
2852         /* Update the subport configuration. */
2853         if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
2854                 &subport_params))
2855                 return -1;
2856
2857         /* Commit changes. */
2858         ns->shaper_profile->n_users--;
2859
2860         ns->shaper_profile = sp;
2861         ns->params.shaper_profile_id = sp->shaper_profile_id;
2862         sp->n_users++;
2863
2864         memcpy(&p->soft.tm.params.subport_params[subport_id],
2865                 &subport_params,
2866                 sizeof(subport_params));
2867
2868         return 0;
2869 }
2870
2871 static int
2872 update_pipe_rate(struct rte_eth_dev *dev,
2873         struct tm_node *np,
2874         struct tm_shaper_profile *sp)
2875 {
2876         struct pmd_internals *p = dev->data->dev_private;
2877         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2878
2879         struct tm_node *ns = np->parent_node;
2880         uint32_t subport_id = tm_node_subport_id(dev, ns);
2881
2882         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2883         struct rte_sched_pipe_params profile1;
2884         uint32_t pipe_profile_id;
2885
2886         /* Derive new pipe profile. */
2887         memcpy(&profile1, profile0, sizeof(profile1));
2888         profile1.tb_rate = sp->params.peak.rate;
2889         profile1.tb_size = sp->params.peak.size;
2890
2891         /* Since the implementation does not allow adding more pipe profiles
2892          * after port configuration, the pipe configuration can be successfully
2893          * updated only if the new profile is also part of the existing set of
2894          * pipe profiles.
2895          */
2896         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2897                 return -1;
2898
2899         /* Update the pipe profile used by the current pipe. */
2900         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2901                 (int32_t)pipe_profile_id))
2902                 return -1;
2903
2904         /* Commit changes. */
2905         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2906         np->shaper_profile->n_users--;
2907         np->shaper_profile = sp;
2908         np->params.shaper_profile_id = sp->shaper_profile_id;
2909         sp->n_users++;
2910
2911         return 0;
2912 }
2913
2914 static int
2915 update_tc_rate(struct rte_eth_dev *dev,
2916         struct tm_node *nt,
2917         struct tm_shaper_profile *sp)
2918 {
2919         struct pmd_internals *p = dev->data->dev_private;
2920         uint32_t tc_id = tm_node_tc_id(dev, nt);
2921
2922         struct tm_node *np = nt->parent_node;
2923         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2924
2925         struct tm_node *ns = np->parent_node;
2926         uint32_t subport_id = tm_node_subport_id(dev, ns);
2927
2928         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2929         struct rte_sched_pipe_params profile1;
2930         uint32_t pipe_profile_id;
2931
2932         /* Derive new pipe profile. */
2933         memcpy(&profile1, profile0, sizeof(profile1));
2934         profile1.tc_rate[tc_id] = sp->params.peak.rate;
2935
2936         /* Since the implementation does not allow adding more pipe profiles
2937          * after port configuration, the pipe configuration can be successfully
2938          * updated only if the new profile is also part of the existing set of
2939          * pipe profiles.
2940          */
2941         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2942                 return -1;
2943
2944         /* Update the pipe profile used by the current pipe. */
2945         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2946                 (int32_t)pipe_profile_id))
2947                 return -1;
2948
2949         /* Commit changes. */
2950         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2951         nt->shaper_profile->n_users--;
2952         nt->shaper_profile = sp;
2953         nt->params.shaper_profile_id = sp->shaper_profile_id;
2954         sp->n_users++;
2955
2956         return 0;
2957 }
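/*
 * Illustrative note (editorial, not part of the driver): a traffic class rate
 * update follows the same rule as the pipe rate update above, except that
 * only tc_rate[tc_id] of the derived pipe profile differs; the resulting
 * profile must likewise match one created at hierarchy commit time.
 */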
2958
2959 /* Traffic manager node shaper update */
2960 static int
2961 pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
2962         uint32_t node_id,
2963         uint32_t shaper_profile_id,
2964         struct rte_tm_error *error)
2965 {
2966         struct tm_node *n;
2967         struct tm_shaper_profile *sp;
2968
2969         /* Port must be started and TM used. */
2970         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
2971                 return -rte_tm_error_set(error,
2972                         EBUSY,
2973                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2974                         NULL,
2975                         rte_strerror(EBUSY));
2976
2977         /* Node must be valid */
2978         n = tm_node_search(dev, node_id);
2979         if (n == NULL)
2980                 return -rte_tm_error_set(error,
2981                         EINVAL,
2982                         RTE_TM_ERROR_TYPE_NODE_ID,
2983                         NULL,
2984                         rte_strerror(EINVAL));
2985
2986         /* Shaper profile must be valid. */
2987         sp = tm_shaper_profile_search(dev, shaper_profile_id);
2988         if (sp == NULL)
2989                 return -rte_tm_error_set(error,
2990                         EINVAL,
2991                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
2992                         NULL,
2993                         rte_strerror(EINVAL));
2994
2995         switch (n->level) {
2996         case TM_NODE_LEVEL_PORT:
2997                 return -rte_tm_error_set(error,
2998                         EINVAL,
2999                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3000                         NULL,
3001                         rte_strerror(EINVAL));
3003         case TM_NODE_LEVEL_SUBPORT:
3004                 if (update_subport_rate(dev, n, sp))
3005                         return -rte_tm_error_set(error,
3006                                 EINVAL,
3007                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3008                                 NULL,
3009                                 rte_strerror(EINVAL));
3010                 return 0;
3012         case TM_NODE_LEVEL_PIPE:
3013                 if (update_pipe_rate(dev, n, sp))
3014                         return -rte_tm_error_set(error,
3015                                 EINVAL,
3016                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3017                                 NULL,
3018                                 rte_strerror(EINVAL));
3019                 return 0;
3021         case TM_NODE_LEVEL_TC:
3022                 if (update_tc_rate(dev, n, sp))
3023                         return -rte_tm_error_set(error,
3024                                 EINVAL,
3025                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3026                                 NULL,
3027                                 rte_strerror(EINVAL));
3028                 return 0;
3030         case TM_NODE_LEVEL_QUEUE:
3031                 /* fall-through */
3032         default:
3033                 return -rte_tm_error_set(error,
3034                         EINVAL,
3035                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3036                         NULL,
3037                         rte_strerror(EINVAL));
3038         }
3039 }
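/*
 * Illustrative sketch (editorial, not part of the driver): applications do
 * not call the callback above directly; they use the generic TM API, which
 * dispatches here through the rte_tm_ops table at the end of this file.
 * A minimal example, with port_id, node_id and shaper_profile_id as
 * placeholders:
 *
 *	struct rte_tm_error error = { 0 };
 *
 *	if (rte_tm_node_shaper_update(port_id, node_id, shaper_profile_id,
 *			&error))
 *		printf("shaper update failed: %s\n",
 *			error.message ? error.message : "unknown cause");
 *
 * The error details (type, cause, message) are filled in by the calls to
 * rte_tm_error_set() above.
 */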
3040
3041 static inline uint32_t
3042 tm_port_queue_id(struct rte_eth_dev *dev,
3043         uint32_t port_subport_id,
3044         uint32_t subport_pipe_id,
3045         uint32_t pipe_tc_id,
3046         uint32_t tc_queue_id)
3047 {
3048         struct pmd_internals *p = dev->data->dev_private;
3049         struct tm_hierarchy *h = &p->soft.tm.h;
3050         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
3051                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3052
3053         uint32_t port_pipe_id =
3054                 port_subport_id * n_pipes_per_subport + subport_pipe_id;
3055         uint32_t port_tc_id =
3056                 port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
3057         uint32_t port_queue_id =
3058                 port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;
3059
3060         return port_queue_id;
3061 }
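/*
 * Illustrative example (editorial, not part of the driver): assuming the
 * default scheduler geometry of 4 traffic classes per pipe and 4 queues per
 * traffic class, and e.g. 4096 pipes per subport, the coordinates
 * (subport 1, pipe 10, tc 2, queue 3) map to the flat queue ID as follows:
 *
 *	port_pipe_id  = 1 * 4096 + 10 = 4106
 *	port_tc_id    = 4106 * 4 + 2  = 16426
 *	port_queue_id = 16426 * 4 + 3 = 65707
 */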
3062
3063 static int
3064 read_port_stats(struct rte_eth_dev *dev,
3065         struct tm_node *nr,
3066         struct rte_tm_node_stats *stats,
3067         uint64_t *stats_mask,
3068         int clear)
3069 {
3070         struct pmd_internals *p = dev->data->dev_private;
3071         struct tm_hierarchy *h = &p->soft.tm.h;
3072         uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3073         uint32_t subport_id;
3074
3075         for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
3076                 struct rte_sched_subport_stats s;
3077                 uint32_t tc_ov, id;
3078
3079                 /* Stats read */
3080                 int status = rte_sched_subport_read_stats(
3081                         p->soft.tm.sched,
3082                         subport_id,
3083                         &s,
3084                         &tc_ov);
3085                 if (status)
3086                         return status;
3087
3088                 /* Stats accumulate */
3089                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
3090                         nr->stats.n_pkts +=
3091                                 s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
3092                         nr->stats.n_bytes +=
3093                                 s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
3094                         nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
3095                                 s.n_pkts_tc_dropped[id];
3096                         nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3097                                 s.n_bytes_tc_dropped[id];
3098                 }
3099         }
3100
3101         /* Stats copy */
3102         if (stats)
3103                 memcpy(stats, &nr->stats, sizeof(*stats));
3104
3105         if (stats_mask)
3106                 *stats_mask = STATS_MASK_DEFAULT;
3107
3108         /* Stats clear */
3109         if (clear)
3110                 memset(&nr->stats, 0, sizeof(nr->stats));
3111
3112         return 0;
3113 }
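/*
 * Note (editorial, not part of the driver): port-level counters are built by
 * summing, over all subports and traffic classes, the packets/bytes that
 * passed (n_pkts_tc - n_pkts_tc_dropped) and those that were dropped; all
 * drops are accounted under the GREEN color index, as the drop counters read
 * here are not split per color.
 */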
3114
3115 static int
3116 read_subport_stats(struct rte_eth_dev *dev,
3117         struct tm_node *ns,
3118         struct rte_tm_node_stats *stats,
3119         uint64_t *stats_mask,
3120         int clear)
3121 {
3122         struct pmd_internals *p = dev->data->dev_private;
3123         uint32_t subport_id = tm_node_subport_id(dev, ns);
3124         struct rte_sched_subport_stats s;
3125         uint32_t tc_ov, tc_id;
3126
3127         /* Stats read */
3128         int status = rte_sched_subport_read_stats(
3129                 p->soft.tm.sched,
3130                 subport_id,
3131                 &s,
3132                 &tc_ov);
3133         if (status)
3134                 return status;
3135
3136         /* Stats accumulate */
3137         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
3138                 ns->stats.n_pkts +=
3139                         s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
3140                 ns->stats.n_bytes +=
3141                         s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
3142                 ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
3143                         s.n_pkts_tc_dropped[tc_id];
3144                 ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3145                         s.n_bytes_tc_dropped[tc_id];
3146         }
3147
3148         /* Stats copy */
3149         if (stats)
3150                 memcpy(stats, &ns->stats, sizeof(*stats));
3151
3152         if (stats_mask)
3153                 *stats_mask = STATS_MASK_DEFAULT;
3154
3155         /* Stats clear */
3156         if (clear)
3157                 memset(&ns->stats, 0, sizeof(ns->stats));
3158
3159         return 0;
3160 }
3161
3162 static int
3163 read_pipe_stats(struct rte_eth_dev *dev,
3164         struct tm_node *np,
3165         struct rte_tm_node_stats *stats,
3166         uint64_t *stats_mask,
3167         int clear)
3168 {
3169         struct pmd_internals *p = dev->data->dev_private;
3170
3171         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3172
3173         struct tm_node *ns = np->parent_node;
3174         uint32_t subport_id = tm_node_subport_id(dev, ns);
3175
3176         uint32_t i;
3177
3178         /* Stats read */
3179         for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
3180                 struct rte_sched_queue_stats s;
3181                 uint16_t qlen;
3182
3183                 uint32_t qid = tm_port_queue_id(dev,
3184                         subport_id,
3185                         pipe_id,
3186                         i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
3187                         i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
3188
3189                 int status = rte_sched_queue_read_stats(
3190                         p->soft.tm.sched,
3191                         qid,
3192                         &s,
3193                         &qlen);
3194                 if (status)
3195                         return status;
3196
3197                 /* Stats accumulate */
3198                 np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3199                 np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3200                 np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3201                 np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3202                         s.n_bytes_dropped;
3203                 np->stats.leaf.n_pkts_queued = qlen;
3204         }
3205
3206         /* Stats copy */
3207         if (stats)
3208                 memcpy(stats, &np->stats, sizeof(*stats));
3209
3210         if (stats_mask)
3211                 *stats_mask = STATS_MASK_DEFAULT;
3212
3213         /* Stats clear */
3214         if (clear)
3215                 memset(&np->stats, 0, sizeof(np->stats));
3216
3217         return 0;
3218 }
3219
3220 static int
3221 read_tc_stats(struct rte_eth_dev *dev,
3222         struct tm_node *nt,
3223         struct rte_tm_node_stats *stats,
3224         uint64_t *stats_mask,
3225         int clear)
3226 {
3227         struct pmd_internals *p = dev->data->dev_private;
3228
3229         uint32_t tc_id = tm_node_tc_id(dev, nt);
3230
3231         struct tm_node *np = nt->parent_node;
3232         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3233
3234         struct tm_node *ns = np->parent_node;
3235         uint32_t subport_id = tm_node_subport_id(dev, ns);
3236
3237         uint32_t i;
3238
3239         /* Stats read */
3240         for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
3241                 struct rte_sched_queue_stats s;
3242                 uint16_t qlen;
3243
3244                 uint32_t qid = tm_port_queue_id(dev,
3245                         subport_id,
3246                         pipe_id,
3247                         tc_id,
3248                         i);
3249
3250                 int status = rte_sched_queue_read_stats(
3251                         p->soft.tm.sched,
3252                         qid,
3253                         &s,
3254                         &qlen);
3255                 if (status)
3256                         return status;
3257
3258                 /* Stats accumulate */
3259                 nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3260                 nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3261                 nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3262                 nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3263                         s.n_bytes_dropped;
3264                 nt->stats.leaf.n_pkts_queued = qlen;
3265         }
3266
3267         /* Stats copy */
3268         if (stats)
3269                 memcpy(stats, &nt->stats, sizeof(*stats));
3270
3271         if (stats_mask)
3272                 *stats_mask = STATS_MASK_DEFAULT;
3273
3274         /* Stats clear */
3275         if (clear)
3276                 memset(&nt->stats, 0, sizeof(nt->stats));
3277
3278         return 0;
3279 }
3280
3281 static int
3282 read_queue_stats(struct rte_eth_dev *dev,
3283         struct tm_node *nq,
3284         struct rte_tm_node_stats *stats,
3285         uint64_t *stats_mask,
3286         int clear)
3287 {
3288         struct pmd_internals *p = dev->data->dev_private;
3289         struct rte_sched_queue_stats s;
3290         uint16_t qlen;
3291
3292         uint32_t queue_id = tm_node_queue_id(dev, nq);
3293
3294         struct tm_node *nt = nq->parent_node;
3295         uint32_t tc_id = tm_node_tc_id(dev, nt);
3296
3297         struct tm_node *np = nt->parent_node;
3298         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3299
3300         struct tm_node *ns = np->parent_node;
3301         uint32_t subport_id = tm_node_subport_id(dev, ns);
3302
3303         /* Stats read */
3304         uint32_t qid = tm_port_queue_id(dev,
3305                 subport_id,
3306                 pipe_id,
3307                 tc_id,
3308                 queue_id);
3309
3310         int status = rte_sched_queue_read_stats(
3311                 p->soft.tm.sched,
3312                 qid,
3313                 &s,
3314                 &qlen);
3315         if (status)
3316                 return status;
3317
3318         /* Stats accumulate */
3319         nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3320         nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3321         nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3322         nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3323                 s.n_bytes_dropped;
3324         nq->stats.leaf.n_pkts_queued = qlen;
3325
3326         /* Stats copy */
3327         if (stats)
3328                 memcpy(stats, &nq->stats, sizeof(*stats));
3329
3330         if (stats_mask)
3331                 *stats_mask = STATS_MASK_QUEUE;
3332
3333         /* Stats clear */
3334         if (clear)
3335                 memset(&nq->stats, 0, sizeof(nq->stats));
3336
3337         return 0;
3338 }
3339
3340 /* Traffic manager read stats counters for a specific node */
3341 static int
3342 pmd_tm_node_stats_read(struct rte_eth_dev *dev,
3343         uint32_t node_id,
3344         struct rte_tm_node_stats *stats,
3345         uint64_t *stats_mask,
3346         int clear,
3347         struct rte_tm_error *error)
3348 {
3349         struct tm_node *n;
3350
3351         /* Port must be started and TM used. */
3352         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
3353                 return -rte_tm_error_set(error,
3354                         EBUSY,
3355                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3356                         NULL,
3357                         rte_strerror(EBUSY));
3358
3359         /* Node must be valid */
3360         n = tm_node_search(dev, node_id);
3361         if (n == NULL)
3362                 return -rte_tm_error_set(error,
3363                         EINVAL,
3364                         RTE_TM_ERROR_TYPE_NODE_ID,
3365                         NULL,
3366                         rte_strerror(EINVAL));
3367
3368         switch (n->level) {
3369         case TM_NODE_LEVEL_PORT:
3370                 if (read_port_stats(dev, n, stats, stats_mask, clear))
3371                         return -rte_tm_error_set(error,
3372                                 EINVAL,
3373                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3374                                 NULL,
3375                                 rte_strerror(EINVAL));
3376                 return 0;
3377
3378         case TM_NODE_LEVEL_SUBPORT:
3379                 if (read_subport_stats(dev, n, stats, stats_mask, clear))
3380                         return -rte_tm_error_set(error,
3381                                 EINVAL,
3382                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3383                                 NULL,
3384                                 rte_strerror(EINVAL));
3385                 return 0;
3386
3387         case TM_NODE_LEVEL_PIPE:
3388                 if (read_pipe_stats(dev, n, stats, stats_mask, clear))
3389                         return -rte_tm_error_set(error,
3390                                 EINVAL,
3391                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3392                                 NULL,
3393                                 rte_strerror(EINVAL));
3394                 return 0;
3395
3396         case TM_NODE_LEVEL_TC:
3397                 if (read_tc_stats(dev, n, stats, stats_mask, clear))
3398                         return -rte_tm_error_set(error,
3399                                 EINVAL,
3400                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3401                                 NULL,
3402                                 rte_strerror(EINVAL));
3403                 return 0;
3404
3405         case TM_NODE_LEVEL_QUEUE:
3406         default:
3407                 if (read_queue_stats(dev, n, stats, stats_mask, clear))
3408                         return -rte_tm_error_set(error,
3409                                 EINVAL,
3410                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3411                                 NULL,
3412                                 rte_strerror(EINVAL));
3413                 return 0;
3414         }
3415 }
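/*
 * Illustrative sketch (editorial, not part of the driver): an application
 * reads (and optionally clears) the counters of any hierarchy node through
 * the generic TM API. With port_id and node_id as placeholders:
 *
 *	struct rte_tm_node_stats stats;
 *	uint64_t stats_mask = 0;
 *	struct rte_tm_error error = { 0 };
 *
 *	if (rte_tm_node_stats_read(port_id, node_id, &stats, &stats_mask,
 *			1, &error) == 0)
 *		printf("pkts=%" PRIu64 " bytes=%" PRIu64 "\n",
 *			stats.n_pkts, stats.n_bytes);
 *
 * The fifth argument (1) requests that the counters be cleared after the
 * read; stats_mask reports which fields of stats are valid (STATS_MASK_DEFAULT
 * or STATS_MASK_QUEUE in this driver).
 */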
3416
3417 const struct rte_tm_ops pmd_tm_ops = {
3418         .node_type_get = pmd_tm_node_type_get,
3419         .capabilities_get = pmd_tm_capabilities_get,
3420         .level_capabilities_get = pmd_tm_level_capabilities_get,
3421         .node_capabilities_get = pmd_tm_node_capabilities_get,
3422
3423         .wred_profile_add = pmd_tm_wred_profile_add,
3424         .wred_profile_delete = pmd_tm_wred_profile_delete,
3425         .shared_wred_context_add_update = NULL,
3426         .shared_wred_context_delete = NULL,
3427
3428         .shaper_profile_add = pmd_tm_shaper_profile_add,
3429         .shaper_profile_delete = pmd_tm_shaper_profile_delete,
3430         .shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
3431         .shared_shaper_delete = pmd_tm_shared_shaper_delete,
3432
3433         .node_add = pmd_tm_node_add,
3434         .node_delete = pmd_tm_node_delete,
3435         .node_suspend = NULL,
3436         .node_resume = NULL,
3437         .hierarchy_commit = pmd_tm_hierarchy_commit,
3438
3439         .node_parent_update = pmd_tm_node_parent_update,
3440         .node_shaper_update = pmd_tm_node_shaper_update,
3441         .node_shared_shaper_update = NULL,
3442         .node_stats_update = NULL,
3443         .node_wfq_weight_mode_update = NULL,
3444         .node_cman_update = NULL,
3445         .node_wred_context_update = NULL,
3446         .node_shared_wred_context_update = NULL,
3447
3448         .node_stats_read = pmd_tm_node_stats_read,
3449 };
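/*
 * Note (editorial, not part of the driver): callbacks left as NULL above are
 * reported by the generic TM layer as unsupported (-ENOSYS) to the caller.
 * The table itself is expected to be exposed through the PMD's tm_ops_get
 * eth_dev_ops callback, which is how rte_tm_* calls made on a softnic port
 * end up invoking the pmd_tm_* functions in this file.
 */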