drivers/net/softnic/rte_eth_softnic_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdlib.h>
7 #include <string.h>
8
9 #include <rte_malloc.h>
10
11 #include "rte_eth_softnic_internals.h"
12 #include "rte_eth_softnic.h"
13
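/*
 * BYTES_IN_MBPS converts a rate given in Mbps to bytes per second
 * (1 Mbps = 1,000,000 / 8 = 125,000 bytes/s). SUBPORT_TC_PERIOD and
 * PIPE_TC_PERIOD are the traffic class rate enforcement periods used
 * when building the rte_sched subport and pipe parameters (presumably
 * expressed in milliseconds, as librte_sched expects).
 */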
14 #define BYTES_IN_MBPS           (1000 * 1000 / 8)
15 #define SUBPORT_TC_PERIOD       10
16 #define PIPE_TC_PERIOD          40
17
18 static void
19 tm_hierarchy_init(struct pmd_internals *p)
20 {
21         memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
22
23         /* Initialize shaper profile list */
24         TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
25
26         /* Initialize shared shaper list */
27         TAILQ_INIT(&p->soft.tm.h.shared_shapers);
28
29         /* Initialize wred profile list */
30         TAILQ_INIT(&p->soft.tm.h.wred_profiles);
31
32         /* Initialize TM node list */
33         TAILQ_INIT(&p->soft.tm.h.nodes);
34 }
35
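/*
 * Tear down the hierarchy: drain the node, WRED profile, shared shaper
 * and shaper profile lists, freeing every element, then reset the
 * hierarchy structure to its initial all-zero state.
 */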
36 static void
37 tm_hierarchy_uninit(struct pmd_internals *p)
38 {
39         /* Remove all nodes */
40         for ( ; ; ) {
41                 struct tm_node *tm_node;
42
43                 tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
44                 if (tm_node == NULL)
45                         break;
46
47                 TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
48                 free(tm_node);
49         }
50
51         /* Remove all WRED profiles */
52         for ( ; ; ) {
53                 struct tm_wred_profile *wred_profile;
54
55                 wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
56                 if (wred_profile == NULL)
57                         break;
58
59                 TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
60                 free(wred_profile);
61         }
62
63         /* Remove all shared shapers */
64         for ( ; ; ) {
65                 struct tm_shared_shaper *shared_shaper;
66
67                 shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
68                 if (shared_shaper == NULL)
69                         break;
70
71                 TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
72                 free(shared_shaper);
73         }
74
75         /* Remove all shaper profiles */
76         for ( ; ; ) {
77                 struct tm_shaper_profile *shaper_profile;
78
79                 shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
80                 if (shaper_profile == NULL)
81                         break;
82
83                 TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
84                         shaper_profile, node);
85                 free(shaper_profile);
86         }
87
88         memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
89 }
90
91 int
92 tm_init(struct pmd_internals *p,
93         struct pmd_params *params __rte_unused,
94         int numa_node __rte_unused)
95 {
96         tm_hierarchy_init(p);
97
98         return 0;
99 }
100
101 void
102 tm_free(struct pmd_internals *p)
103 {
104         tm_hierarchy_uninit(p);
105 }
106
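/*
 * Instantiate the librte_sched hierarchy from the frozen TM configuration:
 * create the port, then configure every subport and every pipe that has a
 * valid profile mapping. Any failure frees the port and reports an error.
 */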
107 int
108 tm_start(struct pmd_internals *p)
109 {
110         struct tm_params *t = &p->soft.tm.params;
111         uint32_t n_subports, subport_id;
112         int status;
113
114         /* Is hierarchy frozen? */
115         if (p->soft.tm.hierarchy_frozen == 0)
116                 return -1;
117
118         /* Port */
119         p->soft.tm.sched = rte_sched_port_config(&t->port_params);
120         if (p->soft.tm.sched == NULL)
121                 return -1;
122
123         /* Subport */
124         n_subports = t->port_params.n_subports_per_port;
125         for (subport_id = 0; subport_id < n_subports; subport_id++) {
126                 uint32_t n_pipes_per_subport =
127                         t->port_params.n_pipes_per_subport;
128                 uint32_t pipe_id;
129
130                 status = rte_sched_subport_config(p->soft.tm.sched,
131                         subport_id,
132                         &t->subport_params[subport_id]);
133                 if (status) {
134                         rte_sched_port_free(p->soft.tm.sched);
135                         return -1;
136                 }
137
138                 /* Pipe */
140                 for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
141                         int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
142                                 pipe_id;
143                         int profile_id = t->pipe_to_profile[pos];
144
145                         if (profile_id < 0)
146                                 continue;
147
148                         status = rte_sched_pipe_config(p->soft.tm.sched,
149                                 subport_id,
150                                 pipe_id,
151                                 profile_id);
152                         if (status) {
153                                 rte_sched_port_free(p->soft.tm.sched);
154                                 return -1;
155                         }
156                 }
157         }
158
159         return 0;
160 }
161
162 void
163 tm_stop(struct pmd_internals *p)
164 {
165         if (p->soft.tm.sched)
166                 rte_sched_port_free(p->soft.tm.sched);
167
168         /* Unfreeze hierarchy */
169         p->soft.tm.hierarchy_frozen = 0;
170 }
171
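/*
 * Lookup helpers: shaper profiles, shared shapers, WRED profiles and TM
 * nodes are kept on TAILQ lists, so each search below is a linear scan
 * keyed on the object ID.
 */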
172 static struct tm_shaper_profile *
173 tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
174 {
175         struct pmd_internals *p = dev->data->dev_private;
176         struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
177         struct tm_shaper_profile *sp;
178
179         TAILQ_FOREACH(sp, spl, node)
180                 if (shaper_profile_id == sp->shaper_profile_id)
181                         return sp;
182
183         return NULL;
184 }
185
186 static struct tm_shared_shaper *
187 tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
188 {
189         struct pmd_internals *p = dev->data->dev_private;
190         struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
191         struct tm_shared_shaper *ss;
192
193         TAILQ_FOREACH(ss, ssl, node)
194                 if (shared_shaper_id == ss->shared_shaper_id)
195                         return ss;
196
197         return NULL;
198 }
199
200 static struct tm_wred_profile *
201 tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
202 {
203         struct pmd_internals *p = dev->data->dev_private;
204         struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
205         struct tm_wred_profile *wp;
206
207         TAILQ_FOREACH(wp, wpl, node)
208                 if (wred_profile_id == wp->wred_profile_id)
209                         return wp;
210
211         return NULL;
212 }
213
214 static struct tm_node *
215 tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
216 {
217         struct pmd_internals *p = dev->data->dev_private;
218         struct tm_node_list *nl = &p->soft.tm.h.nodes;
219         struct tm_node *n;
220
221         TAILQ_FOREACH(n, nl, node)
222                 if (n->node_id == node_id)
223                         return n;
224
225         return NULL;
226 }
227
228 static struct tm_node *
229 tm_root_node_present(struct rte_eth_dev *dev)
230 {
231         struct pmd_internals *p = dev->data->dev_private;
232         struct tm_node_list *nl = &p->soft.tm.h.nodes;
233         struct tm_node *n;
234
235         TAILQ_FOREACH(n, nl, node)
236                 if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
237                         return n;
238
239         return NULL;
240 }
241
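/*
 * The subport/pipe/queue indices used by librte_sched are not stored on
 * the TM nodes; they are recomputed as the node's position among siblings
 * of the same level (and, for pipes and queues, the same parent) in the
 * node list. A TC node's index is simply its priority.
 */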
242 static uint32_t
243 tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
244 {
245         struct pmd_internals *p = dev->data->dev_private;
246         struct tm_node_list *nl = &p->soft.tm.h.nodes;
247         struct tm_node *ns;
248         uint32_t subport_id;
249
250         subport_id = 0;
251         TAILQ_FOREACH(ns, nl, node) {
252                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
253                         continue;
254
255                 if (ns->node_id == subport_node->node_id)
256                         return subport_id;
257
258                 subport_id++;
259         }
260
261         return UINT32_MAX;
262 }
263
264 static uint32_t
265 tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
266 {
267         struct pmd_internals *p = dev->data->dev_private;
268         struct tm_node_list *nl = &p->soft.tm.h.nodes;
269         struct tm_node *np;
270         uint32_t pipe_id;
271
272         pipe_id = 0;
273         TAILQ_FOREACH(np, nl, node) {
274                 if (np->level != TM_NODE_LEVEL_PIPE ||
275                         np->parent_node_id != pipe_node->parent_node_id)
276                         continue;
277
278                 if (np->node_id == pipe_node->node_id)
279                         return pipe_id;
280
281                 pipe_id++;
282         }
283
284         return UINT32_MAX;
285 }
286
287 static uint32_t
288 tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
289 {
290         return tc_node->priority;
291 }
292
293 static uint32_t
294 tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
295 {
296         struct pmd_internals *p = dev->data->dev_private;
297         struct tm_node_list *nl = &p->soft.tm.h.nodes;
298         struct tm_node *nq;
299         uint32_t queue_id;
300
301         queue_id = 0;
302         TAILQ_FOREACH(nq, nl, node) {
303                 if (nq->level != TM_NODE_LEVEL_QUEUE ||
304                         nq->parent_node_id != queue_node->parent_node_id)
305                         continue;
306
307                 if (nq->node_id == queue_node->node_id)
308                         return queue_id;
309
310                 queue_id++;
311         }
312
313         return UINT32_MAX;
314 }
315
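/*
 * Derive the maximum number of nodes per level from the configured number
 * of scheduler queues: queues -> TCs (queues per TC) -> pipes (TCs per
 * pipe). The subport count is bounded by the pipe count and there is a
 * single root (port) node.
 */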
316 static uint32_t
317 tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
318 {
319         struct pmd_internals *p = dev->data->dev_private;
320         uint32_t n_queues_max = p->params.tm.n_queues;
321         uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
322         uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
323         uint32_t n_subports_max = n_pipes_max;
324         uint32_t n_root_max = 1;
325
326         switch (level) {
327         case TM_NODE_LEVEL_PORT:
328                 return n_root_max;
329         case TM_NODE_LEVEL_SUBPORT:
330                 return n_subports_max;
331         case TM_NODE_LEVEL_PIPE:
332                 return n_pipes_max;
333         case TM_NODE_LEVEL_TC:
334                 return n_tc_max;
335         case TM_NODE_LEVEL_QUEUE:
336         default:
337                 return n_queues_max;
338         }
339 }
340
341 /* Traffic manager node type get */
342 static int
343 pmd_tm_node_type_get(struct rte_eth_dev *dev,
344         uint32_t node_id,
345         int *is_leaf,
346         struct rte_tm_error *error)
347 {
348         struct pmd_internals *p = dev->data->dev_private;
349
350         if (is_leaf == NULL)
351                 return -rte_tm_error_set(error,
352                    EINVAL,
353                    RTE_TM_ERROR_TYPE_UNSPECIFIED,
354                    NULL,
355                    rte_strerror(EINVAL));
356
357         if (node_id == RTE_TM_NODE_ID_NULL ||
358                 (tm_node_search(dev, node_id) == NULL))
359                 return -rte_tm_error_set(error,
360                    EINVAL,
361                    RTE_TM_ERROR_TYPE_NODE_ID,
362                    NULL,
363                    rte_strerror(EINVAL));
364
365         *is_leaf = node_id < p->params.tm.n_queues;
366
367         return 0;
368 }
369
370 #ifdef RTE_SCHED_RED
371 #define WRED_SUPPORTED                                          1
372 #else
373 #define WRED_SUPPORTED                                          0
374 #endif
375
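/*
 * Statistics exposed per node: packet/byte counters plus green packet/byte
 * drop counters at every level; leaf (queue) nodes additionally report the
 * number of packets currently queued.
 */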
376 #define STATS_MASK_DEFAULT                                      \
377         (RTE_TM_STATS_N_PKTS |                                  \
378         RTE_TM_STATS_N_BYTES |                                  \
379         RTE_TM_STATS_N_PKTS_GREEN_DROPPED |                     \
380         RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
381
382 #define STATS_MASK_QUEUE                                                \
383         (STATS_MASK_DEFAULT |                                   \
384         RTE_TM_STATS_N_PKTS_QUEUED)
385
386 static const struct rte_tm_capabilities tm_cap = {
387         .n_nodes_max = UINT32_MAX,
388         .n_levels_max = TM_NODE_LEVEL_MAX,
389
390         .non_leaf_nodes_identical = 0,
391         .leaf_nodes_identical = 1,
392
393         .shaper_n_max = UINT32_MAX,
394         .shaper_private_n_max = UINT32_MAX,
395         .shaper_private_dual_rate_n_max = 0,
396         .shaper_private_rate_min = 1,
397         .shaper_private_rate_max = UINT32_MAX,
398
399         .shaper_shared_n_max = UINT32_MAX,
400         .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
401         .shaper_shared_n_shapers_per_node_max = 1,
402         .shaper_shared_dual_rate_n_max = 0,
403         .shaper_shared_rate_min = 1,
404         .shaper_shared_rate_max = UINT32_MAX,
405
406         .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
407         .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
408
409         .sched_n_children_max = UINT32_MAX,
410         .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
411         .sched_wfq_n_children_per_group_max = UINT32_MAX,
412         .sched_wfq_n_groups_max = 1,
413         .sched_wfq_weight_max = UINT32_MAX,
414
415         .cman_wred_packet_mode_supported = WRED_SUPPORTED,
416         .cman_wred_byte_mode_supported = 0,
417         .cman_head_drop_supported = 0,
418         .cman_wred_context_n_max = 0,
419         .cman_wred_context_private_n_max = 0,
420         .cman_wred_context_shared_n_max = 0,
421         .cman_wred_context_shared_n_nodes_per_context_max = 0,
422         .cman_wred_context_shared_n_contexts_per_node_max = 0,
423
424         .mark_vlan_dei_supported = {0, 0, 0},
425         .mark_ip_ecn_tcp_supported = {0, 0, 0},
426         .mark_ip_ecn_sctp_supported = {0, 0, 0},
427         .mark_ip_dscp_supported = {0, 0, 0},
428
429         .dynamic_update_mask = 0,
430
431         .stats_mask = STATS_MASK_QUEUE,
432 };
433
434 /* Traffic manager capabilities get */
435 static int
436 pmd_tm_capabilities_get(struct rte_eth_dev *dev,
437         struct rte_tm_capabilities *cap,
438         struct rte_tm_error *error)
439 {
440         if (cap == NULL)
441                 return -rte_tm_error_set(error,
442                    EINVAL,
443                    RTE_TM_ERROR_TYPE_CAPABILITIES,
444                    NULL,
445                    rte_strerror(EINVAL));
446
447         memcpy(cap, &tm_cap, sizeof(*cap));
448
449         cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
450                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
451                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
452                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
453                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
454
455         cap->shaper_private_n_max =
456                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
457                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
458                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
459                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);
460
461         cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
462                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);
463
464         cap->shaper_n_max = cap->shaper_private_n_max +
465                 cap->shaper_shared_n_max;
466
467         cap->shaper_shared_n_nodes_per_shaper_max =
468                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);
469
470         cap->sched_n_children_max = RTE_MAX(
471                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
472                 (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);
473
474         cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
475
476         if (WRED_SUPPORTED)
477                 cap->cman_wred_context_private_n_max =
478                         tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
479
480         cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
481                 cap->cman_wred_context_shared_n_max;
482
483         return 0;
484 }
485
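/*
 * Per-level capability table, indexed by tm_node_level. Entries left at
 * UINT32_MAX here are overridden with the actual per-device limits at run
 * time by pmd_tm_level_capabilities_get() below.
 */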
486 static const struct rte_tm_level_capabilities tm_level_cap[] = {
487         [TM_NODE_LEVEL_PORT] = {
488                 .n_nodes_max = 1,
489                 .n_nodes_nonleaf_max = 1,
490                 .n_nodes_leaf_max = 0,
491                 .non_leaf_nodes_identical = 1,
492                 .leaf_nodes_identical = 0,
493
494                 {.nonleaf = {
495                         .shaper_private_supported = 1,
496                         .shaper_private_dual_rate_supported = 0,
497                         .shaper_private_rate_min = 1,
498                         .shaper_private_rate_max = UINT32_MAX,
499                         .shaper_shared_n_max = 0,
500
501                         .sched_n_children_max = UINT32_MAX,
502                         .sched_sp_n_priorities_max = 1,
503                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
504                         .sched_wfq_n_groups_max = 1,
505                         .sched_wfq_weight_max = 1,
506
507                         .stats_mask = STATS_MASK_DEFAULT,
508                 } },
509         },
510
511         [TM_NODE_LEVEL_SUBPORT] = {
512                 .n_nodes_max = UINT32_MAX,
513                 .n_nodes_nonleaf_max = UINT32_MAX,
514                 .n_nodes_leaf_max = 0,
515                 .non_leaf_nodes_identical = 1,
516                 .leaf_nodes_identical = 0,
517
518                 {.nonleaf = {
519                         .shaper_private_supported = 1,
520                         .shaper_private_dual_rate_supported = 0,
521                         .shaper_private_rate_min = 1,
522                         .shaper_private_rate_max = UINT32_MAX,
523                         .shaper_shared_n_max = 0,
524
525                         .sched_n_children_max = UINT32_MAX,
526                         .sched_sp_n_priorities_max = 1,
527                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
528                         .sched_wfq_n_groups_max = 1,
529 #ifdef RTE_SCHED_SUBPORT_TC_OV
530                         .sched_wfq_weight_max = UINT32_MAX,
531 #else
532                         .sched_wfq_weight_max = 1,
533 #endif
534                         .stats_mask = STATS_MASK_DEFAULT,
535                 } },
536         },
537
538         [TM_NODE_LEVEL_PIPE] = {
539                 .n_nodes_max = UINT32_MAX,
540                 .n_nodes_nonleaf_max = UINT32_MAX,
541                 .n_nodes_leaf_max = 0,
542                 .non_leaf_nodes_identical = 1,
543                 .leaf_nodes_identical = 0,
544
545                 {.nonleaf = {
546                         .shaper_private_supported = 1,
547                         .shaper_private_dual_rate_supported = 0,
548                         .shaper_private_rate_min = 1,
549                         .shaper_private_rate_max = UINT32_MAX,
550                         .shaper_shared_n_max = 0,
551
552                         .sched_n_children_max =
553                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
554                         .sched_sp_n_priorities_max =
555                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
556                         .sched_wfq_n_children_per_group_max = 1,
557                         .sched_wfq_n_groups_max = 0,
558                         .sched_wfq_weight_max = 1,
559
560                         .stats_mask = STATS_MASK_DEFAULT,
561                 } },
562         },
563
564         [TM_NODE_LEVEL_TC] = {
565                 .n_nodes_max = UINT32_MAX,
566                 .n_nodes_nonleaf_max = UINT32_MAX,
567                 .n_nodes_leaf_max = 0,
568                 .non_leaf_nodes_identical = 1,
569                 .leaf_nodes_identical = 0,
570
571                 {.nonleaf = {
572                         .shaper_private_supported = 1,
573                         .shaper_private_dual_rate_supported = 0,
574                         .shaper_private_rate_min = 1,
575                         .shaper_private_rate_max = UINT32_MAX,
576                         .shaper_shared_n_max = 1,
577
578                         .sched_n_children_max =
579                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
580                         .sched_sp_n_priorities_max = 1,
581                         .sched_wfq_n_children_per_group_max =
582                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
583                         .sched_wfq_n_groups_max = 1,
584                         .sched_wfq_weight_max = UINT32_MAX,
585
586                         .stats_mask = STATS_MASK_DEFAULT,
587                 } },
588         },
589
590         [TM_NODE_LEVEL_QUEUE] = {
591                 .n_nodes_max = UINT32_MAX,
592                 .n_nodes_nonleaf_max = 0,
593                 .n_nodes_leaf_max = UINT32_MAX,
594                 .non_leaf_nodes_identical = 0,
595                 .leaf_nodes_identical = 1,
596
597                 {.leaf = {
598                         .shaper_private_supported = 0,
599                         .shaper_private_dual_rate_supported = 0,
600                         .shaper_private_rate_min = 0,
601                         .shaper_private_rate_max = 0,
602                         .shaper_shared_n_max = 0,
603
604                         .cman_head_drop_supported = 0,
605                         .cman_wred_packet_mode_supported = WRED_SUPPORTED,
606                         .cman_wred_byte_mode_supported = 0,
607                         .cman_wred_context_private_supported = WRED_SUPPORTED,
608                         .cman_wred_context_shared_n_max = 0,
609
610                         .stats_mask = STATS_MASK_QUEUE,
611                 } },
612         },
613 };
614
615 /* Traffic manager level capabilities get */
616 static int
617 pmd_tm_level_capabilities_get(struct rte_eth_dev *dev,
618         uint32_t level_id,
619         struct rte_tm_level_capabilities *cap,
620         struct rte_tm_error *error)
621 {
622         if (cap == NULL)
623                 return -rte_tm_error_set(error,
624                    EINVAL,
625                    RTE_TM_ERROR_TYPE_CAPABILITIES,
626                    NULL,
627                    rte_strerror(EINVAL));
628
629         if (level_id >= TM_NODE_LEVEL_MAX)
630                 return -rte_tm_error_set(error,
631                    EINVAL,
632                    RTE_TM_ERROR_TYPE_LEVEL_ID,
633                    NULL,
634                    rte_strerror(EINVAL));
635
636         memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));
637
638         switch (level_id) {
639         case TM_NODE_LEVEL_PORT:
640                 cap->nonleaf.sched_n_children_max =
641                         tm_level_get_max_nodes(dev,
642                                 TM_NODE_LEVEL_SUBPORT);
643                 cap->nonleaf.sched_wfq_n_children_per_group_max =
644                         cap->nonleaf.sched_n_children_max;
645                 break;
646
647         case TM_NODE_LEVEL_SUBPORT:
648                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
649                         TM_NODE_LEVEL_SUBPORT);
650                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
651                 cap->nonleaf.sched_n_children_max =
652                         tm_level_get_max_nodes(dev,
653                                 TM_NODE_LEVEL_PIPE);
654                 cap->nonleaf.sched_wfq_n_children_per_group_max =
655                         cap->nonleaf.sched_n_children_max;
656                 break;
657
658         case TM_NODE_LEVEL_PIPE:
659                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
660                         TM_NODE_LEVEL_PIPE);
661                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
662                 break;
663
664         case TM_NODE_LEVEL_TC:
665                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
666                         TM_NODE_LEVEL_TC);
667                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
668                 break;
669
670         case TM_NODE_LEVEL_QUEUE:
671         default:
672                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
673                         TM_NODE_LEVEL_QUEUE);
674                 cap->n_nodes_leaf_max = cap->n_nodes_max;
675                 break;
676         }
677
678         return 0;
679 }
680
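/*
 * Per-node capability table, also indexed by tm_node_level; the children
 * limits for the port and subport levels are filled in at run time by
 * pmd_tm_node_capabilities_get() below.
 */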
681 static const struct rte_tm_node_capabilities tm_node_cap[] = {
682         [TM_NODE_LEVEL_PORT] = {
683                 .shaper_private_supported = 1,
684                 .shaper_private_dual_rate_supported = 0,
685                 .shaper_private_rate_min = 1,
686                 .shaper_private_rate_max = UINT32_MAX,
687                 .shaper_shared_n_max = 0,
688
689                 {.nonleaf = {
690                         .sched_n_children_max = UINT32_MAX,
691                         .sched_sp_n_priorities_max = 1,
692                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
693                         .sched_wfq_n_groups_max = 1,
694                         .sched_wfq_weight_max = 1,
695                 } },
696
697                 .stats_mask = STATS_MASK_DEFAULT,
698         },
699
700         [TM_NODE_LEVEL_SUBPORT] = {
701                 .shaper_private_supported = 1,
702                 .shaper_private_dual_rate_supported = 0,
703                 .shaper_private_rate_min = 1,
704                 .shaper_private_rate_max = UINT32_MAX,
705                 .shaper_shared_n_max = 0,
706
707                 {.nonleaf = {
708                         .sched_n_children_max = UINT32_MAX,
709                         .sched_sp_n_priorities_max = 1,
710                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
711                         .sched_wfq_n_groups_max = 1,
712                         .sched_wfq_weight_max = UINT32_MAX,
713                 } },
714
715                 .stats_mask = STATS_MASK_DEFAULT,
716         },
717
718         [TM_NODE_LEVEL_PIPE] = {
719                 .shaper_private_supported = 1,
720                 .shaper_private_dual_rate_supported = 0,
721                 .shaper_private_rate_min = 1,
722                 .shaper_private_rate_max = UINT32_MAX,
723                 .shaper_shared_n_max = 0,
724
725                 {.nonleaf = {
726                         .sched_n_children_max =
727                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
728                         .sched_sp_n_priorities_max =
729                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
730                         .sched_wfq_n_children_per_group_max = 1,
731                         .sched_wfq_n_groups_max = 0,
732                         .sched_wfq_weight_max = 1,
733                 } },
734
735                 .stats_mask = STATS_MASK_DEFAULT,
736         },
737
738         [TM_NODE_LEVEL_TC] = {
739                 .shaper_private_supported = 1,
740                 .shaper_private_dual_rate_supported = 0,
741                 .shaper_private_rate_min = 1,
742                 .shaper_private_rate_max = UINT32_MAX,
743                 .shaper_shared_n_max = 1,
744
745                 {.nonleaf = {
746                         .sched_n_children_max =
747                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
748                         .sched_sp_n_priorities_max = 1,
749                         .sched_wfq_n_children_per_group_max =
750                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
751                         .sched_wfq_n_groups_max = 1,
752                         .sched_wfq_weight_max = UINT32_MAX,
753                 } },
754
755                 .stats_mask = STATS_MASK_DEFAULT,
756         },
757
758         [TM_NODE_LEVEL_QUEUE] = {
759                 .shaper_private_supported = 0,
760                 .shaper_private_dual_rate_supported = 0,
761                 .shaper_private_rate_min = 0,
762                 .shaper_private_rate_max = 0,
763                 .shaper_shared_n_max = 0,
764
766                 {.leaf = {
767                         .cman_head_drop_supported = 0,
768                         .cman_wred_packet_mode_supported = WRED_SUPPORTED,
769                         .cman_wred_byte_mode_supported = 0,
770                         .cman_wred_context_private_supported = WRED_SUPPORTED,
771                         .cman_wred_context_shared_n_max = 0,
772                 } },
773
774                 .stats_mask = STATS_MASK_QUEUE,
775         },
776 };
777
778 /* Traffic manager node capabilities get */
779 static int
780 pmd_tm_node_capabilities_get(struct rte_eth_dev *dev,
781         uint32_t node_id,
782         struct rte_tm_node_capabilities *cap,
783         struct rte_tm_error *error)
784 {
785         struct tm_node *tm_node;
786
787         if (cap == NULL)
788                 return -rte_tm_error_set(error,
789                    EINVAL,
790                    RTE_TM_ERROR_TYPE_CAPABILITIES,
791                    NULL,
792                    rte_strerror(EINVAL));
793
794         tm_node = tm_node_search(dev, node_id);
795         if (tm_node == NULL)
796                 return -rte_tm_error_set(error,
797                    EINVAL,
798                    RTE_TM_ERROR_TYPE_NODE_ID,
799                    NULL,
800                    rte_strerror(EINVAL));
801
802         memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));
803
804         switch (tm_node->level) {
805         case TM_NODE_LEVEL_PORT:
806                 cap->nonleaf.sched_n_children_max =
807                         tm_level_get_max_nodes(dev,
808                                 TM_NODE_LEVEL_SUBPORT);
809                 cap->nonleaf.sched_wfq_n_children_per_group_max =
810                         cap->nonleaf.sched_n_children_max;
811                 break;
812
813         case TM_NODE_LEVEL_SUBPORT:
814                 cap->nonleaf.sched_n_children_max =
815                         tm_level_get_max_nodes(dev,
816                                 TM_NODE_LEVEL_PIPE);
817                 cap->nonleaf.sched_wfq_n_children_per_group_max =
818                         cap->nonleaf.sched_n_children_max;
819                 break;
820
821         case TM_NODE_LEVEL_PIPE:
822         case TM_NODE_LEVEL_TC:
823         case TM_NODE_LEVEL_QUEUE:
824         default:
825                 break;
826         }
827
828         return 0;
829 }
830
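/*
 * A shaper profile is accepted only if it is single rate (committed rate
 * must be zero), its peak rate and peak bucket size are non-zero 32-bit
 * values, and its packet length adjustment equals
 * RTE_TM_ETH_FRAMING_OVERHEAD_FCS (24 bytes).
 */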
831 static int
832 shaper_profile_check(struct rte_eth_dev *dev,
833         uint32_t shaper_profile_id,
834         struct rte_tm_shaper_params *profile,
835         struct rte_tm_error *error)
836 {
837         struct tm_shaper_profile *sp;
838
839         /* Shaper profile ID must not be NONE. */
840         if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
841                 return -rte_tm_error_set(error,
842                         EINVAL,
843                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
844                         NULL,
845                         rte_strerror(EINVAL));
846
847         /* Shaper profile must not exist. */
848         sp = tm_shaper_profile_search(dev, shaper_profile_id);
849         if (sp)
850                 return -rte_tm_error_set(error,
851                         EEXIST,
852                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
853                         NULL,
854                         rte_strerror(EEXIST));
855
856         /* Profile must not be NULL. */
857         if (profile == NULL)
858                 return -rte_tm_error_set(error,
859                         EINVAL,
860                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
861                         NULL,
862                         rte_strerror(EINVAL));
863
864         /* Peak rate: non-zero, 32-bit */
865         if (profile->peak.rate == 0 ||
866                 profile->peak.rate >= UINT32_MAX)
867                 return -rte_tm_error_set(error,
868                         EINVAL,
869                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
870                         NULL,
871                         rte_strerror(EINVAL));
872
873         /* Peak size: non-zero, 32-bit */
874         if (profile->peak.size == 0 ||
875                 profile->peak.size >= UINT32_MAX)
876                 return -rte_tm_error_set(error,
877                         EINVAL,
878                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
879                         NULL,
880                         rte_strerror(EINVAL));
881
882         /* Dual-rate profiles are not supported. */
883         if (profile->committed.rate != 0)
884                 return -rte_tm_error_set(error,
885                         EINVAL,
886                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
887                         NULL,
888                         rte_strerror(EINVAL));
889
890         /* Packet length adjust: 24 bytes */
891         if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
892                 return -rte_tm_error_set(error,
893                         EINVAL,
894                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
895                         NULL,
896                         rte_strerror(EINVAL));
897
898         return 0;
899 }
900
901 /* Traffic manager shaper profile add */
902 static int
903 pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
904         uint32_t shaper_profile_id,
905         struct rte_tm_shaper_params *profile,
906         struct rte_tm_error *error)
907 {
908         struct pmd_internals *p = dev->data->dev_private;
909         struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
910         struct tm_shaper_profile *sp;
911         int status;
912
913         /* Check input params */
914         status = shaper_profile_check(dev, shaper_profile_id, profile, error);
915         if (status)
916                 return status;
917
918         /* Memory allocation */
919         sp = calloc(1, sizeof(struct tm_shaper_profile));
920         if (sp == NULL)
921                 return -rte_tm_error_set(error,
922                         ENOMEM,
923                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
924                         NULL,
925                         rte_strerror(ENOMEM));
926
927         /* Fill in */
928         sp->shaper_profile_id = shaper_profile_id;
929         memcpy(&sp->params, profile, sizeof(sp->params));
930
931         /* Add to list */
932         TAILQ_INSERT_TAIL(spl, sp, node);
933         p->soft.tm.h.n_shaper_profiles++;
934
935         return 0;
936 }
937
938 /* Traffic manager shaper profile delete */
939 static int
940 pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
941         uint32_t shaper_profile_id,
942         struct rte_tm_error *error)
943 {
944         struct pmd_internals *p = dev->data->dev_private;
945         struct tm_shaper_profile *sp;
946
947         /* Check existing */
948         sp = tm_shaper_profile_search(dev, shaper_profile_id);
949         if (sp == NULL)
950                 return -rte_tm_error_set(error,
951                         EINVAL,
952                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
953                         NULL,
954                         rte_strerror(EINVAL));
955
956         /* Check unused */
957         if (sp->n_users)
958                 return -rte_tm_error_set(error,
959                         EBUSY,
960                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
961                         NULL,
962                         rte_strerror(EBUSY));
963
964         /* Remove from list */
965         TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
966         p->soft.tm.h.n_shaper_profiles--;
967         free(sp);
968
969         return 0;
970 }
971
972 static struct tm_node *
973 tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
974         struct tm_shared_shaper *ss)
975 {
976         struct pmd_internals *p = dev->data->dev_private;
977         struct tm_node_list *nl = &p->soft.tm.h.nodes;
978         struct tm_node *n;
979
980         /* Each subport TC uses one shared shaper; find a TC node that uses this shared shaper. */
981         TAILQ_FOREACH(n, nl, node) {
982                 if (n->level != TM_NODE_LEVEL_TC ||
983                         n->params.n_shared_shapers == 0 ||
984                         n->params.shared_shaper_id[0] != ss->shared_shaper_id)
985                         continue;
986
987                 return n;
988         }
989
990         return NULL;
991 }
992
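/*
 * A run-time shared shaper update translates into re-configuring the owning
 * subport with a new rate for the corresponding traffic class, then moving
 * the user count from the old shaper profile to the new one.
 */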
993 static int
994 update_subport_tc_rate(struct rte_eth_dev *dev,
995         struct tm_node *nt,
996         struct tm_shared_shaper *ss,
997         struct tm_shaper_profile *sp_new)
998 {
999         struct pmd_internals *p = dev->data->dev_private;
1000         uint32_t tc_id = tm_node_tc_id(dev, nt);
1001
1002         struct tm_node *np = nt->parent_node;
1003
1004         struct tm_node *ns = np->parent_node;
1005         uint32_t subport_id = tm_node_subport_id(dev, ns);
1006
1007         struct rte_sched_subport_params subport_params;
1008
1009         struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
1010                 ss->shaper_profile_id);
1011
1012         /* Derive new subport configuration. */
1013         memcpy(&subport_params,
1014                 &p->soft.tm.params.subport_params[subport_id],
1015                 sizeof(subport_params));
1016         subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
1017
1018         /* Update the subport configuration. */
1019         if (rte_sched_subport_config(p->soft.tm.sched,
1020                 subport_id, &subport_params))
1021                 return -1;
1022
1023         /* Commit changes. */
1024         sp_old->n_users--;
1025
1026         ss->shaper_profile_id = sp_new->shaper_profile_id;
1027         sp_new->n_users++;
1028
1029         memcpy(&p->soft.tm.params.subport_params[subport_id],
1030                 &subport_params,
1031                 sizeof(subport_params));
1032
1033         return 0;
1034 }
1035
1036 /* Traffic manager shared shaper add/update */
1037 static int
1038 pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
1039         uint32_t shared_shaper_id,
1040         uint32_t shaper_profile_id,
1041         struct rte_tm_error *error)
1042 {
1043         struct pmd_internals *p = dev->data->dev_private;
1044         struct tm_shared_shaper *ss;
1045         struct tm_shaper_profile *sp;
1046         struct tm_node *nt;
1047
1048         /* Shaper profile must be valid. */
1049         sp = tm_shaper_profile_search(dev, shaper_profile_id);
1050         if (sp == NULL)
1051                 return -rte_tm_error_set(error,
1052                         EINVAL,
1053                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1054                         NULL,
1055                         rte_strerror(EINVAL));
1056
1057         /**
1058          * Add new shared shaper
1059          */
1060         ss = tm_shared_shaper_search(dev, shared_shaper_id);
1061         if (ss == NULL) {
1062                 struct tm_shared_shaper_list *ssl =
1063                         &p->soft.tm.h.shared_shapers;
1064
1065                 /* Hierarchy must not be frozen */
1066                 if (p->soft.tm.hierarchy_frozen)
1067                         return -rte_tm_error_set(error,
1068                                 EBUSY,
1069                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1070                                 NULL,
1071                                 rte_strerror(EBUSY));
1072
1073                 /* Memory allocation */
1074                 ss = calloc(1, sizeof(struct tm_shared_shaper));
1075                 if (ss == NULL)
1076                         return -rte_tm_error_set(error,
1077                                 ENOMEM,
1078                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1079                                 NULL,
1080                                 rte_strerror(ENOMEM));
1081
1082                 /* Fill in */
1083                 ss->shared_shaper_id = shared_shaper_id;
1084                 ss->shaper_profile_id = shaper_profile_id;
1085
1086                 /* Add to list */
1087                 TAILQ_INSERT_TAIL(ssl, ss, node);
1088                 p->soft.tm.h.n_shared_shapers++;
1089
1090                 return 0;
1091         }
1092
1093         /**
1094          * Update existing shared shaper
1095          */
1096         /* Hierarchy must be frozen (run-time update) */
1097         if (p->soft.tm.hierarchy_frozen == 0)
1098                 return -rte_tm_error_set(error,
1099                         EBUSY,
1100                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1101                         NULL,
1102                         rte_strerror(EBUSY));
1103
1105         /* Propagate change. */
1106         nt = tm_shared_shaper_get_tc(dev, ss);
1107         if (update_subport_tc_rate(dev, nt, ss, sp))
1108                 return -rte_tm_error_set(error,
1109                         EINVAL,
1110                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1111                         NULL,
1112                         rte_strerror(EINVAL));
1113
1114         return 0;
1115 }
1116
1117 /* Traffic manager shared shaper delete */
1118 static int
1119 pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
1120         uint32_t shared_shaper_id,
1121         struct rte_tm_error *error)
1122 {
1123         struct pmd_internals *p = dev->data->dev_private;
1124         struct tm_shared_shaper *ss;
1125
1126         /* Check existing */
1127         ss = tm_shared_shaper_search(dev, shared_shaper_id);
1128         if (ss == NULL)
1129                 return -rte_tm_error_set(error,
1130                         EINVAL,
1131                         RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1132                         NULL,
1133                         rte_strerror(EINVAL));
1134
1135         /* Check unused */
1136         if (ss->n_users)
1137                 return -rte_tm_error_set(error,
1138                         EBUSY,
1139                         RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1140                         NULL,
1141                         rte_strerror(EBUSY));
1142
1143         /* Remove from list */
1144         TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
1145         p->soft.tm.h.n_shared_shapers--;
1146         free(ss);
1147
1148         return 0;
1149 }
1150
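/*
 * WRED profiles must be packet mode and, for every color, provide
 * thresholds with min_th <= max_th and max_th > 0 that fit in 16 bits
 * (presumably because the underlying RED thresholds are 16-bit).
 */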
1151 static int
1152 wred_profile_check(struct rte_eth_dev *dev,
1153         uint32_t wred_profile_id,
1154         struct rte_tm_wred_params *profile,
1155         struct rte_tm_error *error)
1156 {
1157         struct tm_wred_profile *wp;
1158         enum rte_tm_color color;
1159
1160         /* WRED profile ID must not be NONE. */
1161         if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
1162                 return -rte_tm_error_set(error,
1163                         EINVAL,
1164                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1165                         NULL,
1166                         rte_strerror(EINVAL));
1167
1168         /* WRED profile must not exist. */
1169         wp = tm_wred_profile_search(dev, wred_profile_id);
1170         if (wp)
1171                 return -rte_tm_error_set(error,
1172                         EEXIST,
1173                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1174                         NULL,
1175                         rte_strerror(EEXIST));
1176
1177         /* Profile must not be NULL. */
1178         if (profile == NULL)
1179                 return -rte_tm_error_set(error,
1180                         EINVAL,
1181                         RTE_TM_ERROR_TYPE_WRED_PROFILE,
1182                         NULL,
1183                         rte_strerror(EINVAL));
1184
1185         /* WRED profile should be in packet mode */
1186         if (profile->packet_mode == 0)
1187                 return -rte_tm_error_set(error,
1188                         ENOTSUP,
1189                         RTE_TM_ERROR_TYPE_WRED_PROFILE,
1190                         NULL,
1191                         rte_strerror(ENOTSUP));
1192
1193         /* min_th <= max_th, max_th > 0, both thresholds must fit in 16 bits */
1194         for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
1195                 uint32_t min_th = profile->red_params[color].min_th;
1196                 uint32_t max_th = profile->red_params[color].max_th;
1197
1198                 if (min_th > max_th ||
1199                         max_th == 0 ||
1200                         min_th > UINT16_MAX ||
1201                         max_th > UINT16_MAX)
1202                         return -rte_tm_error_set(error,
1203                                 EINVAL,
1204                                 RTE_TM_ERROR_TYPE_WRED_PROFILE,
1205                                 NULL,
1206                                 rte_strerror(EINVAL));
1207         }
1208
1209         return 0;
1210 }
1211
1212 /* Traffic manager WRED profile add */
1213 static int
1214 pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
1215         uint32_t wred_profile_id,
1216         struct rte_tm_wred_params *profile,
1217         struct rte_tm_error *error)
1218 {
1219         struct pmd_internals *p = dev->data->dev_private;
1220         struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
1221         struct tm_wred_profile *wp;
1222         int status;
1223
1224         /* Check input params */
1225         status = wred_profile_check(dev, wred_profile_id, profile, error);
1226         if (status)
1227                 return status;
1228
1229         /* Memory allocation */
1230         wp = calloc(1, sizeof(struct tm_wred_profile));
1231         if (wp == NULL)
1232                 return -rte_tm_error_set(error,
1233                         ENOMEM,
1234                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1235                         NULL,
1236                         rte_strerror(ENOMEM));
1237
1238         /* Fill in */
1239         wp->wred_profile_id = wred_profile_id;
1240         memcpy(&wp->params, profile, sizeof(wp->params));
1241
1242         /* Add to list */
1243         TAILQ_INSERT_TAIL(wpl, wp, node);
1244         p->soft.tm.h.n_wred_profiles++;
1245
1246         return 0;
1247 }
1248
1249 /* Traffic manager WRED profile delete */
1250 static int
1251 pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
1252         uint32_t wred_profile_id,
1253         struct rte_tm_error *error)
1254 {
1255         struct pmd_internals *p = dev->data->dev_private;
1256         struct tm_wred_profile *wp;
1257
1258         /* Check existing */
1259         wp = tm_wred_profile_search(dev, wred_profile_id);
1260         if (wp == NULL)
1261                 return -rte_tm_error_set(error,
1262                         EINVAL,
1263                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1264                         NULL,
1265                         rte_strerror(EINVAL));
1266
1267         /* Check unused */
1268         if (wp->n_users)
1269                 return -rte_tm_error_set(error,
1270                         EBUSY,
1271                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1272                         NULL,
1273                         rte_strerror(EBUSY));
1274
1275         /* Remove from list */
1276         TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
1277         p->soft.tm.h.n_wred_profiles--;
1278         free(wp);
1279
1280         return 0;
1281 }
1282
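/*
 * The node_add_check_*() helpers below validate the level-specific
 * constraints (node ID range, priority, weight, shaper profile, shared
 * shapers, number of SP priorities, stats mask) before a node is accepted
 * into the hierarchy.
 */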
1283 static int
1284 node_add_check_port(struct rte_eth_dev *dev,
1285         uint32_t node_id,
1286         uint32_t parent_node_id __rte_unused,
1287         uint32_t priority,
1288         uint32_t weight,
1289         uint32_t level_id __rte_unused,
1290         struct rte_tm_node_params *params,
1291         struct rte_tm_error *error)
1292 {
1293         struct pmd_internals *p = dev->data->dev_private;
1294         struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
1295                 params->shaper_profile_id);
1296
1297         /* node type: non-leaf */
1298         if (node_id < p->params.tm.n_queues)
1299                 return -rte_tm_error_set(error,
1300                         EINVAL,
1301                         RTE_TM_ERROR_TYPE_NODE_ID,
1302                         NULL,
1303                         rte_strerror(EINVAL));
1304
1305         /* Priority must be 0 */
1306         if (priority != 0)
1307                 return -rte_tm_error_set(error,
1308                         EINVAL,
1309                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1310                         NULL,
1311                         rte_strerror(EINVAL));
1312
1313         /* Weight must be 1 */
1314         if (weight != 1)
1315                 return -rte_tm_error_set(error,
1316                         EINVAL,
1317                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1318                         NULL,
1319                         rte_strerror(EINVAL));
1320
1321         /* Shaper must be valid */
1322         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1323                 sp == NULL)
1324                 return -rte_tm_error_set(error,
1325                         EINVAL,
1326                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1327                         NULL,
1328                         rte_strerror(EINVAL));
1329
1330         /* No shared shapers */
1331         if (params->n_shared_shapers != 0)
1332                 return -rte_tm_error_set(error,
1333                         EINVAL,
1334                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1335                         NULL,
1336                         rte_strerror(EINVAL));
1337
1338         /* Number of SP priorities must be 1 */
1339         if (params->nonleaf.n_sp_priorities != 1)
1340                 return -rte_tm_error_set(error,
1341                         EINVAL,
1342                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1343                         NULL,
1344                         rte_strerror(EINVAL));
1345
1346         /* Stats */
1347         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1348                 return -rte_tm_error_set(error,
1349                         EINVAL,
1350                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1351                         NULL,
1352                         rte_strerror(EINVAL));
1353
1354         return 0;
1355 }
1356
1357 static int
1358 node_add_check_subport(struct rte_eth_dev *dev,
1359         uint32_t node_id,
1360         uint32_t parent_node_id __rte_unused,
1361         uint32_t priority,
1362         uint32_t weight,
1363         uint32_t level_id __rte_unused,
1364         struct rte_tm_node_params *params,
1365         struct rte_tm_error *error)
1366 {
1367         struct pmd_internals *p = dev->data->dev_private;
1368
1369         /* node type: non-leaf */
1370         if (node_id < p->params.tm.n_queues)
1371                 return -rte_tm_error_set(error,
1372                         EINVAL,
1373                         RTE_TM_ERROR_TYPE_NODE_ID,
1374                         NULL,
1375                         rte_strerror(EINVAL));
1376
1377         /* Priority must be 0 */
1378         if (priority != 0)
1379                 return -rte_tm_error_set(error,
1380                         EINVAL,
1381                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1382                         NULL,
1383                         rte_strerror(EINVAL));
1384
1385         /* Weight must be 1 */
1386         if (weight != 1)
1387                 return -rte_tm_error_set(error,
1388                         EINVAL,
1389                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1390                         NULL,
1391                         rte_strerror(EINVAL));
1392
1393         /* Shaper must be valid */
1394         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1395                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1396                 return -rte_tm_error_set(error,
1397                         EINVAL,
1398                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1399                         NULL,
1400                         rte_strerror(EINVAL));
1401
1402         /* No shared shapers */
1403         if (params->n_shared_shapers != 0)
1404                 return -rte_tm_error_set(error,
1405                         EINVAL,
1406                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1407                         NULL,
1408                         rte_strerror(EINVAL));
1409
1410         /* Number of SP priorities must be 1 */
1411         if (params->nonleaf.n_sp_priorities != 1)
1412                 return -rte_tm_error_set(error,
1413                         EINVAL,
1414                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1415                         NULL,
1416                         rte_strerror(EINVAL));
1417
1418         /* Stats */
1419         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1420                 return -rte_tm_error_set(error,
1421                         EINVAL,
1422                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1423                         NULL,
1424                         rte_strerror(EINVAL));
1425
1426         return 0;
1427 }
1428
1429 static int
1430 node_add_check_pipe(struct rte_eth_dev *dev,
1431         uint32_t node_id,
1432         uint32_t parent_node_id __rte_unused,
1433         uint32_t priority,
1434         uint32_t weight __rte_unused,
1435         uint32_t level_id __rte_unused,
1436         struct rte_tm_node_params *params,
1437         struct rte_tm_error *error)
1438 {
1439         struct pmd_internals *p = dev->data->dev_private;
1440
1441         /* node type: non-leaf */
1442         if (node_id < p->params.tm.n_queues)
1443                 return -rte_tm_error_set(error,
1444                         EINVAL,
1445                         RTE_TM_ERROR_TYPE_NODE_ID,
1446                         NULL,
1447                         rte_strerror(EINVAL));
1448
1449         /* Priority must be 0 */
1450         if (priority != 0)
1451                 return -rte_tm_error_set(error,
1452                         EINVAL,
1453                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1454                         NULL,
1455                         rte_strerror(EINVAL));
1456
1457         /* Shaper must be valid */
1458         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1459                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1460                 return -rte_tm_error_set(error,
1461                         EINVAL,
1462                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1463                         NULL,
1464                         rte_strerror(EINVAL));
1465
1466         /* No shared shapers */
1467         if (params->n_shared_shapers != 0)
1468                 return -rte_tm_error_set(error,
1469                         EINVAL,
1470                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1471                         NULL,
1472                         rte_strerror(EINVAL));
1473
1474         /* Number of SP priorities must match the number of traffic classes per pipe */
1475         if (params->nonleaf.n_sp_priorities !=
1476                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1477                 return -rte_tm_error_set(error,
1478                         EINVAL,
1479                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1480                         NULL,
1481                         rte_strerror(EINVAL));
1482
1483         /* WFQ mode must be byte mode */
1484         if (params->nonleaf.wfq_weight_mode != NULL &&
1485                 params->nonleaf.wfq_weight_mode[0] != 0 &&
1486                 params->nonleaf.wfq_weight_mode[1] != 0 &&
1487                 params->nonleaf.wfq_weight_mode[2] != 0 &&
1488                 params->nonleaf.wfq_weight_mode[3] != 0)
1489                 return -rte_tm_error_set(error,
1490                         EINVAL,
1491                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
1492                         NULL,
1493                         rte_strerror(EINVAL));
1494
1495         /* Stats */
1496         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1497                 return -rte_tm_error_set(error,
1498                         EINVAL,
1499                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1500                         NULL,
1501                         rte_strerror(EINVAL));
1502
1503         return 0;
1504 }
1505
1506 static int
1507 node_add_check_tc(struct rte_eth_dev *dev,
1508         uint32_t node_id,
1509         uint32_t parent_node_id __rte_unused,
1510         uint32_t priority __rte_unused,
1511         uint32_t weight,
1512         uint32_t level_id __rte_unused,
1513         struct rte_tm_node_params *params,
1514         struct rte_tm_error *error)
1515 {
1516         struct pmd_internals *p = dev->data->dev_private;
1517
1518         /* node type: non-leaf */
1519         if (node_id < p->params.tm.n_queues)
1520                 return -rte_tm_error_set(error,
1521                         EINVAL,
1522                         RTE_TM_ERROR_TYPE_NODE_ID,
1523                         NULL,
1524                         rte_strerror(EINVAL));
1525
1526         /* Weight must be 1 */
1527         if (weight != 1)
1528                 return -rte_tm_error_set(error,
1529                         EINVAL,
1530                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1531                         NULL,
1532                         rte_strerror(EINVAL));
1533
1534         /* Shaper must be valid */
1535         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1536                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1537                 return -rte_tm_error_set(error,
1538                         EINVAL,
1539                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1540                         NULL,
1541                         rte_strerror(EINVAL));
1542
1543         /* Single valid shared shaper */
1544         if (params->n_shared_shapers > 1)
1545                 return -rte_tm_error_set(error,
1546                         EINVAL,
1547                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1548                         NULL,
1549                         rte_strerror(EINVAL));
1550
1551         if (params->n_shared_shapers == 1 &&
1552                 (params->shared_shaper_id == NULL ||
1553                 (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
1554                 return -rte_tm_error_set(error,
1555                         EINVAL,
1556                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
1557                         NULL,
1558                         rte_strerror(EINVAL));
1559
1560         /* Number of SP priorities must be 1 */
1561         if (params->nonleaf.n_sp_priorities != 1)
1562                 return -rte_tm_error_set(error,
1563                         EINVAL,
1564                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1565                         NULL,
1566                         rte_strerror(EINVAL));
1567
1568         /* Stats */
1569         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1570                 return -rte_tm_error_set(error,
1571                         EINVAL,
1572                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1573                         NULL,
1574                         rte_strerror(EINVAL));
1575
1576         return 0;
1577 }
1578
1579 static int
1580 node_add_check_queue(struct rte_eth_dev *dev,
1581         uint32_t node_id,
1582         uint32_t parent_node_id __rte_unused,
1583         uint32_t priority,
1584         uint32_t weight __rte_unused,
1585         uint32_t level_id __rte_unused,
1586         struct rte_tm_node_params *params,
1587         struct rte_tm_error *error)
1588 {
1589         struct pmd_internals *p = dev->data->dev_private;
1590
1591         /* node type: leaf */
1592         if (node_id >= p->params.tm.n_queues)
1593                 return -rte_tm_error_set(error,
1594                         EINVAL,
1595                         RTE_TM_ERROR_TYPE_NODE_ID,
1596                         NULL,
1597                         rte_strerror(EINVAL));
1598
1599         /* Priority must be 0 */
1600         if (priority != 0)
1601                 return -rte_tm_error_set(error,
1602                         EINVAL,
1603                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1604                         NULL,
1605                         rte_strerror(EINVAL));
1606
1607         /* No shaper */
1608         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1609                 return -rte_tm_error_set(error,
1610                         EINVAL,
1611                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1612                         NULL,
1613                         rte_strerror(EINVAL));
1614
1615         /* No shared shapers */
1616         if (params->n_shared_shapers != 0)
1617                 return -rte_tm_error_set(error,
1618                         EINVAL,
1619                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1620                         NULL,
1621                         rte_strerror(EINVAL));
1622
1623         /* Congestion management must not be head drop */
1624         if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
1625                 return -rte_tm_error_set(error,
1626                         EINVAL,
1627                         RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
1628                         NULL,
1629                         rte_strerror(EINVAL));
1630
1631         /* Congestion management set to WRED */
1632         if (params->leaf.cman == RTE_TM_CMAN_WRED) {
1633                 uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
1634                 struct tm_wred_profile *wp = tm_wred_profile_search(dev,
1635                         wred_profile_id);
1636
1637                 /* WRED profile (for private WRED context) must be valid */
1638                 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
1639                         wp == NULL)
1640                         return -rte_tm_error_set(error,
1641                                 EINVAL,
1642                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
1643                                 NULL,
1644                                 rte_strerror(EINVAL));
1645
1646                 /* No shared WRED contexts */
1647                 if (params->leaf.wred.n_shared_wred_contexts != 0)
1648                         return -rte_tm_error_set(error,
1649                                 EINVAL,
1650                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
1651                                 NULL,
1652                                 rte_strerror(EINVAL));
1653         }
1654
1655         /* Stats */
1656         if (params->stats_mask & ~STATS_MASK_QUEUE)
1657                 return -rte_tm_error_set(error,
1658                         EINVAL,
1659                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1660                         NULL,
1661                         rte_strerror(EINVAL));
1662
1663         return 0;
1664 }
1665
1666 static int
1667 node_add_check(struct rte_eth_dev *dev,
1668         uint32_t node_id,
1669         uint32_t parent_node_id,
1670         uint32_t priority,
1671         uint32_t weight,
1672         uint32_t level_id,
1673         struct rte_tm_node_params *params,
1674         struct rte_tm_error *error)
1675 {
1676         struct tm_node *pn;
1677         uint32_t level;
1678         int status;
1679
1680         /* node_id, parent_node_id:
1681          *    -node_id must not be RTE_TM_NODE_ID_NULL
1682          *    -node_id must not be in use
1683          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1684          *        -root node must not exist
1685          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1686          *        -parent_node_id must be valid
1687          */
1688         if (node_id == RTE_TM_NODE_ID_NULL)
1689                 return -rte_tm_error_set(error,
1690                         EINVAL,
1691                         RTE_TM_ERROR_TYPE_NODE_ID,
1692                         NULL,
1693                         rte_strerror(EINVAL));
1694
1695         if (tm_node_search(dev, node_id))
1696                 return -rte_tm_error_set(error,
1697                         EEXIST,
1698                         RTE_TM_ERROR_TYPE_NODE_ID,
1699                         NULL,
1700                         rte_strerror(EEXIST));
1701
1702         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
1703                 pn = NULL;
1704                 if (tm_root_node_present(dev))
1705                         return -rte_tm_error_set(error,
1706                                 EEXIST,
1707                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1708                                 NULL,
1709                                 rte_strerror(EEXIST));
1710         } else {
1711                 pn = tm_node_search(dev, parent_node_id);
1712                 if (pn == NULL)
1713                         return -rte_tm_error_set(error,
1714                                 EINVAL,
1715                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1716                                 NULL,
1717                                 rte_strerror(EINVAL));
1718         }
1719
1720         /* priority: must be 0 .. 3 */
1721         if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1722                 return -rte_tm_error_set(error,
1723                         EINVAL,
1724                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1725                         NULL,
1726                         rte_strerror(EINVAL));
1727
1728         /* weight: must be 1 .. 255 */
1729         if (weight == 0 || weight > UINT8_MAX)
1730                 return -rte_tm_error_set(error,
1731                         EINVAL,
1732                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1733                         NULL,
1734                         rte_strerror(EINVAL));
1735
1736         /* level_id: if valid, then
1737          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1738          *        -level_id must be zero
1739          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1740          *        -level_id must be parent level ID plus one
1741          */
1742         level = (pn == NULL) ? 0 : pn->level + 1;
1743         if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
1744                 return -rte_tm_error_set(error,
1745                         EINVAL,
1746                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1747                         NULL,
1748                         rte_strerror(EINVAL));
1749
1750         /* params: must not be NULL */
1751         if (params == NULL)
1752                 return -rte_tm_error_set(error,
1753                         EINVAL,
1754                         RTE_TM_ERROR_TYPE_NODE_PARAMS,
1755                         NULL,
1756                         rte_strerror(EINVAL));
1757
1758         /* params: per level checks */
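        /* Level 0 is the port (root) node, followed by subport, pipe, TC and
         * queue; the level of the new node was derived above from its parent.
         */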
1759         switch (level) {
1760         case TM_NODE_LEVEL_PORT:
1761                 status = node_add_check_port(dev, node_id,
1762                         parent_node_id, priority, weight, level_id,
1763                         params, error);
1764                 if (status)
1765                         return status;
1766                 break;
1767
1768         case TM_NODE_LEVEL_SUBPORT:
1769                 status = node_add_check_subport(dev, node_id,
1770                         parent_node_id, priority, weight, level_id,
1771                         params, error);
1772                 if (status)
1773                         return status;
1774                 break;
1775
1776         case TM_NODE_LEVEL_PIPE:
1777                 status = node_add_check_pipe(dev, node_id,
1778                         parent_node_id, priority, weight, level_id,
1779                         params, error);
1780                 if (status)
1781                         return status;
1782                 break;
1783
1784         case TM_NODE_LEVEL_TC:
1785                 status = node_add_check_tc(dev, node_id,
1786                         parent_node_id, priority, weight, level_id,
1787                         params, error);
1788                 if (status)
1789                         return status;
1790                 break;
1791
1792         case TM_NODE_LEVEL_QUEUE:
1793                 status = node_add_check_queue(dev, node_id,
1794                         parent_node_id, priority, weight, level_id,
1795                         params, error);
1796                 if (status)
1797                         return status;
1798                 break;
1799
1800         default:
1801                 return -rte_tm_error_set(error,
1802                         EINVAL,
1803                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1804                         NULL,
1805                         rte_strerror(EINVAL));
1806         }
1807
1808         return 0;
1809 }
1810
1811 /* Traffic manager node add */
1812 static int
1813 pmd_tm_node_add(struct rte_eth_dev *dev,
1814         uint32_t node_id,
1815         uint32_t parent_node_id,
1816         uint32_t priority,
1817         uint32_t weight,
1818         uint32_t level_id,
1819         struct rte_tm_node_params *params,
1820         struct rte_tm_error *error)
1821 {
1822         struct pmd_internals *p = dev->data->dev_private;
1823         struct tm_node_list *nl = &p->soft.tm.h.nodes;
1824         struct tm_node *n;
1825         uint32_t i;
1826         int status;
1827
1828         /* Checks */
1829         if (p->soft.tm.hierarchy_frozen)
1830                 return -rte_tm_error_set(error,
1831                         EBUSY,
1832                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1833                         NULL,
1834                         rte_strerror(EBUSY));
1835
1836         status = node_add_check(dev, node_id, parent_node_id, priority, weight,
1837                 level_id, params, error);
1838         if (status)
1839                 return status;
1840
1841         /* Memory allocation */
1842         n = calloc(1, sizeof(struct tm_node));
1843         if (n == NULL)
1844                 return -rte_tm_error_set(error,
1845                         ENOMEM,
1846                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1847                         NULL,
1848                         rte_strerror(ENOMEM));
1849
1850         /* Fill in */
1851         n->node_id = node_id;
1852         n->parent_node_id = parent_node_id;
1853         n->priority = priority;
1854         n->weight = weight;
1855
1856         if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1857                 n->parent_node = tm_node_search(dev, parent_node_id);
1858                 n->level = n->parent_node->level + 1;
1859         }
1860
1861         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1862                 n->shaper_profile = tm_shaper_profile_search(dev,
1863                         params->shaper_profile_id);
1864
1865         if (n->level == TM_NODE_LEVEL_QUEUE &&
1866                 params->leaf.cman == RTE_TM_CMAN_WRED)
1867                 n->wred_profile = tm_wred_profile_search(dev,
1868                         params->leaf.wred.wred_profile_id);
1869
1870         memcpy(&n->params, params, sizeof(n->params));
1871
1872         /* Add to list */
1873         TAILQ_INSERT_TAIL(nl, n, node);
1874         p->soft.tm.h.n_nodes++;
1875
1876         /* Update dependencies */
1877         if (n->parent_node)
1878                 n->parent_node->n_children++;
1879
1880         if (n->shaper_profile)
1881                 n->shaper_profile->n_users++;
1882
1883         for (i = 0; i < params->n_shared_shapers; i++) {
1884                 struct tm_shared_shaper *ss;
1885
1886                 ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
1887                 ss->n_users++;
1888         }
1889
1890         if (n->wred_profile)
1891                 n->wred_profile->n_users++;
1892
1893         p->soft.tm.h.n_tm_nodes[n->level]++;
1894
1895         return 0;
1896 }
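
/* Illustrative call order only (not part of this driver): the application
 * builds the hierarchy top-down through the generic ethdev TM API, e.g.
 *
 *   rte_tm_node_add(port_id, root_id, RTE_TM_NODE_ID_NULL, 0, 1,
 *           RTE_TM_NODE_LEVEL_ID_ANY, &nonleaf_params, &error);
 *
 * for the port (root) node, then one call per subport, pipe, TC and queue
 * (leaf) node; root_id and nonleaf_params are placeholders. Queue nodes must
 * use node ids below the number of queues, and all nodes must be added
 * before the hierarchy is committed (and thereby frozen).
 */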
1897
1898 /* Traffic manager node delete */
1899 static int
1900 pmd_tm_node_delete(struct rte_eth_dev *dev,
1901         uint32_t node_id,
1902         struct rte_tm_error *error)
1903 {
1904         struct pmd_internals *p = dev->data->dev_private;
1905         struct tm_node *n;
1906         uint32_t i;
1907
1908         /* Check hierarchy changes are currently allowed */
1909         if (p->soft.tm.hierarchy_frozen)
1910                 return -rte_tm_error_set(error,
1911                         EBUSY,
1912                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1913                         NULL,
1914                         rte_strerror(EBUSY));
1915
1916         /* Check existing */
1917         n = tm_node_search(dev, node_id);
1918         if (n == NULL)
1919                 return -rte_tm_error_set(error,
1920                         EINVAL,
1921                         RTE_TM_ERROR_TYPE_NODE_ID,
1922                         NULL,
1923                         rte_strerror(EINVAL));
1924
1925         /* Check unused */
1926         if (n->n_children)
1927                 return -rte_tm_error_set(error,
1928                         EBUSY,
1929                         RTE_TM_ERROR_TYPE_NODE_ID,
1930                         NULL,
1931                         rte_strerror(EBUSY));
1932
1933         /* Update dependencies */
1934         p->soft.tm.h.n_tm_nodes[n->level]--;
1935
1936         if (n->wred_profile)
1937                 n->wred_profile->n_users--;
1938
1939         for (i = 0; i < n->params.n_shared_shapers; i++) {
1940                 struct tm_shared_shaper *ss;
1941
1942                 ss = tm_shared_shaper_search(dev,
1943                                 n->params.shared_shaper_id[i]);
1944                 ss->n_users--;
1945         }
1946
1947         if (n->shaper_profile)
1948                 n->shaper_profile->n_users--;
1949
1950         if (n->parent_node)
1951                 n->parent_node->n_children--;
1952
1953         /* Remove from list */
1954         TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
1955         p->soft.tm.h.n_nodes--;
1956         free(n);
1957
1958         return 0;
1959 }
1960
1961
1962 static void
1963 pipe_profile_build(struct rte_eth_dev *dev,
1964         struct tm_node *np,
1965         struct rte_sched_pipe_params *pp)
1966 {
1967         struct pmd_internals *p = dev->data->dev_private;
1968         struct tm_hierarchy *h = &p->soft.tm.h;
1969         struct tm_node_list *nl = &h->nodes;
1970         struct tm_node *nt, *nq;
1971
1972         memset(pp, 0, sizeof(*pp));
1973
1974         /* Pipe */
1975         pp->tb_rate = np->shaper_profile->params.peak.rate;
1976         pp->tb_size = np->shaper_profile->params.peak.size;
1977
1978         /* Traffic Class (TC) */
1979         pp->tc_period = PIPE_TC_PERIOD;
1980
1981 #ifdef RTE_SCHED_SUBPORT_TC_OV
1982         pp->tc_ov_weight = np->weight;
1983 #endif
1984
1985         TAILQ_FOREACH(nt, nl, node) {
1986                 uint32_t queue_id = 0;
1987
1988                 if (nt->level != TM_NODE_LEVEL_TC ||
1989                         nt->parent_node_id != np->node_id)
1990                         continue;
1991
1992                 pp->tc_rate[nt->priority] =
1993                         nt->shaper_profile->params.peak.rate;
1994
1995                 /* Queue */
1996                 TAILQ_FOREACH(nq, nl, node) {
1997                         uint32_t pipe_queue_id;
1998
1999                         if (nq->level != TM_NODE_LEVEL_QUEUE ||
2000                                 nq->parent_node_id != nt->node_id)
2001                                 continue;
2002
2003                         pipe_queue_id = nt->priority *
2004                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
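                        /* E.g. assuming the default of 4 queues per TC, the
                         * third queue (queue_id 2) of the TC with priority 1
                         * maps to pipe queue 1 * 4 + 2 = 6.
                         */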
2005                         pp->wrr_weights[pipe_queue_id] = nq->weight;
2006
2007                         queue_id++;
2008                 }
2009         }
2010 }
2011
2012 static int
2013 pipe_profile_free_exists(struct rte_eth_dev *dev,
2014         uint32_t *pipe_profile_id)
2015 {
2016         struct pmd_internals *p = dev->data->dev_private;
2017         struct tm_params *t = &p->soft.tm.params;
2018
2019         if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
2020                 *pipe_profile_id = t->n_pipe_profiles;
2021                 return 1;
2022         }
2023
2024         return 0;
2025 }
2026
2027 static int
2028 pipe_profile_exists(struct rte_eth_dev *dev,
2029         struct rte_sched_pipe_params *pp,
2030         uint32_t *pipe_profile_id)
2031 {
2032         struct pmd_internals *p = dev->data->dev_private;
2033         struct tm_params *t = &p->soft.tm.params;
2034         uint32_t i;
2035
2036         for (i = 0; i < t->n_pipe_profiles; i++)
2037                 if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
2038                         if (pipe_profile_id)
2039                                 *pipe_profile_id = i;
2040                         return 1;
2041                 }
2042
2043         return 0;
2044 }
2045
2046 static void
2047 pipe_profile_install(struct rte_eth_dev *dev,
2048         struct rte_sched_pipe_params *pp,
2049         uint32_t pipe_profile_id)
2050 {
2051         struct pmd_internals *p = dev->data->dev_private;
2052         struct tm_params *t = &p->soft.tm.params;
2053
2054         memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
2055         t->n_pipe_profiles++;
2056 }
2057
2058 static void
2059 pipe_profile_mark(struct rte_eth_dev *dev,
2060         uint32_t subport_id,
2061         uint32_t pipe_id,
2062         uint32_t pipe_profile_id)
2063 {
2064         struct pmd_internals *p = dev->data->dev_private;
2065         struct tm_hierarchy *h = &p->soft.tm.h;
2066         struct tm_params *t = &p->soft.tm.params;
2067         uint32_t n_pipes_per_subport, pos;
2068
2069         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2070                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2071         pos = subport_id * n_pipes_per_subport + pipe_id;
2072
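        /* pipe_to_profile[] is indexed by the port-level pipe position, e.g.
         * with 16 pipes per subport, pipe 3 of subport 2 lands in slot
         * 2 * 16 + 3 = 35.
         */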
2073         t->pipe_to_profile[pos] = pipe_profile_id;
2074 }
2075
2076 static struct rte_sched_pipe_params *
2077 pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
2078 {
2079         struct pmd_internals *p = dev->data->dev_private;
2080         struct tm_hierarchy *h = &p->soft.tm.h;
2081         struct tm_params *t = &p->soft.tm.params;
2082         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2083                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2084
2085         uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
2086         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2087
2088         uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
2089         uint32_t pipe_profile_id = t->pipe_to_profile[pos];
2090
2091         return &t->pipe_profiles[pipe_profile_id];
2092 }
2093
2094 static int
2095 pipe_profiles_generate(struct rte_eth_dev *dev)
2096 {
2097         struct pmd_internals *p = dev->data->dev_private;
2098         struct tm_hierarchy *h = &p->soft.tm.h;
2099         struct tm_node_list *nl = &h->nodes;
2100         struct tm_node *ns, *np;
2101         uint32_t subport_id;
2102
2103         /* Objective: Fill in the following fields in struct tm_params:
2104          *    - pipe_profiles
2105          *    - n_pipe_profiles
2106          *    - pipe_to_profile
2107          */
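        /* Identical pipes are de-duplicated: if, e.g., all pipes of the port
         * use the same rates and weights, they collapse into a single
         * rte_sched pipe profile entry.
         */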
2108
2109         subport_id = 0;
2110         TAILQ_FOREACH(ns, nl, node) {
2111                 uint32_t pipe_id;
2112
2113                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2114                         continue;
2115
2116                 pipe_id = 0;
2117                 TAILQ_FOREACH(np, nl, node) {
2118                         struct rte_sched_pipe_params pp;
2119                         uint32_t pos;
2120
2121                         if (np->level != TM_NODE_LEVEL_PIPE ||
2122                                 np->parent_node_id != ns->node_id)
2123                                 continue;
2124
2125                         pipe_profile_build(dev, np, &pp);
2126
2127                         if (!pipe_profile_exists(dev, &pp, &pos)) {
2128                                 if (!pipe_profile_free_exists(dev, &pos))
2129                                         return -1;
2130
2131                                 pipe_profile_install(dev, &pp, pos);
2132                         }
2133
2134                         pipe_profile_mark(dev, subport_id, pipe_id, pos);
2135
2136                         pipe_id++;
2137                 }
2138
2139                 subport_id++;
2140         }
2141
2142         return 0;
2143 }
2144
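/* Return the private WRED profile used by the queues of traffic class tc_id.
 * The hierarchy commit check ensures that, when WRED is used, every queue of
 * a given TC priority references the same WRED profile, so inspecting the
 * first matching queue is sufficient.
 */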
2145 static struct tm_wred_profile *
2146 tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
2147 {
2148         struct pmd_internals *p = dev->data->dev_private;
2149         struct tm_hierarchy *h = &p->soft.tm.h;
2150         struct tm_node_list *nl = &h->nodes;
2151         struct tm_node *nq;
2152
2153         TAILQ_FOREACH(nq, nl, node) {
2154                 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2155                         nq->parent_node->priority != tc_id)
2156                         continue;
2157
2158                 return nq->wred_profile;
2159         }
2160
2161         return NULL;
2162 }
2163
2164 #ifdef RTE_SCHED_RED
2165
2166 static void
2167 wred_profiles_set(struct rte_eth_dev *dev)
2168 {
2169         struct pmd_internals *p = dev->data->dev_private;
2170         struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
2171         uint32_t tc_id;
2172         enum rte_tm_color color;
2173
2174         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
2175                 for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
2176                         struct rte_red_params *dst =
2177                                 &pp->red_params[tc_id][color];
2178                         struct tm_wred_profile *src_wp =
2179                                 tm_tc_wred_profile_get(dev, tc_id);
2180                         struct rte_tm_red_params *src =
2181                                 &src_wp->params.red_params[color];
2182
2183                         memcpy(dst, src, sizeof(*dst));
2184                 }
2185 }
2186
2187 #else
2188
2189 #define wred_profiles_set(dev)
2190
2191 #endif
2192
2193 static struct tm_shared_shaper *
2194 tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
2195 {
2196         return (tc_node->params.n_shared_shapers) ?
2197                 tm_shared_shaper_search(dev,
2198                         tc_node->params.shared_shaper_id[0]) :
2199                 NULL;
2200 }
2201
2202 static struct tm_shared_shaper *
2203 tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
2204         struct tm_node *subport_node,
2205         uint32_t tc_id)
2206 {
2207         struct pmd_internals *p = dev->data->dev_private;
2208         struct tm_node_list *nl = &p->soft.tm.h.nodes;
2209         struct tm_node *n;
2210
2211         TAILQ_FOREACH(n, nl, node) {
2212                 if (n->level != TM_NODE_LEVEL_TC ||
2213                         n->parent_node->parent_node_id !=
2214                                 subport_node->node_id ||
2215                         n->priority != tc_id)
2216                         continue;
2217
2218                 return tm_tc_shared_shaper_get(dev, n);
2219         }
2220
2221         return NULL;
2222 }
2223
2224 static int
2225 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2226 {
2227         struct pmd_internals *p = dev->data->dev_private;
2228         struct tm_hierarchy *h = &p->soft.tm.h;
2229         struct tm_node_list *nl = &h->nodes;
2230         struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2231         struct tm_wred_profile_list *wpl = &h->wred_profiles;
2232         struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2233         struct tm_shared_shaper *ss;
2234
2235         uint32_t n_pipes_per_subport;
2236
2237         /* Root node exists. */
2238         if (nr == NULL)
2239                 return -rte_tm_error_set(error,
2240                         EINVAL,
2241                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2242                         NULL,
2243                         rte_strerror(EINVAL));
2244
2245         /* There is at least one subport, max is not exceeded. */
2246         if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2247                 return -rte_tm_error_set(error,
2248                         EINVAL,
2249                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2250                         NULL,
2251                         rte_strerror(EINVAL));
2252
2253         /* There is at least one pipe. */
2254         if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2255                 return -rte_tm_error_set(error,
2256                         EINVAL,
2257                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2258                         NULL,
2259                         rte_strerror(EINVAL));
2260
2261         /* Number of pipes is the same for all subports. Maximum number of pipes
2262          * per subport is not exceeded.
2263          */
2264         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2265                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2266
2267         if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2268                 return -rte_tm_error_set(error,
2269                         EINVAL,
2270                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2271                         NULL,
2272                         rte_strerror(EINVAL));
2273
2274         TAILQ_FOREACH(ns, nl, node) {
2275                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2276                         continue;
2277
2278                 if (ns->n_children != n_pipes_per_subport)
2279                         return -rte_tm_error_set(error,
2280                                 EINVAL,
2281                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2282                                 NULL,
2283                                 rte_strerror(EINVAL));
2284         }
2285
2286         /* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
2287         TAILQ_FOREACH(np, nl, node) {
2288                 uint32_t mask = 0, mask_expected =
2289                         RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2290                                 uint32_t);
2291
2292                 if (np->level != TM_NODE_LEVEL_PIPE)
2293                         continue;
2294
2295                 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2296                         return -rte_tm_error_set(error,
2297                                 EINVAL,
2298                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2299                                 NULL,
2300                                 rte_strerror(EINVAL));
2301
2302                 TAILQ_FOREACH(nt, nl, node) {
2303                         if (nt->level != TM_NODE_LEVEL_TC ||
2304                                 nt->parent_node_id != np->node_id)
2305                                 continue;
2306
2307                         mask |= 1 << nt->priority;
2308                 }
2309
2310                 if (mask != mask_expected)
2311                         return -rte_tm_error_set(error,
2312                                 EINVAL,
2313                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2314                                 NULL,
2315                                 rte_strerror(EINVAL));
2316         }
2317
2318         /* Each TC has exactly 4 packet queues. */
2319         TAILQ_FOREACH(nt, nl, node) {
2320                 if (nt->level != TM_NODE_LEVEL_TC)
2321                         continue;
2322
2323                 if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
2324                         return -rte_tm_error_set(error,
2325                                 EINVAL,
2326                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2327                                 NULL,
2328                                 rte_strerror(EINVAL));
2329         }
2330
2331         /**
2332          * Shared shapers:
2333          *    -For each TC #i, all pipes in the same subport use the same
2334          *     shared shaper (or no shared shaper) for their TC#i.
2335          *    -Each shared shaper needs to have at least one user. All its
2336          *     users have to be TC nodes with the same priority and the same
2337          *     subport.
2338          */
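        /* Example: if pipe 0 and pipe 1 of subport 0 each have a TC with
         * priority 2, then either both of these TC nodes reference the same
         * shared shaper or neither of them references any shared shaper.
         */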
2339         TAILQ_FOREACH(ns, nl, node) {
2340                 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2341                 uint32_t id;
2342
2343                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2344                         continue;
2345
2346                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2347                         s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2348
2349                 TAILQ_FOREACH(nt, nl, node) {
2350                         struct tm_shared_shaper *subport_ss, *tc_ss;
2351
2352                         if (nt->level != TM_NODE_LEVEL_TC ||
2353                                 nt->parent_node->parent_node_id !=
2354                                         ns->node_id)
2355                                 continue;
2356
2357                         subport_ss = s[nt->priority];
2358                         tc_ss = tm_tc_shared_shaper_get(dev, nt);
2359
2360                         if (subport_ss == NULL && tc_ss == NULL)
2361                                 continue;
2362
2363                         if ((subport_ss == NULL && tc_ss != NULL) ||
2364                                 (subport_ss != NULL && tc_ss == NULL) ||
2365                                 subport_ss->shared_shaper_id !=
2366                                         tc_ss->shared_shaper_id)
2367                                 return -rte_tm_error_set(error,
2368                                         EINVAL,
2369                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2370                                         NULL,
2371                                         rte_strerror(EINVAL));
2372                 }
2373         }
2374
2375         TAILQ_FOREACH(ss, ssl, node) {
2376                 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2377                 uint32_t n_users = 0;
2378
2379                 if (nt_any != NULL)
2380                         TAILQ_FOREACH(nt, nl, node) {
2381                                 if (nt->level != TM_NODE_LEVEL_TC ||
2382                                         nt->priority != nt_any->priority ||
2383                                         nt->parent_node->parent_node_id !=
2384                                         nt_any->parent_node->parent_node_id)
2385                                         continue;
2386
2387                                 n_users++;
2388                         }
2389
2390                 if (ss->n_users == 0 || ss->n_users != n_users)
2391                         return -rte_tm_error_set(error,
2392                                 EINVAL,
2393                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2394                                 NULL,
2395                                 rte_strerror(EINVAL));
2396         }
2397
2398         /* Not too many pipe profiles. */
2399         if (pipe_profiles_generate(dev))
2400                 return -rte_tm_error_set(error,
2401                         EINVAL,
2402                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2403                         NULL,
2404                         rte_strerror(EINVAL));
2405
2406         /**
2407          * WRED (when used, i.e. at least one WRED profile defined):
2408          *    -Each WRED profile must have at least one user.
2409          *    -All leaf nodes must have their private WRED context enabled.
2410          *    -For each TC #i, all leaf nodes must use the same WRED profile
2411          *     for their private WRED context.
2412          */
2413         if (h->n_wred_profiles) {
2414                 struct tm_wred_profile *wp;
2415                 struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2416                 uint32_t id;
2417
2418                 TAILQ_FOREACH(wp, wpl, node)
2419                         if (wp->n_users == 0)
2420                                 return -rte_tm_error_set(error,
2421                                         EINVAL,
2422                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2423                                         NULL,
2424                                         rte_strerror(EINVAL));
2425
2426                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
2427                         w[id] = tm_tc_wred_profile_get(dev, id);
2428
2429                         if (w[id] == NULL)
2430                                 return -rte_tm_error_set(error,
2431                                         EINVAL,
2432                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2433                                         NULL,
2434                                         rte_strerror(EINVAL));
2435                 }
2436
2437                 TAILQ_FOREACH(nq, nl, node) {
2438                         uint32_t id;
2439
2440                         if (nq->level != TM_NODE_LEVEL_QUEUE)
2441                                 continue;
2442
2443                         id = nq->parent_node->priority;
2444
2445                         if (nq->wred_profile == NULL ||
2446                                 nq->wred_profile->wred_profile_id !=
2447                                         w[id]->wred_profile_id)
2448                                 return -rte_tm_error_set(error,
2449                                         EINVAL,
2450                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2451                                         NULL,
2452                                         rte_strerror(EINVAL));
2453                 }
2454         }
2455
2456         return 0;
2457 }
2458
2459 static void
2460 hierarchy_blueprints_create(struct rte_eth_dev *dev)
2461 {
2462         struct pmd_internals *p = dev->data->dev_private;
2463         struct tm_params *t = &p->soft.tm.params;
2464         struct tm_hierarchy *h = &p->soft.tm.h;
2465
2466         struct tm_node_list *nl = &h->nodes;
2467         struct tm_node *root = tm_root_node_present(dev), *n;
2468
2469         uint32_t subport_id;
2470
2471         t->port_params = (struct rte_sched_port_params) {
2472                 .name = dev->data->name,
2473                 .socket = dev->data->numa_node,
2474                 .rate = root->shaper_profile->params.peak.rate,
2475                 .mtu = dev->data->mtu,
2476                 .frame_overhead =
2477                         root->shaper_profile->params.pkt_length_adjust,
2478                 .n_subports_per_port = root->n_children,
2479                 .n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2480                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
2481                 .qsize = {p->params.tm.qsize[0],
2482                         p->params.tm.qsize[1],
2483                         p->params.tm.qsize[2],
2484                         p->params.tm.qsize[3],
2485                 },
2486                 .pipe_profiles = t->pipe_profiles,
2487                 .n_pipe_profiles = t->n_pipe_profiles,
2488         };
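        /* The port blueprint mirrors the TM hierarchy: e.g. a root node with
         * one subport child and 16 pipe grandchildren yields
         * n_subports_per_port = 1 and n_pipes_per_subport = 16, while qsize[]
         * comes straight from p->params.tm.qsize.
         */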
2489
2490         wred_profiles_set(dev);
2491
2492         subport_id = 0;
2493         TAILQ_FOREACH(n, nl, node) {
2494                 uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2495                 uint32_t i;
2496
2497                 if (n->level != TM_NODE_LEVEL_SUBPORT)
2498                         continue;
2499
2500                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
2501                         struct tm_shared_shaper *ss;
2502                         struct tm_shaper_profile *sp;
2503
2504                         ss = tm_subport_tc_shared_shaper_get(dev, n, i);
2505                         sp = (ss) ? tm_shaper_profile_search(dev,
2506                                 ss->shaper_profile_id) :
2507                                 n->shaper_profile;
2508                         tc_rate[i] = sp->params.peak.rate;
2509                 }
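                /* tc_rate[i] is the rate of the shared shaper attached to
                 * TC #i of this subport when one exists, otherwise it falls
                 * back to the subport node's own shaper rate.
                 */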
2510
2511                 t->subport_params[subport_id] =
2512                         (struct rte_sched_subport_params) {
2513                                 .tb_rate = n->shaper_profile->params.peak.rate,
2514                                 .tb_size = n->shaper_profile->params.peak.size,
2515
2516                                 .tc_rate = {tc_rate[0],
2517                                         tc_rate[1],
2518                                         tc_rate[2],
2519                                         tc_rate[3],
2520                                 },
2521                                 .tc_period = SUBPORT_TC_PERIOD,
2522                 };
2523
2524                 subport_id++;
2525         }
2526 }
2527
2528 /* Traffic manager hierarchy commit */
2529 static int
2530 pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
2531         int clear_on_fail,
2532         struct rte_tm_error *error)
2533 {
2534         struct pmd_internals *p = dev->data->dev_private;
2535         int status;
2536
2537         /* Checks */
2538         if (p->soft.tm.hierarchy_frozen)
2539                 return -rte_tm_error_set(error,
2540                         EBUSY,
2541                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2542                         NULL,
2543                         rte_strerror(EBUSY));
2544
2545         status = hierarchy_commit_check(dev, error);
2546         if (status) {
2547                 if (clear_on_fail) {
2548                         tm_hierarchy_uninit(p);
2549                         tm_hierarchy_init(p);
2550                 }
2551
2552                 return status;
2553         }
2554
2555         /* Create blueprints */
2556         hierarchy_blueprints_create(dev);
2557
2558         /* Freeze hierarchy */
2559         p->soft.tm.hierarchy_frozen = 1;
2560
2561         return 0;
2562 }
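
/* Typical usage (sketch): the application adds all profiles and nodes, then
 * calls rte_tm_hierarchy_commit(port_id, clear_on_fail, &error). On success
 * the hierarchy is frozen: further node add/delete calls fail with EBUSY,
 * while the runtime weight, shaper rate and stats operations below remain
 * available.
 */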
2563
2564 #ifdef RTE_SCHED_SUBPORT_TC_OV
2565
2566 static int
2567 update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
2568 {
2569         struct pmd_internals *p = dev->data->dev_private;
2570         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2571
2572         struct tm_node *ns = np->parent_node;
2573         uint32_t subport_id = tm_node_subport_id(dev, ns);
2574
2575         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2576         struct rte_sched_pipe_params profile1;
2577         uint32_t pipe_profile_id;
2578
2579         /* Derive new pipe profile. */
2580         memcpy(&profile1, profile0, sizeof(profile1));
2581         profile1.tc_ov_weight = (uint8_t)weight;
2582
2583         /* Since the implementation does not allow adding more pipe profiles after
2584          * port configuration, the pipe configuration can be successfully
2585          * updated only if the new profile is also part of the existing set of
2586          * pipe profiles.
2587          */
2588         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2589                 return -1;
2590
2591         /* Update the pipe profile used by the current pipe. */
2592         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2593                 (int32_t)pipe_profile_id))
2594                 return -1;
2595
2596         /* Commit changes. */
2597         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2598         np->weight = weight;
2599
2600         return 0;
2601 }
2602
2603 #endif
2604
2605 static int
2606 update_queue_weight(struct rte_eth_dev *dev,
2607         struct tm_node *nq, uint32_t weight)
2608 {
2609         struct pmd_internals *p = dev->data->dev_private;
2610         uint32_t queue_id = tm_node_queue_id(dev, nq);
2611
2612         struct tm_node *nt = nq->parent_node;
2613         uint32_t tc_id = tm_node_tc_id(dev, nt);
2614
2615         struct tm_node *np = nt->parent_node;
2616         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2617
2618         struct tm_node *ns = np->parent_node;
2619         uint32_t subport_id = tm_node_subport_id(dev, ns);
2620
2621         uint32_t pipe_queue_id =
2622                 tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
2623
2624         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2625         struct rte_sched_pipe_params profile1;
2626         uint32_t pipe_profile_id;
2627
2628         /* Derive new pipe profile. */
2629         memcpy(&profile1, profile0, sizeof(profile1));
2630         profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;
2631
2632         /* Since the implementation does not allow adding more pipe profiles after
2633          * port configuration, the pipe configuration can be successfully
2634          * updated only if the new profile is also part of the existing set
2635          * of pipe profiles.
2636          */
2637         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2638                 return -1;
2639
2640         /* Update the pipe profile used by the current pipe. */
2641         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2642                 (int32_t)pipe_profile_id))
2643                 return -1;
2644
2645         /* Commit changes. */
2646         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2647         nq->weight = weight;
2648
2649         return 0;
2650 }
2651
2652 /* Traffic manager node parent update */
2653 static int
2654 pmd_tm_node_parent_update(struct rte_eth_dev *dev,
2655         uint32_t node_id,
2656         uint32_t parent_node_id,
2657         uint32_t priority,
2658         uint32_t weight,
2659         struct rte_tm_error *error)
2660 {
2661         struct tm_node *n;
2662
2663         /* Port must be started and TM used. */
2664         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
2665                 return -rte_tm_error_set(error,
2666                         EBUSY,
2667                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2668                         NULL,
2669                         rte_strerror(EBUSY));
2670
2671         /* Node must be valid */
2672         n = tm_node_search(dev, node_id);
2673         if (n == NULL)
2674                 return -rte_tm_error_set(error,
2675                         EINVAL,
2676                         RTE_TM_ERROR_TYPE_NODE_ID,
2677                         NULL,
2678                         rte_strerror(EINVAL));
2679
2680         /* Parent node must be the same */
2681         if (n->parent_node_id != parent_node_id)
2682                 return -rte_tm_error_set(error,
2683                         EINVAL,
2684                         RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
2685                         NULL,
2686                         rte_strerror(EINVAL));
2687
2688         /* Priority must be the same */
2689         if (n->priority != priority)
2690                 return -rte_tm_error_set(error,
2691                         EINVAL,
2692                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
2693                         NULL,
2694                         rte_strerror(EINVAL));
2695
2696         /* weight: must be 1 .. 255 */
2697         if (weight == 0 || weight > UINT8_MAX)
2698                 return -rte_tm_error_set(error,
2699                         EINVAL,
2700                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2701                         NULL,
2702                         rte_strerror(EINVAL));
2703
2704         switch (n->level) {
2705         case TM_NODE_LEVEL_PORT:
2706                 return -rte_tm_error_set(error,
2707                         EINVAL,
2708                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2709                         NULL,
2710                         rte_strerror(EINVAL));
2711                 /* fall-through */
2712         case TM_NODE_LEVEL_SUBPORT:
2713                 return -rte_tm_error_set(error,
2714                         EINVAL,
2715                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2716                         NULL,
2717                         rte_strerror(EINVAL));
2718                 /* fall-through */
2719         case TM_NODE_LEVEL_PIPE:
2720 #ifdef RTE_SCHED_SUBPORT_TC_OV
2721                 if (update_pipe_weight(dev, n, weight))
2722                         return -rte_tm_error_set(error,
2723                                 EINVAL,
2724                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2725                                 NULL,
2726                                 rte_strerror(EINVAL));
2727                 return 0;
2728 #else
2729                 return -rte_tm_error_set(error,
2730                         EINVAL,
2731                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2732                         NULL,
2733                         rte_strerror(EINVAL));
2734 #endif
2735                 /* fall-through */
2736         case TM_NODE_LEVEL_TC:
2737                 return -rte_tm_error_set(error,
2738                         EINVAL,
2739                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2740                         NULL,
2741                         rte_strerror(EINVAL));
2742                 /* fall-through */
2743         case TM_NODE_LEVEL_QUEUE:
2744                 /* fall-through */
2745         default:
2746                 if (update_queue_weight(dev, n, weight))
2747                         return -rte_tm_error_set(error,
2748                                 EINVAL,
2749                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2750                                 NULL,
2751                                 rte_strerror(EINVAL));
2752                 return 0;
2753         }
2754 }
2755
2756 static int
2757 update_subport_rate(struct rte_eth_dev *dev,
2758         struct tm_node *ns,
2759         struct tm_shaper_profile *sp)
2760 {
2761         struct pmd_internals *p = dev->data->dev_private;
2762         uint32_t subport_id = tm_node_subport_id(dev, ns);
2763
2764         struct rte_sched_subport_params subport_params;
2765
2766         /* Derive new subport configuration. */
2767         memcpy(&subport_params,
2768                 &p->soft.tm.params.subport_params[subport_id],
2769                 sizeof(subport_params));
2770         subport_params.tb_rate = sp->params.peak.rate;
2771         subport_params.tb_size = sp->params.peak.size;
2772
2773         /* Update the subport configuration. */
2774         if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
2775                 &subport_params))
2776                 return -1;
2777
2778         /* Commit changes. */
2779         ns->shaper_profile->n_users--;
2780
2781         ns->shaper_profile = sp;
2782         ns->params.shaper_profile_id = sp->shaper_profile_id;
2783         sp->n_users++;
2784
2785         memcpy(&p->soft.tm.params.subport_params[subport_id],
2786                 &subport_params,
2787                 sizeof(subport_params));
2788
2789         return 0;
2790 }
2791
2792 static int
2793 update_pipe_rate(struct rte_eth_dev *dev,
2794         struct tm_node *np,
2795         struct tm_shaper_profile *sp)
2796 {
2797         struct pmd_internals *p = dev->data->dev_private;
2798         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2799
2800         struct tm_node *ns = np->parent_node;
2801         uint32_t subport_id = tm_node_subport_id(dev, ns);
2802
2803         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2804         struct rte_sched_pipe_params profile1;
2805         uint32_t pipe_profile_id;
2806
2807         /* Derive new pipe profile. */
2808         memcpy(&profile1, profile0, sizeof(profile1));
2809         profile1.tb_rate = sp->params.peak.rate;
2810         profile1.tb_size = sp->params.peak.size;
2811
2812         /* Since the implementation does not allow adding more pipe profiles after
2813          * port configuration, the pipe configuration can be successfully
2814          * updated only if the new profile is also part of the existing set of
2815          * pipe profiles.
2816          */
2817         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2818                 return -1;
2819
2820         /* Update the pipe profile used by the current pipe. */
2821         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2822                 (int32_t)pipe_profile_id))
2823                 return -1;
2824
2825         /* Commit changes. */
2826         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2827         np->shaper_profile->n_users--;
2828         np->shaper_profile = sp;
2829         np->params.shaper_profile_id = sp->shaper_profile_id;
2830         sp->n_users++;
2831
2832         return 0;
2833 }
2834
2835 static int
2836 update_tc_rate(struct rte_eth_dev *dev,
2837         struct tm_node *nt,
2838         struct tm_shaper_profile *sp)
2839 {
2840         struct pmd_internals *p = dev->data->dev_private;
2841         uint32_t tc_id = tm_node_tc_id(dev, nt);
2842
2843         struct tm_node *np = nt->parent_node;
2844         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2845
2846         struct tm_node *ns = np->parent_node;
2847         uint32_t subport_id = tm_node_subport_id(dev, ns);
2848
2849         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2850         struct rte_sched_pipe_params profile1;
2851         uint32_t pipe_profile_id;
2852
2853         /* Derive new pipe profile. */
2854         memcpy(&profile1, profile0, sizeof(profile1));
2855         profile1.tc_rate[tc_id] = sp->params.peak.rate;
2856
2857         /* Since the implementation does not allow adding more pipe profiles after
2858          * port configuration, the pipe configuration can be successfully
2859          * updated only if the new profile is also part of the existing set of
2860          * pipe profiles.
2861          */
2862         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2863                 return -1;
2864
2865         /* Update the pipe profile used by the current pipe. */
2866         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2867                 (int32_t)pipe_profile_id))
2868                 return -1;
2869
2870         /* Commit changes. */
2871         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2872         nt->shaper_profile->n_users--;
2873         nt->shaper_profile = sp;
2874         nt->params.shaper_profile_id = sp->shaper_profile_id;
2875         sp->n_users++;
2876
2877         return 0;
2878 }
2879
2880 /* Traffic manager node shaper update */
2881 static int
2882 pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
2883         uint32_t node_id,
2884         uint32_t shaper_profile_id,
2885         struct rte_tm_error *error)
2886 {
2887         struct tm_node *n;
2888         struct tm_shaper_profile *sp;
2889
2890         /* Port must be started and TM used. */
2891         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
2892                 return -rte_tm_error_set(error,
2893                         EBUSY,
2894                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2895                         NULL,
2896                         rte_strerror(EBUSY));
2897
2898         /* Node must be valid */
2899         n = tm_node_search(dev, node_id);
2900         if (n == NULL)
2901                 return -rte_tm_error_set(error,
2902                         EINVAL,
2903                         RTE_TM_ERROR_TYPE_NODE_ID,
2904                         NULL,
2905                         rte_strerror(EINVAL));
2906
2907         /* Shaper profile must be valid. */
2908         sp = tm_shaper_profile_search(dev, shaper_profile_id);
2909         if (sp == NULL)
2910                 return -rte_tm_error_set(error,
2911                         EINVAL,
2912                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
2913                         NULL,
2914                         rte_strerror(EINVAL));
2915
2916         switch (n->level) {
2917         case TM_NODE_LEVEL_PORT:
2918                 return -rte_tm_error_set(error,
2919                         EINVAL,
2920                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2921                         NULL,
2922                         rte_strerror(EINVAL));
2924         case TM_NODE_LEVEL_SUBPORT:
2925                 if (update_subport_rate(dev, n, sp))
2926                         return -rte_tm_error_set(error,
2927                                 EINVAL,
2928                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2929                                 NULL,
2930                                 rte_strerror(EINVAL));
2931                 return 0;
2933         case TM_NODE_LEVEL_PIPE:
2934                 if (update_pipe_rate(dev, n, sp))
2935                         return -rte_tm_error_set(error,
2936                                 EINVAL,
2937                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2938                                 NULL,
2939                                 rte_strerror(EINVAL));
2940                 return 0;
2942         case TM_NODE_LEVEL_TC:
2943                 if (update_tc_rate(dev, n, sp))
2944                         return -rte_tm_error_set(error,
2945                                 EINVAL,
2946                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2947                                 NULL,
2948                                 rte_strerror(EINVAL));
2949                 return 0;
2950                 /* fall-through */
2951         case TM_NODE_LEVEL_QUEUE:
2952                 /* fall-through */
2953         default:
2954                 return -rte_tm_error_set(error,
2955                         EINVAL,
2956                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2957                         NULL,
2958                         rte_strerror(EINVAL));
2959         }
2960 }
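
/*
 * Hedged usage sketch (editor's addition): applications reach the handler
 * above through the generic rte_tm API. Node and profile ids below are
 * hypothetical; the call is rejected with EINVAL for port and queue nodes,
 * or when the implied pipe profile does not already exist (see
 * update_pipe_rate() and update_tc_rate() above).
 *
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_node_shaper_update(port_id, tc_node_id, 7, &err) != 0)
 *		printf("shaper update failed: %s\n", err.message);
 */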
2961
2962 static inline uint32_t
2963 tm_port_queue_id(struct rte_eth_dev *dev,
2964         uint32_t port_subport_id,
2965         uint32_t subport_pipe_id,
2966         uint32_t pipe_tc_id,
2967         uint32_t tc_queue_id)
2968 {
2969         struct pmd_internals *p = dev->data->dev_private;
2970         struct tm_hierarchy *h = &p->soft.tm.h;
2971         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2972                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2973
2974         uint32_t port_pipe_id =
2975                 port_subport_id * n_pipes_per_subport + subport_pipe_id;
2976         uint32_t port_tc_id =
2977                 port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
2978         uint32_t port_queue_id =
2979                 port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;
2980
2981         return port_queue_id;
2982 }
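
/*
 * Worked example for the mapping above: with the scheduler's 4 traffic
 * classes per pipe and 4 queues per traffic class, and e.g. 4 pipes per
 * subport, the tuple (subport 1, pipe 2, tc 3, queue 1) yields
 * port_pipe_id = 1 * 4 + 2 = 6, port_tc_id = 6 * 4 + 3 = 27 and
 * port_queue_id = 27 * 4 + 1 = 109.
 */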
2983
2984 static int
2985 read_port_stats(struct rte_eth_dev *dev,
2986         struct tm_node *nr,
2987         struct rte_tm_node_stats *stats,
2988         uint64_t *stats_mask,
2989         int clear)
2990 {
2991         struct pmd_internals *p = dev->data->dev_private;
2992         struct tm_hierarchy *h = &p->soft.tm.h;
2993         uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2994         uint32_t subport_id;
2995
2996         for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
2997                 struct rte_sched_subport_stats s;
2998                 uint32_t tc_ov, id;
2999
3000                 /* Stats read */
3001                 int status = rte_sched_subport_read_stats(
3002                         p->soft.tm.sched,
3003                         subport_id,
3004                         &s,
3005                         &tc_ov);
3006                 if (status)
3007                         return status;
3008
3009                 /* Stats accumulate */
3010                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
3011                         nr->stats.n_pkts +=
3012                                 s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
3013                         nr->stats.n_bytes +=
3014                                 s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
3015                         nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
3016                                 s.n_pkts_tc_dropped[id];
3017                         nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3018                                 s.n_bytes_tc_dropped[id];
3019                 }
3020         }
3021
3022         /* Stats copy */
3023         if (stats)
3024                 memcpy(stats, &nr->stats, sizeof(*stats));
3025
3026         if (stats_mask)
3027                 *stats_mask = STATS_MASK_DEFAULT;
3028
3029         /* Stats clear */
3030         if (clear)
3031                 memset(&nr->stats, 0, sizeof(nr->stats));
3032
3033         return 0;
3034 }
3035
3036 static int
3037 read_subport_stats(struct rte_eth_dev *dev,
3038         struct tm_node *ns,
3039         struct rte_tm_node_stats *stats,
3040         uint64_t *stats_mask,
3041         int clear)
3042 {
3043         struct pmd_internals *p = dev->data->dev_private;
3044         uint32_t subport_id = tm_node_subport_id(dev, ns);
3045         struct rte_sched_subport_stats s;
3046         uint32_t tc_ov, tc_id;
3047
3048         /* Stats read */
3049         int status = rte_sched_subport_read_stats(
3050                 p->soft.tm.sched,
3051                 subport_id,
3052                 &s,
3053                 &tc_ov);
3054         if (status)
3055                 return status;
3056
3057         /* Stats accumulate */
3058         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
3059                 ns->stats.n_pkts +=
3060                         s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
3061                 ns->stats.n_bytes +=
3062                         s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
3063                 ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
3064                         s.n_pkts_tc_dropped[tc_id];
3065                 ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3066                         s.n_bytes_tc_dropped[tc_id];
3067         }
3068
3069         /* Stats copy */
3070         if (stats)
3071                 memcpy(stats, &ns->stats, sizeof(*stats));
3072
3073         if (stats_mask)
3074                 *stats_mask = STATS_MASK_DEFAULT;
3075
3076         /* Stats clear */
3077         if (clear)
3078                 memset(&ns->stats, 0, sizeof(ns->stats));
3079
3080         return 0;
3081 }
3082
3083 static int
3084 read_pipe_stats(struct rte_eth_dev *dev,
3085         struct tm_node *np,
3086         struct rte_tm_node_stats *stats,
3087         uint64_t *stats_mask,
3088         int clear)
3089 {
3090         struct pmd_internals *p = dev->data->dev_private;
3091
3092         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3093
3094         struct tm_node *ns = np->parent_node;
3095         uint32_t subport_id = tm_node_subport_id(dev, ns);
3096
3097         uint32_t i;
3098
3099         /* Stats read */
3100         for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
3101                 struct rte_sched_queue_stats s;
3102                 uint16_t qlen;
3103
3104                 uint32_t qid = tm_port_queue_id(dev,
3105                         subport_id,
3106                         pipe_id,
3107                         i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
3108                         i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
3109
3110                 int status = rte_sched_queue_read_stats(
3111                         p->soft.tm.sched,
3112                         qid,
3113                         &s,
3114                         &qlen);
3115                 if (status)
3116                         return status;
3117
3118                 /* Stats accumulate */
3119                 np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3120                 np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3121                 np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3122                 np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3123                         s.n_bytes_dropped;
3124                 np->stats.leaf.n_pkts_queued = qlen;
3125         }
3126
3127         /* Stats copy */
3128         if (stats)
3129                 memcpy(stats, &np->stats, sizeof(*stats));
3130
3131         if (stats_mask)
3132                 *stats_mask = STATS_MASK_DEFAULT;
3133
3134         /* Stats clear */
3135         if (clear)
3136                 memset(&np->stats, 0, sizeof(np->stats));
3137
3138         return 0;
3139 }
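
/*
 * Note on the queue walk above: with 4 queues per traffic class, index i
 * within the pipe decomposes as tc_id = i / 4 and queue_id = i % 4, so
 * e.g. i = 13 reads traffic class 3, queue 1 of the pipe.
 */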
3140
3141 static int
3142 read_tc_stats(struct rte_eth_dev *dev,
3143         struct tm_node *nt,
3144         struct rte_tm_node_stats *stats,
3145         uint64_t *stats_mask,
3146         int clear)
3147 {
3148         struct pmd_internals *p = dev->data->dev_private;
3149
3150         uint32_t tc_id = tm_node_tc_id(dev, nt);
3151
3152         struct tm_node *np = nt->parent_node;
3153         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3154
3155         struct tm_node *ns = np->parent_node;
3156         uint32_t subport_id = tm_node_subport_id(dev, ns);
3157
3158         uint32_t i;
3159
3160         /* Stats read */
3161         for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
3162                 struct rte_sched_queue_stats s;
3163                 uint16_t qlen;
3164
3165                 uint32_t qid = tm_port_queue_id(dev,
3166                         subport_id,
3167                         pipe_id,
3168                         tc_id,
3169                         i);
3170
3171                 int status = rte_sched_queue_read_stats(
3172                         p->soft.tm.sched,
3173                         qid,
3174                         &s,
3175                         &qlen);
3176                 if (status)
3177                         return status;
3178
3179                 /* Stats accumulate */
3180                 nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3181                 nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3182                 nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3183                 nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3184                         s.n_bytes_dropped;
3185                 nt->stats.leaf.n_pkts_queued = qlen;
3186         }
3187
3188         /* Stats copy */
3189         if (stats)
3190                 memcpy(stats, &nt->stats, sizeof(*stats));
3191
3192         if (stats_mask)
3193                 *stats_mask = STATS_MASK_DEFAULT;
3194
3195         /* Stats clear */
3196         if (clear)
3197                 memset(&nt->stats, 0, sizeof(nt->stats));
3198
3199         return 0;
3200 }
3201
3202 static int
3203 read_queue_stats(struct rte_eth_dev *dev,
3204         struct tm_node *nq,
3205         struct rte_tm_node_stats *stats,
3206         uint64_t *stats_mask,
3207         int clear)
3208 {
3209         struct pmd_internals *p = dev->data->dev_private;
3210         struct rte_sched_queue_stats s;
3211         uint16_t qlen;
3212
3213         uint32_t queue_id = tm_node_queue_id(dev, nq);
3214
3215         struct tm_node *nt = nq->parent_node;
3216         uint32_t tc_id = tm_node_tc_id(dev, nt);
3217
3218         struct tm_node *np = nt->parent_node;
3219         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3220
3221         struct tm_node *ns = np->parent_node;
3222         uint32_t subport_id = tm_node_subport_id(dev, ns);
3223
3224         /* Stats read */
3225         uint32_t qid = tm_port_queue_id(dev,
3226                 subport_id,
3227                 pipe_id,
3228                 tc_id,
3229                 queue_id);
3230
3231         int status = rte_sched_queue_read_stats(
3232                 p->soft.tm.sched,
3233                 qid,
3234                 &s,
3235                 &qlen);
3236         if (status)
3237                 return status;
3238
3239         /* Stats accumulate */
3240         nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3241         nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3242         nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3243         nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3244                 s.n_bytes_dropped;
3245         nq->stats.leaf.n_pkts_queued = qlen;
3246
3247         /* Stats copy */
3248         if (stats)
3249                 memcpy(stats, &nq->stats, sizeof(*stats));
3250
3251         if (stats_mask)
3252                 *stats_mask = STATS_MASK_QUEUE;
3253
3254         /* Stats clear */
3255         if (clear)
3256                 memset(&nq->stats, 0, sizeof(nq->stats));
3257
3258         return 0;
3259 }
3260
3261 /* Traffic manager node stats read */
3262 static int
3263 pmd_tm_node_stats_read(struct rte_eth_dev *dev,
3264         uint32_t node_id,
3265         struct rte_tm_node_stats *stats,
3266         uint64_t *stats_mask,
3267         int clear,
3268         struct rte_tm_error *error)
3269 {
3270         struct tm_node *n;
3271
3272         /* Port must be started and TM used. */
3273         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
3274                 return -rte_tm_error_set(error,
3275                         EBUSY,
3276                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3277                         NULL,
3278                         rte_strerror(EBUSY));
3279
3280         /* Node must be valid */
3281         n = tm_node_search(dev, node_id);
3282         if (n == NULL)
3283                 return -rte_tm_error_set(error,
3284                         EINVAL,
3285                         RTE_TM_ERROR_TYPE_NODE_ID,
3286                         NULL,
3287                         rte_strerror(EINVAL));
3288
3289         switch (n->level) {
3290         case TM_NODE_LEVEL_PORT:
3291                 if (read_port_stats(dev, n, stats, stats_mask, clear))
3292                         return -rte_tm_error_set(error,
3293                                 EINVAL,
3294                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3295                                 NULL,
3296                                 rte_strerror(EINVAL));
3297                 return 0;
3298
3299         case TM_NODE_LEVEL_SUBPORT:
3300                 if (read_subport_stats(dev, n, stats, stats_mask, clear))
3301                         return -rte_tm_error_set(error,
3302                                 EINVAL,
3303                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3304                                 NULL,
3305                                 rte_strerror(EINVAL));
3306                 return 0;
3307
3308         case TM_NODE_LEVEL_PIPE:
3309                 if (read_pipe_stats(dev, n, stats, stats_mask, clear))
3310                         return -rte_tm_error_set(error,
3311                                 EINVAL,
3312                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3313                                 NULL,
3314                                 rte_strerror(EINVAL));
3315                 return 0;
3316
3317         case TM_NODE_LEVEL_TC:
3318                 if (read_tc_stats(dev, n, stats, stats_mask, clear))
3319                         return -rte_tm_error_set(error,
3320                                 EINVAL,
3321                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3322                                 NULL,
3323                                 rte_strerror(EINVAL));
3324                 return 0;
3325
3326         case TM_NODE_LEVEL_QUEUE:
3327         default:
3328                 if (read_queue_stats(dev, n, stats, stats_mask, clear))
3329                         return -rte_tm_error_set(error,
3330                                 EINVAL,
3331                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3332                                 NULL,
3333                                 rte_strerror(EINVAL));
3334                 return 0;
3335         }
3336 }
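
/*
 * Hedged usage sketch (editor's addition): an application typically polls
 * these counters through the generic rte_tm API and checks the returned
 * mask before consuming individual fields. Identifiers below are
 * hypothetical; clear = 0 leaves the PMD-side counters running.
 *
 *	struct rte_tm_node_stats stats;
 *	uint64_t mask;
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_node_stats_read(port_id, queue_node_id, &stats, &mask,
 *			0, &err) == 0 &&
 *		(mask & RTE_TM_STATS_N_PKTS))
 *		printf("queue pkts: %" PRIu64 "\n", stats.n_pkts);
 */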
3337
3338 const struct rte_tm_ops pmd_tm_ops = {
3339         .node_type_get = pmd_tm_node_type_get,
3340         .capabilities_get = pmd_tm_capabilities_get,
3341         .level_capabilities_get = pmd_tm_level_capabilities_get,
3342         .node_capabilities_get = pmd_tm_node_capabilities_get,
3343
3344         .wred_profile_add = pmd_tm_wred_profile_add,
3345         .wred_profile_delete = pmd_tm_wred_profile_delete,
3346         .shared_wred_context_add_update = NULL,
3347         .shared_wred_context_delete = NULL,
3348
3349         .shaper_profile_add = pmd_tm_shaper_profile_add,
3350         .shaper_profile_delete = pmd_tm_shaper_profile_delete,
3351         .shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
3352         .shared_shaper_delete = pmd_tm_shared_shaper_delete,
3353
3354         .node_add = pmd_tm_node_add,
3355         .node_delete = pmd_tm_node_delete,
3356         .node_suspend = NULL,
3357         .node_resume = NULL,
3358         .hierarchy_commit = pmd_tm_hierarchy_commit,
3359
3360         .node_parent_update = pmd_tm_node_parent_update,
3361         .node_shaper_update = pmd_tm_node_shaper_update,
3362         .node_shared_shaper_update = NULL,
3363         .node_stats_update = NULL,
3364         .node_wfq_weight_mode_update = NULL,
3365         .node_cman_update = NULL,
3366         .node_wred_context_update = NULL,
3367         .node_shared_wred_context_update = NULL,
3368
3369         .node_stats_read = pmd_tm_node_stats_read,
3370 };
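
/*
 * The ops table above is exposed to the ethdev layer through the PMD's
 * tm_ops_get callback, so applications never invoke these handlers
 * directly; they go through the rte_tm_* wrappers instead. A minimal
 * sketch (hypothetical variables):
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_capabilities_get(port_id, &cap, &err) == 0)
 *		printf("TM hierarchy levels: %u\n", cap.n_levels_max);
 */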