drivers/net/softnic/rte_eth_softnic_tm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_malloc.h>
#include <rte_string_fns.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"

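/*
 * Traffic class periods for the subport and pipe profiles. These are
 * presumably the tc_period values (in milliseconds) handed to librte_sched
 * when the TM hierarchy is committed elsewhere in this driver; the units are
 * not stated in this file.
 */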
#define SUBPORT_TC_PERIOD       10
#define PIPE_TC_PERIOD          40

int
softnic_tmgr_init(struct pmd_internals *p)
{
        TAILQ_INIT(&p->tmgr_port_list);

        return 0;
}

void
softnic_tmgr_free(struct pmd_internals *p)
{
        for ( ; ; ) {
                struct softnic_tmgr_port *tmgr_port;

                tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
                if (tmgr_port == NULL)
                        break;

                TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
                rte_sched_port_free(tmgr_port->s);
                free(tmgr_port);
        }
}

struct softnic_tmgr_port *
softnic_tmgr_port_find(struct pmd_internals *p,
        const char *name)
{
        struct softnic_tmgr_port *tmgr_port;

        if (name == NULL)
                return NULL;

        TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
                if (strcmp(tmgr_port->name, name) == 0)
                        return tmgr_port;

        return NULL;
}

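/*
 * Create a traffic manager port from the frozen TM hierarchy: configure the
 * rte_sched port, then every subport, then every pipe that has a pipe
 * profile assigned, and only then allocate and register the
 * softnic_tmgr_port list node. Any librte_sched failure frees the partially
 * configured scheduler and returns NULL.
 */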
struct softnic_tmgr_port *
softnic_tmgr_port_create(struct pmd_internals *p,
        const char *name)
{
        struct softnic_tmgr_port *tmgr_port;
        struct tm_params *t = &p->soft.tm.params;
        struct rte_sched_port *sched;
        uint32_t n_subports, subport_id;

        /* Check input params */
        if (name == NULL ||
                softnic_tmgr_port_find(p, name))
                return NULL;

        /*
         * Resource
         */

        /* Is hierarchy frozen? */
        if (p->soft.tm.hierarchy_frozen == 0)
                return NULL;

        /* Port */
        sched = rte_sched_port_config(&t->port_params);
        if (sched == NULL)
                return NULL;

        /* Subport */
        n_subports = t->port_params.n_subports_per_port;
        for (subport_id = 0; subport_id < n_subports; subport_id++) {
                uint32_t n_pipes_per_subport = t->port_params.n_pipes_per_subport;
                uint32_t pipe_id;
                int status;

                status = rte_sched_subport_config(sched,
                        subport_id,
                        &t->subport_params[subport_id]);
                if (status) {
                        rte_sched_port_free(sched);
                        return NULL;
                }

                /* Pipe */
                for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
                        int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
                        int profile_id = t->pipe_to_profile[pos];

                        if (profile_id < 0)
                                continue;

                        status = rte_sched_pipe_config(sched,
                                subport_id,
                                pipe_id,
                                profile_id);
                        if (status) {
                                rte_sched_port_free(sched);
                                return NULL;
                        }
                }
        }

        /* Node allocation */
        tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
        if (tmgr_port == NULL) {
                rte_sched_port_free(sched);
                return NULL;
        }

        /* Node fill in */
        strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
        tmgr_port->s = sched;

        /* Node add to list */
        TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);

        return tmgr_port;
}

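/*
 * Shortcut to the rte_sched_port instance of the traffic manager port named
 * "TMGR" (the name is hard-coded here); returns NULL when that port has not
 * been created yet.
 */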
static struct rte_sched_port *
SCHED(struct pmd_internals *p)
{
        struct softnic_tmgr_port *tmgr_port;

        tmgr_port = softnic_tmgr_port_find(p, "TMGR");
        if (tmgr_port == NULL)
                return NULL;

        return tmgr_port->s;
}

void
tm_hierarchy_init(struct pmd_internals *p)
{
        memset(&p->soft.tm, 0, sizeof(p->soft.tm));

        /* Initialize shaper profile list */
        TAILQ_INIT(&p->soft.tm.h.shaper_profiles);

        /* Initialize shared shaper list */
        TAILQ_INIT(&p->soft.tm.h.shared_shapers);

        /* Initialize wred profile list */
        TAILQ_INIT(&p->soft.tm.h.wred_profiles);

        /* Initialize TM node list */
        TAILQ_INIT(&p->soft.tm.h.nodes);
}

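/*
 * Tear down the TM hierarchy in reverse dependency order: nodes first, then
 * WRED profiles, shared shapers and shaper profiles, and finally
 * re-initialize the now empty lists through tm_hierarchy_init().
 */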
void
tm_hierarchy_free(struct pmd_internals *p)
{
        /* Remove all nodes */
        for ( ; ; ) {
                struct tm_node *tm_node;

                tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
                if (tm_node == NULL)
                        break;

                TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
                free(tm_node);
        }

        /* Remove all WRED profiles */
        for ( ; ; ) {
                struct tm_wred_profile *wred_profile;

                wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
                if (wred_profile == NULL)
                        break;

                TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
                free(wred_profile);
        }

        /* Remove all shared shapers */
        for ( ; ; ) {
                struct tm_shared_shaper *shared_shaper;

                shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
                if (shared_shaper == NULL)
                        break;

                TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
                free(shared_shaper);
        }

        /* Remove all shaper profiles */
        for ( ; ; ) {
                struct tm_shaper_profile *shaper_profile;

                shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
                if (shaper_profile == NULL)
                        break;

                TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
                        shaper_profile, node);
                free(shaper_profile);
        }

        tm_hierarchy_init(p);
}

static struct tm_shaper_profile *
tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
        struct tm_shaper_profile *sp;

        TAILQ_FOREACH(sp, spl, node)
                if (shaper_profile_id == sp->shaper_profile_id)
                        return sp;

        return NULL;
}

static struct tm_shared_shaper *
tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
        struct tm_shared_shaper *ss;

        TAILQ_FOREACH(ss, ssl, node)
                if (shared_shaper_id == ss->shared_shaper_id)
                        return ss;

        return NULL;
}

static struct tm_wred_profile *
tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
        struct tm_wred_profile *wp;

        TAILQ_FOREACH(wp, wpl, node)
                if (wred_profile_id == wp->wred_profile_id)
                        return wp;

        return NULL;
}

static struct tm_node *
tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_node_list *nl = &p->soft.tm.h.nodes;
        struct tm_node *n;

        TAILQ_FOREACH(n, nl, node)
                if (n->node_id == node_id)
                        return n;

        return NULL;
}

static struct tm_node *
tm_root_node_present(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_node_list *nl = &p->soft.tm.h.nodes;
        struct tm_node *n;

        TAILQ_FOREACH(n, nl, node)
                if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
                        return n;

        return NULL;
}

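/*
 * The subport/pipe/queue IDs used by librte_sched are not stored in the TM
 * nodes; they are recovered from the node's position (insertion order) among
 * the nodes of the same level, restricted to the same parent for pipes and
 * queues, while the TC ID is simply the node priority. UINT32_MAX is
 * returned when the node cannot be found.
 */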
static uint32_t
tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_node_list *nl = &p->soft.tm.h.nodes;
        struct tm_node *ns;
        uint32_t subport_id;

        subport_id = 0;
        TAILQ_FOREACH(ns, nl, node) {
                if (ns->level != TM_NODE_LEVEL_SUBPORT)
                        continue;

                if (ns->node_id == subport_node->node_id)
                        return subport_id;

                subport_id++;
        }

        return UINT32_MAX;
}

static uint32_t
tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_node_list *nl = &p->soft.tm.h.nodes;
        struct tm_node *np;
        uint32_t pipe_id;

        pipe_id = 0;
        TAILQ_FOREACH(np, nl, node) {
                if (np->level != TM_NODE_LEVEL_PIPE ||
                        np->parent_node_id != pipe_node->parent_node_id)
                        continue;

                if (np->node_id == pipe_node->node_id)
                        return pipe_id;

                pipe_id++;
        }

        return UINT32_MAX;
}

static uint32_t
tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
{
        return tc_node->priority;
}

static uint32_t
tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_node_list *nl = &p->soft.tm.h.nodes;
        struct tm_node *nq;
        uint32_t queue_id;

        queue_id = 0;
        TAILQ_FOREACH(nq, nl, node) {
                if (nq->level != TM_NODE_LEVEL_QUEUE ||
                        nq->parent_node_id != queue_node->parent_node_id)
                        continue;

                if (nq->node_id == queue_node->node_id)
                        return queue_id;

                queue_id++;
        }

        return UINT32_MAX;
}

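/*
 * Derive the maximum number of nodes per hierarchy level from the number of
 * scheduler queues configured for the port:
 *   n_tc_max       = n_queues * TCs_PER_PIPE / QUEUES_PER_PIPE
 *   n_pipes_max    = n_tc_max / TCs_PER_PIPE
 *   n_subports_max = n_pipes_max, plus a single root node.
 * As a worked example, assuming the librte_sched values of 16 queues and
 * 13 traffic classes per pipe (not defined in this file), 64 queues give
 * 52 TCs, 4 pipes and 4 subports.
 */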
static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
        struct pmd_internals *p = dev->data->dev_private;
        uint32_t n_queues_max = p->params.tm.n_queues;
        uint32_t n_tc_max =
                (n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
                / RTE_SCHED_QUEUES_PER_PIPE;
        uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
        uint32_t n_subports_max = n_pipes_max;
        uint32_t n_root_max = 1;

        switch (level) {
        case TM_NODE_LEVEL_PORT:
                return n_root_max;
        case TM_NODE_LEVEL_SUBPORT:
                return n_subports_max;
        case TM_NODE_LEVEL_PIPE:
                return n_pipes_max;
        case TM_NODE_LEVEL_TC:
                return n_tc_max;
        case TM_NODE_LEVEL_QUEUE:
        default:
                return n_queues_max;
        }
}

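/*
 * A node is a leaf if and only if its node ID is below the number of
 * scheduler queues, i.e. leaf (queue) nodes occupy the ID range
 * [0, n_queues - 1] and every other ID is a non-leaf node.
 */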
/* Traffic manager node type get */
static int
pmd_tm_node_type_get(struct rte_eth_dev *dev,
        uint32_t node_id,
        int *is_leaf,
        struct rte_tm_error *error)
{
        struct pmd_internals *p = dev->data->dev_private;

        if (is_leaf == NULL)
                return -rte_tm_error_set(error,
                   EINVAL,
                   RTE_TM_ERROR_TYPE_UNSPECIFIED,
                   NULL,
                   rte_strerror(EINVAL));

        if (node_id == RTE_TM_NODE_ID_NULL ||
                (tm_node_search(dev, node_id) == NULL))
                return -rte_tm_error_set(error,
                   EINVAL,
                   RTE_TM_ERROR_TYPE_NODE_ID,
                   NULL,
                   rte_strerror(EINVAL));

        *is_leaf = node_id < p->params.tm.n_queues;

        return 0;
}

#ifdef RTE_SCHED_RED
#define WRED_SUPPORTED                                          1
#else
#define WRED_SUPPORTED                                          0
#endif

#define STATS_MASK_DEFAULT                                      \
        (RTE_TM_STATS_N_PKTS |                                  \
        RTE_TM_STATS_N_BYTES |                                  \
        RTE_TM_STATS_N_PKTS_GREEN_DROPPED |                     \
        RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE                                        \
        (STATS_MASK_DEFAULT |                                   \
        RTE_TM_STATS_N_PKTS_QUEUED)

static const struct rte_tm_capabilities tm_cap = {
        .n_nodes_max = UINT32_MAX,
        .n_levels_max = TM_NODE_LEVEL_MAX,

        .non_leaf_nodes_identical = 0,
        .leaf_nodes_identical = 1,

        .shaper_n_max = UINT32_MAX,
        .shaper_private_n_max = UINT32_MAX,
        .shaper_private_dual_rate_n_max = 0,
        .shaper_private_rate_min = 1,
        .shaper_private_rate_max = UINT32_MAX,

        .shaper_shared_n_max = UINT32_MAX,
        .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
        .shaper_shared_n_shapers_per_node_max = 1,
        .shaper_shared_dual_rate_n_max = 0,
        .shaper_shared_rate_min = 1,
        .shaper_shared_rate_max = UINT32_MAX,

        .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
        .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,

        .sched_n_children_max = UINT32_MAX,
        .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
        .sched_wfq_n_children_per_group_max = UINT32_MAX,
        .sched_wfq_n_groups_max = 1,
        .sched_wfq_weight_max = UINT32_MAX,

        .cman_wred_packet_mode_supported = WRED_SUPPORTED,
        .cman_wred_byte_mode_supported = 0,
        .cman_head_drop_supported = 0,
        .cman_wred_context_n_max = 0,
        .cman_wred_context_private_n_max = 0,
        .cman_wred_context_shared_n_max = 0,
        .cman_wred_context_shared_n_nodes_per_context_max = 0,
        .cman_wred_context_shared_n_contexts_per_node_max = 0,

        .mark_vlan_dei_supported = {0, 0, 0},
        .mark_ip_ecn_tcp_supported = {0, 0, 0},
        .mark_ip_ecn_sctp_supported = {0, 0, 0},
        .mark_ip_dscp_supported = {0, 0, 0},

        .dynamic_update_mask = 0,

        .stats_mask = STATS_MASK_QUEUE,
};

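/*
 * Port-level capabilities start from the static tm_cap template and are then
 * adjusted for this device: node and private shaper counts are the sums of
 * the per-level maximums, shared shapers are one per subport traffic class,
 * and private WRED contexts (when RTE_SCHED_RED is enabled) are one per
 * queue.
 */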
/* Traffic manager capabilities get */
static int
pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
        struct rte_tm_capabilities *cap,
        struct rte_tm_error *error)
{
        if (cap == NULL)
                return -rte_tm_error_set(error,
                   EINVAL,
                   RTE_TM_ERROR_TYPE_CAPABILITIES,
                   NULL,
                   rte_strerror(EINVAL));

        memcpy(cap, &tm_cap, sizeof(*cap));

        cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

        cap->shaper_private_n_max =
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);

        cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);

        cap->shaper_n_max = cap->shaper_private_n_max +
                cap->shaper_shared_n_max;

        cap->shaper_shared_n_nodes_per_shaper_max =
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);

        cap->sched_n_children_max = RTE_MAX(
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
                (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);

        cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;

        if (WRED_SUPPORTED)
                cap->cman_wred_context_private_n_max =
                        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

        cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
                cap->cman_wred_context_shared_n_max;

        return 0;
}

static const struct rte_tm_level_capabilities tm_level_cap[] = {
        [TM_NODE_LEVEL_PORT] = {
                .n_nodes_max = 1,
                .n_nodes_nonleaf_max = 1,
                .n_nodes_leaf_max = 0,
                .non_leaf_nodes_identical = 1,
                .leaf_nodes_identical = 0,

                {.nonleaf = {
                        .shaper_private_supported = 1,
                        .shaper_private_dual_rate_supported = 0,
                        .shaper_private_rate_min = 1,
                        .shaper_private_rate_max = UINT32_MAX,
                        .shaper_shared_n_max = 0,

                        .sched_n_children_max = UINT32_MAX,
                        .sched_sp_n_priorities_max = 1,
                        .sched_wfq_n_children_per_group_max = UINT32_MAX,
                        .sched_wfq_n_groups_max = 1,
                        .sched_wfq_weight_max = 1,

                        .stats_mask = STATS_MASK_DEFAULT,
                } },
        },

        [TM_NODE_LEVEL_SUBPORT] = {
                .n_nodes_max = UINT32_MAX,
                .n_nodes_nonleaf_max = UINT32_MAX,
                .n_nodes_leaf_max = 0,
                .non_leaf_nodes_identical = 1,
                .leaf_nodes_identical = 0,

                {.nonleaf = {
                        .shaper_private_supported = 1,
                        .shaper_private_dual_rate_supported = 0,
                        .shaper_private_rate_min = 1,
                        .shaper_private_rate_max = UINT32_MAX,
                        .shaper_shared_n_max = 0,

                        .sched_n_children_max = UINT32_MAX,
                        .sched_sp_n_priorities_max = 1,
                        .sched_wfq_n_children_per_group_max = UINT32_MAX,
                        .sched_wfq_n_groups_max = 1,
#ifdef RTE_SCHED_SUBPORT_TC_OV
                        .sched_wfq_weight_max = UINT32_MAX,
#else
                        .sched_wfq_weight_max = 1,
#endif
                        .stats_mask = STATS_MASK_DEFAULT,
                } },
        },

        [TM_NODE_LEVEL_PIPE] = {
                .n_nodes_max = UINT32_MAX,
                .n_nodes_nonleaf_max = UINT32_MAX,
                .n_nodes_leaf_max = 0,
                .non_leaf_nodes_identical = 1,
                .leaf_nodes_identical = 0,

                {.nonleaf = {
                        .shaper_private_supported = 1,
                        .shaper_private_dual_rate_supported = 0,
                        .shaper_private_rate_min = 1,
                        .shaper_private_rate_max = UINT32_MAX,
                        .shaper_shared_n_max = 0,

                        .sched_n_children_max =
                                RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
                        .sched_sp_n_priorities_max =
                                RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
                        .sched_wfq_n_children_per_group_max = 1,
                        .sched_wfq_n_groups_max = 0,
                        .sched_wfq_weight_max = 1,

                        .stats_mask = STATS_MASK_DEFAULT,
                } },
        },

        [TM_NODE_LEVEL_TC] = {
                .n_nodes_max = UINT32_MAX,
                .n_nodes_nonleaf_max = UINT32_MAX,
                .n_nodes_leaf_max = 0,
                .non_leaf_nodes_identical = 1,
                .leaf_nodes_identical = 0,

                {.nonleaf = {
                        .shaper_private_supported = 1,
                        .shaper_private_dual_rate_supported = 0,
                        .shaper_private_rate_min = 1,
                        .shaper_private_rate_max = UINT32_MAX,
                        .shaper_shared_n_max = 1,

                        .sched_n_children_max =
                                RTE_SCHED_BE_QUEUES_PER_PIPE,
                        .sched_sp_n_priorities_max = 1,
                        .sched_wfq_n_children_per_group_max =
                                RTE_SCHED_BE_QUEUES_PER_PIPE,
                        .sched_wfq_n_groups_max = 1,
                        .sched_wfq_weight_max = UINT32_MAX,

                        .stats_mask = STATS_MASK_DEFAULT,
                } },
        },

        [TM_NODE_LEVEL_QUEUE] = {
                .n_nodes_max = UINT32_MAX,
                .n_nodes_nonleaf_max = 0,
                .n_nodes_leaf_max = UINT32_MAX,
                .non_leaf_nodes_identical = 0,
                .leaf_nodes_identical = 1,

                {.leaf = {
                        .shaper_private_supported = 0,
                        .shaper_private_dual_rate_supported = 0,
                        .shaper_private_rate_min = 0,
                        .shaper_private_rate_max = 0,
                        .shaper_shared_n_max = 0,

                        .cman_head_drop_supported = 0,
                        .cman_wred_packet_mode_supported = WRED_SUPPORTED,
                        .cman_wred_byte_mode_supported = 0,
                        .cman_wred_context_private_supported = WRED_SUPPORTED,
                        .cman_wred_context_shared_n_max = 0,

                        .stats_mask = STATS_MASK_QUEUE,
                } },
        },
};

/* Traffic manager level capabilities get */
static int
pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
        uint32_t level_id,
        struct rte_tm_level_capabilities *cap,
        struct rte_tm_error *error)
{
        if (cap == NULL)
                return -rte_tm_error_set(error,
                   EINVAL,
                   RTE_TM_ERROR_TYPE_CAPABILITIES,
                   NULL,
                   rte_strerror(EINVAL));

        if (level_id >= TM_NODE_LEVEL_MAX)
                return -rte_tm_error_set(error,
                   EINVAL,
                   RTE_TM_ERROR_TYPE_LEVEL_ID,
                   NULL,
                   rte_strerror(EINVAL));

        memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));

        switch (level_id) {
        case TM_NODE_LEVEL_PORT:
                cap->nonleaf.sched_n_children_max =
                        tm_level_get_max_nodes(dev,
                                TM_NODE_LEVEL_SUBPORT);
                cap->nonleaf.sched_wfq_n_children_per_group_max =
                        cap->nonleaf.sched_n_children_max;
                break;

        case TM_NODE_LEVEL_SUBPORT:
                cap->n_nodes_max = tm_level_get_max_nodes(dev,
                        TM_NODE_LEVEL_SUBPORT);
                cap->n_nodes_nonleaf_max = cap->n_nodes_max;
                cap->nonleaf.sched_n_children_max =
                        tm_level_get_max_nodes(dev,
                                TM_NODE_LEVEL_PIPE);
                cap->nonleaf.sched_wfq_n_children_per_group_max =
                        cap->nonleaf.sched_n_children_max;
                break;

        case TM_NODE_LEVEL_PIPE:
                cap->n_nodes_max = tm_level_get_max_nodes(dev,
                        TM_NODE_LEVEL_PIPE);
                cap->n_nodes_nonleaf_max = cap->n_nodes_max;
                break;

        case TM_NODE_LEVEL_TC:
                cap->n_nodes_max = tm_level_get_max_nodes(dev,
                        TM_NODE_LEVEL_TC);
                cap->n_nodes_nonleaf_max = cap->n_nodes_max;
                break;

        case TM_NODE_LEVEL_QUEUE:
        default:
                cap->n_nodes_max = tm_level_get_max_nodes(dev,
                        TM_NODE_LEVEL_QUEUE);
                cap->n_nodes_leaf_max = cap->n_nodes_max;
                break;
        }

        return 0;
}

static const struct rte_tm_node_capabilities tm_node_cap[] = {
        [TM_NODE_LEVEL_PORT] = {
                .shaper_private_supported = 1,
                .shaper_private_dual_rate_supported = 0,
                .shaper_private_rate_min = 1,
                .shaper_private_rate_max = UINT32_MAX,
                .shaper_shared_n_max = 0,

                {.nonleaf = {
                        .sched_n_children_max = UINT32_MAX,
                        .sched_sp_n_priorities_max = 1,
                        .sched_wfq_n_children_per_group_max = UINT32_MAX,
                        .sched_wfq_n_groups_max = 1,
                        .sched_wfq_weight_max = 1,
                } },

                .stats_mask = STATS_MASK_DEFAULT,
        },

        [TM_NODE_LEVEL_SUBPORT] = {
                .shaper_private_supported = 1,
                .shaper_private_dual_rate_supported = 0,
                .shaper_private_rate_min = 1,
                .shaper_private_rate_max = UINT32_MAX,
                .shaper_shared_n_max = 0,

                {.nonleaf = {
                        .sched_n_children_max = UINT32_MAX,
                        .sched_sp_n_priorities_max = 1,
                        .sched_wfq_n_children_per_group_max = UINT32_MAX,
                        .sched_wfq_n_groups_max = 1,
                        .sched_wfq_weight_max = UINT32_MAX,
                } },

                .stats_mask = STATS_MASK_DEFAULT,
        },

        [TM_NODE_LEVEL_PIPE] = {
                .shaper_private_supported = 1,
                .shaper_private_dual_rate_supported = 0,
                .shaper_private_rate_min = 1,
                .shaper_private_rate_max = UINT32_MAX,
                .shaper_shared_n_max = 0,

                {.nonleaf = {
                        .sched_n_children_max =
                                RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
                        .sched_sp_n_priorities_max =
                                RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
                        .sched_wfq_n_children_per_group_max = 1,
                        .sched_wfq_n_groups_max = 0,
                        .sched_wfq_weight_max = 1,
                } },

                .stats_mask = STATS_MASK_DEFAULT,
        },

        [TM_NODE_LEVEL_TC] = {
                .shaper_private_supported = 1,
                .shaper_private_dual_rate_supported = 0,
                .shaper_private_rate_min = 1,
                .shaper_private_rate_max = UINT32_MAX,
                .shaper_shared_n_max = 1,

                {.nonleaf = {
                        .sched_n_children_max =
                                RTE_SCHED_BE_QUEUES_PER_PIPE,
                        .sched_sp_n_priorities_max = 1,
                        .sched_wfq_n_children_per_group_max =
                                RTE_SCHED_BE_QUEUES_PER_PIPE,
                        .sched_wfq_n_groups_max = 1,
                        .sched_wfq_weight_max = UINT32_MAX,
                } },

                .stats_mask = STATS_MASK_DEFAULT,
        },

        [TM_NODE_LEVEL_QUEUE] = {
                .shaper_private_supported = 0,
                .shaper_private_dual_rate_supported = 0,
                .shaper_private_rate_min = 0,
                .shaper_private_rate_max = 0,
                .shaper_shared_n_max = 0,

                {.leaf = {
                        .cman_head_drop_supported = 0,
                        .cman_wred_packet_mode_supported = WRED_SUPPORTED,
                        .cman_wred_byte_mode_supported = 0,
                        .cman_wred_context_private_supported = WRED_SUPPORTED,
                        .cman_wred_context_shared_n_max = 0,
                } },

                .stats_mask = STATS_MASK_QUEUE,
        },
};

/* Traffic manager node capabilities get */
static int
pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
        uint32_t node_id,
        struct rte_tm_node_capabilities *cap,
        struct rte_tm_error *error)
{
        struct tm_node *tm_node;

        if (cap == NULL)
                return -rte_tm_error_set(error,
                   EINVAL,
                   RTE_TM_ERROR_TYPE_CAPABILITIES,
                   NULL,
                   rte_strerror(EINVAL));

        tm_node = tm_node_search(dev, node_id);
        if (tm_node == NULL)
                return -rte_tm_error_set(error,
                   EINVAL,
                   RTE_TM_ERROR_TYPE_NODE_ID,
                   NULL,
                   rte_strerror(EINVAL));

        memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));

        switch (tm_node->level) {
        case TM_NODE_LEVEL_PORT:
                cap->nonleaf.sched_n_children_max =
                        tm_level_get_max_nodes(dev,
                                TM_NODE_LEVEL_SUBPORT);
                cap->nonleaf.sched_wfq_n_children_per_group_max =
                        cap->nonleaf.sched_n_children_max;
                break;

        case TM_NODE_LEVEL_SUBPORT:
                cap->nonleaf.sched_n_children_max =
                        tm_level_get_max_nodes(dev,
                                TM_NODE_LEVEL_PIPE);
                cap->nonleaf.sched_wfq_n_children_per_group_max =
                        cap->nonleaf.sched_n_children_max;
                break;

        case TM_NODE_LEVEL_PIPE:
        case TM_NODE_LEVEL_TC:
        case TM_NODE_LEVEL_QUEUE:
        default:
                break;
        }

        return 0;
}

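/*
 * Only single-rate shaper profiles are accepted: peak rate and peak bucket
 * size must be non-zero 32-bit values, the committed rate must be zero (no
 * dual-rate support) and the packet length adjustment must equal the
 * Ethernet framing overhead including FCS (24 bytes).
 */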
static int
shaper_profile_check(struct rte_eth_dev *dev,
        uint32_t shaper_profile_id,
        struct rte_tm_shaper_params *profile,
        struct rte_tm_error *error)
{
        struct tm_shaper_profile *sp;

        /* Shaper profile ID must not be NONE. */
        if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
                        NULL,
                        rte_strerror(EINVAL));

        /* Shaper profile must not exist. */
        sp = tm_shaper_profile_search(dev, shaper_profile_id);
        if (sp)
                return -rte_tm_error_set(error,
                        EEXIST,
                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
                        NULL,
                        rte_strerror(EEXIST));

        /* Profile must not be NULL. */
        if (profile == NULL)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
                        NULL,
                        rte_strerror(EINVAL));

        /* Peak rate: non-zero, 32-bit */
        if (profile->peak.rate == 0 ||
                profile->peak.rate >= UINT32_MAX)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
                        NULL,
                        rte_strerror(EINVAL));

        /* Peak size: non-zero, 32-bit */
        if (profile->peak.size == 0 ||
                profile->peak.size >= UINT32_MAX)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
                        NULL,
                        rte_strerror(EINVAL));

        /* Dual-rate profiles are not supported. */
        if (profile->committed.rate != 0)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
                        NULL,
                        rte_strerror(EINVAL));

        /* Packet length adjust: 24 bytes */
        if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
                        NULL,
                        rte_strerror(EINVAL));

        return 0;
}

/* Traffic manager shaper profile add */
static int
pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
        uint32_t shaper_profile_id,
        struct rte_tm_shaper_params *profile,
        struct rte_tm_error *error)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
        struct tm_shaper_profile *sp;
        int status;

        /* Check input params */
        status = shaper_profile_check(dev, shaper_profile_id, profile, error);
        if (status)
                return status;

        /* Memory allocation */
        sp = calloc(1, sizeof(struct tm_shaper_profile));
        if (sp == NULL)
                return -rte_tm_error_set(error,
                        ENOMEM,
                        RTE_TM_ERROR_TYPE_UNSPECIFIED,
                        NULL,
                        rte_strerror(ENOMEM));

        /* Fill in */
        sp->shaper_profile_id = shaper_profile_id;
        memcpy(&sp->params, profile, sizeof(sp->params));

        /* Add to list */
        TAILQ_INSERT_TAIL(spl, sp, node);
        p->soft.tm.h.n_shaper_profiles++;

        return 0;
}

/* Traffic manager shaper profile delete */
static int
pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
        uint32_t shaper_profile_id,
        struct rte_tm_error *error)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_shaper_profile *sp;

        /* Check existing */
        sp = tm_shaper_profile_search(dev, shaper_profile_id);
        if (sp == NULL)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
                        NULL,
                        rte_strerror(EINVAL));

        /* Check unused */
        if (sp->n_users)
                return -rte_tm_error_set(error,
                        EBUSY,
                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
                        NULL,
                        rte_strerror(EBUSY));

        /* Remove from list */
        TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
        p->soft.tm.h.n_shaper_profiles--;
        free(sp);

        return 0;
}

static struct tm_node *
tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
        struct tm_shared_shaper *ss)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_node_list *nl = &p->soft.tm.h.nodes;
        struct tm_node *n;

        /* Each subport TC uses exactly one shared shaper; find the TC node
         * that references this one.
         */
        TAILQ_FOREACH(n, nl, node) {
                if (n->level != TM_NODE_LEVEL_TC ||
                        n->params.n_shared_shapers == 0 ||
                        n->params.shared_shaper_id[0] != ss->shared_shaper_id)
                        continue;

                return n;
        }

        return NULL;
}

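/*
 * Run-time update path for a shared shaper: locate the subport that owns the
 * TC node, rebuild that subport configuration with the new TC rate taken
 * from the new shaper profile, push it to the running scheduler through
 * rte_sched_subport_config(), and only then move the shared shaper to the
 * new profile and adjust the profile user counts.
 */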
static int
update_subport_tc_rate(struct rte_eth_dev *dev,
        struct tm_node *nt,
        struct tm_shared_shaper *ss,
        struct tm_shaper_profile *sp_new)
{
        struct pmd_internals *p = dev->data->dev_private;
        uint32_t tc_id = tm_node_tc_id(dev, nt);

        struct tm_node *np = nt->parent_node;

        struct tm_node *ns = np->parent_node;
        uint32_t subport_id = tm_node_subport_id(dev, ns);

        struct rte_sched_subport_params subport_params;

        struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
                ss->shaper_profile_id);

        /* Derive new subport configuration. */
        memcpy(&subport_params,
                &p->soft.tm.params.subport_params[subport_id],
                sizeof(subport_params));
        subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;

        /* Update the subport configuration. */
        if (rte_sched_subport_config(SCHED(p),
                subport_id, &subport_params))
                return -1;

        /* Commit changes. */
        sp_old->n_users--;

        ss->shaper_profile_id = sp_new->shaper_profile_id;
        sp_new->n_users++;

        memcpy(&p->soft.tm.params.subport_params[subport_id],
                &subport_params,
                sizeof(subport_params));

        return 0;
}

/* Traffic manager shared shaper add/update */
static int
pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
        uint32_t shared_shaper_id,
        uint32_t shaper_profile_id,
        struct rte_tm_error *error)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_shared_shaper *ss;
        struct tm_shaper_profile *sp;
        struct tm_node *nt;

        /* Shaper profile must be valid. */
        sp = tm_shaper_profile_search(dev, shaper_profile_id);
        if (sp == NULL)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
                        NULL,
                        rte_strerror(EINVAL));

        /**
         * Add new shared shaper
         */
        ss = tm_shared_shaper_search(dev, shared_shaper_id);
        if (ss == NULL) {
                struct tm_shared_shaper_list *ssl =
                        &p->soft.tm.h.shared_shapers;

                /* Hierarchy must not be frozen */
                if (p->soft.tm.hierarchy_frozen)
                        return -rte_tm_error_set(error,
                                EBUSY,
                                RTE_TM_ERROR_TYPE_UNSPECIFIED,
                                NULL,
                                rte_strerror(EBUSY));

                /* Memory allocation */
                ss = calloc(1, sizeof(struct tm_shared_shaper));
                if (ss == NULL)
                        return -rte_tm_error_set(error,
                                ENOMEM,
                                RTE_TM_ERROR_TYPE_UNSPECIFIED,
                                NULL,
                                rte_strerror(ENOMEM));

                /* Fill in */
                ss->shared_shaper_id = shared_shaper_id;
                ss->shaper_profile_id = shaper_profile_id;

                /* Add to list */
                TAILQ_INSERT_TAIL(ssl, ss, node);
                p->soft.tm.h.n_shared_shapers++;

                return 0;
        }

        /**
         * Update existing shared shaper
         */
        /* Hierarchy must be frozen (run-time update) */
        if (p->soft.tm.hierarchy_frozen == 0)
                return -rte_tm_error_set(error,
                        EBUSY,
                        RTE_TM_ERROR_TYPE_UNSPECIFIED,
                        NULL,
                        rte_strerror(EBUSY));

        /* Propagate change. */
        nt = tm_shared_shaper_get_tc(dev, ss);
        if (update_subport_tc_rate(dev, nt, ss, sp))
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_UNSPECIFIED,
                        NULL,
                        rte_strerror(EINVAL));

        return 0;
}

/* Traffic manager shared shaper delete */
static int
pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
        uint32_t shared_shaper_id,
        struct rte_tm_error *error)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_shared_shaper *ss;

        /* Check existing */
        ss = tm_shared_shaper_search(dev, shared_shaper_id);
        if (ss == NULL)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
                        NULL,
                        rte_strerror(EINVAL));

        /* Check unused */
        if (ss->n_users)
                return -rte_tm_error_set(error,
                        EBUSY,
                        RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
                        NULL,
                        rte_strerror(EBUSY));

        /* Remove from list */
        TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
        p->soft.tm.h.n_shared_shapers--;
        free(ss);

        return 0;
}

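/*
 * WRED profiles must be in packet mode and are validated per color:
 * min_th <= max_th, max_th > 0, and both thresholds must fit in 16 bits.
 */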
static int
wred_profile_check(struct rte_eth_dev *dev,
        uint32_t wred_profile_id,
        struct rte_tm_wred_params *profile,
        struct rte_tm_error *error)
{
        struct tm_wred_profile *wp;
        enum rte_color color;

        /* WRED profile ID must not be NONE. */
        if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
                        NULL,
                        rte_strerror(EINVAL));

        /* WRED profile must not exist. */
        wp = tm_wred_profile_search(dev, wred_profile_id);
        if (wp)
                return -rte_tm_error_set(error,
                        EEXIST,
                        RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
                        NULL,
                        rte_strerror(EEXIST));

        /* Profile must not be NULL. */
        if (profile == NULL)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_WRED_PROFILE,
                        NULL,
                        rte_strerror(EINVAL));

        /* WRED profile must be in packet mode */
        if (profile->packet_mode == 0)
                return -rte_tm_error_set(error,
                        ENOTSUP,
                        RTE_TM_ERROR_TYPE_WRED_PROFILE,
                        NULL,
                        rte_strerror(ENOTSUP));

        /* min_th <= max_th, max_th > 0 */
        for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
                uint32_t min_th = profile->red_params[color].min_th;
                uint32_t max_th = profile->red_params[color].max_th;

                if (min_th > max_th ||
                        max_th == 0 ||
                        min_th > UINT16_MAX ||
                        max_th > UINT16_MAX)
                        return -rte_tm_error_set(error,
                                EINVAL,
                                RTE_TM_ERROR_TYPE_WRED_PROFILE,
                                NULL,
                                rte_strerror(EINVAL));
        }

        return 0;
}

/* Traffic manager WRED profile add */
static int
pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
        uint32_t wred_profile_id,
        struct rte_tm_wred_params *profile,
        struct rte_tm_error *error)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
        struct tm_wred_profile *wp;
        int status;

        /* Check input params */
        status = wred_profile_check(dev, wred_profile_id, profile, error);
        if (status)
                return status;

        /* Memory allocation */
        wp = calloc(1, sizeof(struct tm_wred_profile));
        if (wp == NULL)
                return -rte_tm_error_set(error,
                        ENOMEM,
                        RTE_TM_ERROR_TYPE_UNSPECIFIED,
                        NULL,
                        rte_strerror(ENOMEM));

        /* Fill in */
        wp->wred_profile_id = wred_profile_id;
        memcpy(&wp->params, profile, sizeof(wp->params));

        /* Add to list */
        TAILQ_INSERT_TAIL(wpl, wp, node);
        p->soft.tm.h.n_wred_profiles++;

        return 0;
}

/* Traffic manager WRED profile delete */
static int
pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
        uint32_t wred_profile_id,
        struct rte_tm_error *error)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_wred_profile *wp;

        /* Check existing */
        wp = tm_wred_profile_search(dev, wred_profile_id);
        if (wp == NULL)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
                        NULL,
                        rte_strerror(EINVAL));

        /* Check unused */
        if (wp->n_users)
                return -rte_tm_error_set(error,
                        EBUSY,
                        RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
                        NULL,
                        rte_strerror(EBUSY));

        /* Remove from list */
        TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
        p->soft.tm.h.n_wred_profiles--;
        free(wp);

        return 0;
}

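/*
 * The node_add_check_*() helpers below validate the per-level constraints of
 * a node add request. At the port (root) level: the node ID must be outside
 * the leaf (queue) ID range, priority must be 0, weight must be 1, a valid
 * private shaper profile is mandatory, shared shapers are not allowed,
 * exactly one SP priority is supported and only the default stats counters
 * may be requested.
 */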
static int
node_add_check_port(struct rte_eth_dev *dev,
        uint32_t node_id,
        uint32_t parent_node_id __rte_unused,
        uint32_t priority,
        uint32_t weight,
        uint32_t level_id __rte_unused,
        struct rte_tm_node_params *params,
        struct rte_tm_error *error)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
                params->shaper_profile_id);

        /* node type: non-leaf */
        if (node_id < p->params.tm.n_queues)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_ID,
                        NULL,
                        rte_strerror(EINVAL));

        /* Priority must be 0 */
        if (priority != 0)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_PRIORITY,
                        NULL,
                        rte_strerror(EINVAL));

        /* Weight must be 1 */
        if (weight != 1)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_WEIGHT,
                        NULL,
                        rte_strerror(EINVAL));

        /* Shaper must be valid */
        if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
                sp == NULL)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
                        NULL,
                        rte_strerror(EINVAL));

        /* No shared shapers */
        if (params->n_shared_shapers != 0)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
                        NULL,
                        rte_strerror(EINVAL));

        /* Number of SP priorities must be 1 */
        if (params->nonleaf.n_sp_priorities != 1)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
                        NULL,
                        rte_strerror(EINVAL));

        /* Stats */
        if (params->stats_mask & ~STATS_MASK_DEFAULT)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
                        NULL,
                        rte_strerror(EINVAL));

        return 0;
}

static int
node_add_check_subport(struct rte_eth_dev *dev,
        uint32_t node_id,
        uint32_t parent_node_id __rte_unused,
        uint32_t priority,
        uint32_t weight,
        uint32_t level_id __rte_unused,
        struct rte_tm_node_params *params,
        struct rte_tm_error *error)
{
        struct pmd_internals *p = dev->data->dev_private;

        /* node type: non-leaf */
        if (node_id < p->params.tm.n_queues)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_ID,
                        NULL,
                        rte_strerror(EINVAL));

        /* Priority must be 0 */
        if (priority != 0)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_PRIORITY,
                        NULL,
                        rte_strerror(EINVAL));

        /* Weight must be 1 */
        if (weight != 1)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_WEIGHT,
                        NULL,
                        rte_strerror(EINVAL));

        /* Shaper must be valid */
        if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
                (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
                        NULL,
                        rte_strerror(EINVAL));

        /* No shared shapers */
        if (params->n_shared_shapers != 0)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
                        NULL,
                        rte_strerror(EINVAL));

        /* Number of SP priorities must be 1 */
        if (params->nonleaf.n_sp_priorities != 1)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
                        NULL,
                        rte_strerror(EINVAL));

        /* Stats */
        if (params->stats_mask & ~STATS_MASK_DEFAULT)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
                        NULL,
                        rte_strerror(EINVAL));

        return 0;
}

1480 static int
1481 node_add_check_pipe(struct rte_eth_dev *dev,
1482         uint32_t node_id,
1483         uint32_t parent_node_id __rte_unused,
1484         uint32_t priority,
1485         uint32_t weight __rte_unused,
1486         uint32_t level_id __rte_unused,
1487         struct rte_tm_node_params *params,
1488         struct rte_tm_error *error)
1489 {
1490         struct pmd_internals *p = dev->data->dev_private;
1491
1492         /* node type: non-leaf */
1493         if (node_id < p->params.tm.n_queues)
1494                 return -rte_tm_error_set(error,
1495                         EINVAL,
1496                         RTE_TM_ERROR_TYPE_NODE_ID,
1497                         NULL,
1498                         rte_strerror(EINVAL));
1499
1500         /* Priority must be 0 */
1501         if (priority != 0)
1502                 return -rte_tm_error_set(error,
1503                         EINVAL,
1504                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1505                         NULL,
1506                         rte_strerror(EINVAL));
1507
1508         /* Shaper must be valid */
1509         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1510                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1511                 return -rte_tm_error_set(error,
1512                         EINVAL,
1513                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1514                         NULL,
1515                         rte_strerror(EINVAL));
1516
1517         /* No shared shapers */
1518         if (params->n_shared_shapers != 0)
1519                 return -rte_tm_error_set(error,
1520                         EINVAL,
1521                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1522                         NULL,
1523                         rte_strerror(EINVAL));
1524
1525         /* Number of SP priorities must be equal to the number of traffic classes */
1526         if (params->nonleaf.n_sp_priorities !=
1527                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1528                 return -rte_tm_error_set(error,
1529                         EINVAL,
1530                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1531                         NULL,
1532                         rte_strerror(EINVAL));
1533
1534         /* WFQ mode must be byte mode */
1535         if (params->nonleaf.wfq_weight_mode != NULL &&
1536                 params->nonleaf.wfq_weight_mode[0] != 0 &&
1537                 params->nonleaf.wfq_weight_mode[1] != 0 &&
1538                 params->nonleaf.wfq_weight_mode[2] != 0 &&
1539                 params->nonleaf.wfq_weight_mode[3] != 0)
1540                 return -rte_tm_error_set(error,
1541                         EINVAL,
1542                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
1543                         NULL,
1544                         rte_strerror(EINVAL));
1545
1546         /* Stats */
1547         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1548                 return -rte_tm_error_set(error,
1549                         EINVAL,
1550                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1551                         NULL,
1552                         rte_strerror(EINVAL));
1553
1554         return 0;
1555 }
1556
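/* Check an add request for a TC-level node: the node must be non-leaf, have
 * weight 1, a valid private shaper profile, at most one shared shaper (valid
 * if present), a single SP priority, and a stats mask limited to
 * STATS_MASK_DEFAULT.
 */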
1557 static int
1558 node_add_check_tc(struct rte_eth_dev *dev,
1559         uint32_t node_id,
1560         uint32_t parent_node_id __rte_unused,
1561         uint32_t priority __rte_unused,
1562         uint32_t weight,
1563         uint32_t level_id __rte_unused,
1564         struct rte_tm_node_params *params,
1565         struct rte_tm_error *error)
1566 {
1567         struct pmd_internals *p = dev->data->dev_private;
1568
1569         /* node type: non-leaf */
1570         if (node_id < p->params.tm.n_queues)
1571                 return -rte_tm_error_set(error,
1572                         EINVAL,
1573                         RTE_TM_ERROR_TYPE_NODE_ID,
1574                         NULL,
1575                         rte_strerror(EINVAL));
1576
1577         /* Weight must be 1 */
1578         if (weight != 1)
1579                 return -rte_tm_error_set(error,
1580                         EINVAL,
1581                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1582                         NULL,
1583                         rte_strerror(EINVAL));
1584
1585         /* Shaper must be valid */
1586         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1587                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1588                 return -rte_tm_error_set(error,
1589                         EINVAL,
1590                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1591                         NULL,
1592                         rte_strerror(EINVAL));
1593
1594         /* At most one shared shaper; if present, it must be valid */
1595         if (params->n_shared_shapers > 1)
1596                 return -rte_tm_error_set(error,
1597                         EINVAL,
1598                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1599                         NULL,
1600                         rte_strerror(EINVAL));
1601
1602         if (params->n_shared_shapers == 1 &&
1603                 (params->shared_shaper_id == NULL ||
1604                 (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
1605                 return -rte_tm_error_set(error,
1606                         EINVAL,
1607                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
1608                         NULL,
1609                         rte_strerror(EINVAL));
1610
1611         /* Number of SP priorities must be 1 */
1612         if (params->nonleaf.n_sp_priorities != 1)
1613                 return -rte_tm_error_set(error,
1614                         EINVAL,
1615                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1616                         NULL,
1617                         rte_strerror(EINVAL));
1618
1619         /* Stats */
1620         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1621                 return -rte_tm_error_set(error,
1622                         EINVAL,
1623                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1624                         NULL,
1625                         rte_strerror(EINVAL));
1626
1627         return 0;
1628 }
1629
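/* Check an add request for a QUEUE-level (leaf) node: node_id must fall in
 * the queue ID range, priority must be 0, no private or shared shapers are
 * allowed, congestion management is either tail drop or WRED (head drop is
 * rejected) with a valid private WRED profile and no shared WRED contexts,
 * and the stats mask is limited to STATS_MASK_QUEUE.
 */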
1630 static int
1631 node_add_check_queue(struct rte_eth_dev *dev,
1632         uint32_t node_id,
1633         uint32_t parent_node_id __rte_unused,
1634         uint32_t priority,
1635         uint32_t weight __rte_unused,
1636         uint32_t level_id __rte_unused,
1637         struct rte_tm_node_params *params,
1638         struct rte_tm_error *error)
1639 {
1640         struct pmd_internals *p = dev->data->dev_private;
1641
1642         /* node type: leaf */
1643         if (node_id >= p->params.tm.n_queues)
1644                 return -rte_tm_error_set(error,
1645                         EINVAL,
1646                         RTE_TM_ERROR_TYPE_NODE_ID,
1647                         NULL,
1648                         rte_strerror(EINVAL));
1649
1650         /* Priority must be 0 */
1651         if (priority != 0)
1652                 return -rte_tm_error_set(error,
1653                         EINVAL,
1654                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1655                         NULL,
1656                         rte_strerror(EINVAL));
1657
1658         /* No shaper */
1659         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1660                 return -rte_tm_error_set(error,
1661                         EINVAL,
1662                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1663                         NULL,
1664                         rte_strerror(EINVAL));
1665
1666         /* No shared shapers */
1667         if (params->n_shared_shapers != 0)
1668                 return -rte_tm_error_set(error,
1669                         EINVAL,
1670                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1671                         NULL,
1672                         rte_strerror(EINVAL));
1673
1674         /* Congestion management must not be head drop */
1675         if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
1676                 return -rte_tm_error_set(error,
1677                         EINVAL,
1678                         RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
1679                         NULL,
1680                         rte_strerror(EINVAL));
1681
1682         /* Congestion management set to WRED */
1683         if (params->leaf.cman == RTE_TM_CMAN_WRED) {
1684                 uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
1685                 struct tm_wred_profile *wp = tm_wred_profile_search(dev,
1686                         wred_profile_id);
1687
1688                 /* WRED profile (for private WRED context) must be valid */
1689                 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
1690                         wp == NULL)
1691                         return -rte_tm_error_set(error,
1692                                 EINVAL,
1693                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
1694                                 NULL,
1695                                 rte_strerror(EINVAL));
1696
1697                 /* No shared WRED contexts */
1698                 if (params->leaf.wred.n_shared_wred_contexts != 0)
1699                         return -rte_tm_error_set(error,
1700                                 EINVAL,
1701                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
1702                                 NULL,
1703                                 rte_strerror(EINVAL));
1704         }
1705
1706         /* Stats */
1707         if (params->stats_mask & ~STATS_MASK_QUEUE)
1708                 return -rte_tm_error_set(error,
1709                         EINVAL,
1710                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1711                         NULL,
1712                         rte_strerror(EINVAL));
1713
1714         return 0;
1715 }
1716
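/* Generic checks common to all node add requests (node and parent IDs,
 * priority, weight, level, non-NULL params), followed by a dispatch to the
 * per-level checker selected by the level derived from the parent node.
 */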
1717 static int
1718 node_add_check(struct rte_eth_dev *dev,
1719         uint32_t node_id,
1720         uint32_t parent_node_id,
1721         uint32_t priority,
1722         uint32_t weight,
1723         uint32_t level_id,
1724         struct rte_tm_node_params *params,
1725         struct rte_tm_error *error)
1726 {
1727         struct tm_node *pn;
1728         uint32_t level;
1729         int status;
1730
1731         /* node_id, parent_node_id:
1732          *    -node_id must not be RTE_TM_NODE_ID_NULL
1733          *    -node_id must not be in use
1734          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1735          *        -root node must not exist
1736          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1737          *        -parent_node_id must be valid
1738          */
1739         if (node_id == RTE_TM_NODE_ID_NULL)
1740                 return -rte_tm_error_set(error,
1741                         EINVAL,
1742                         RTE_TM_ERROR_TYPE_NODE_ID,
1743                         NULL,
1744                         rte_strerror(EINVAL));
1745
1746         if (tm_node_search(dev, node_id))
1747                 return -rte_tm_error_set(error,
1748                         EEXIST,
1749                         RTE_TM_ERROR_TYPE_NODE_ID,
1750                         NULL,
1751                         rte_strerror(EEXIST));
1752
1753         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
1754                 pn = NULL;
1755                 if (tm_root_node_present(dev))
1756                         return -rte_tm_error_set(error,
1757                                 EEXIST,
1758                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1759                                 NULL,
1760                                 rte_strerror(EEXIST));
1761         } else {
1762                 pn = tm_node_search(dev, parent_node_id);
1763                 if (pn == NULL)
1764                         return -rte_tm_error_set(error,
1765                                 EINVAL,
1766                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1767                                 NULL,
1768                                 rte_strerror(EINVAL));
1769         }
1770
1771         /* priority: must be 0 .. (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) */
1772         if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1773                 return -rte_tm_error_set(error,
1774                         EINVAL,
1775                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1776                         NULL,
1777                         rte_strerror(EINVAL));
1778
1779         /* weight: must be non-zero and strictly less than UINT8_MAX */
1780         if (weight == 0 || weight >= UINT8_MAX)
1781                 return -rte_tm_error_set(error,
1782                         EINVAL,
1783                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1784                         NULL,
1785                         rte_strerror(EINVAL));
1786
1787         /* level_id: if valid, then
1788          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1789          *        -level_id must be zero
1790          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1791          *        -level_id must be parent level ID plus one
1792          */
1793         level = (pn == NULL) ? 0 : pn->level + 1;
1794         if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
1795                 return -rte_tm_error_set(error,
1796                         EINVAL,
1797                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1798                         NULL,
1799                         rte_strerror(EINVAL));
1800
1801         /* params: must not be NULL */
1802         if (params == NULL)
1803                 return -rte_tm_error_set(error,
1804                         EINVAL,
1805                         RTE_TM_ERROR_TYPE_NODE_PARAMS,
1806                         NULL,
1807                         rte_strerror(EINVAL));
1808
1809         /* params: per level checks */
1810         switch (level) {
1811         case TM_NODE_LEVEL_PORT:
1812                 status = node_add_check_port(dev, node_id,
1813                         parent_node_id, priority, weight, level_id,
1814                         params, error);
1815                 if (status)
1816                         return status;
1817                 break;
1818
1819         case TM_NODE_LEVEL_SUBPORT:
1820                 status = node_add_check_subport(dev, node_id,
1821                         parent_node_id, priority, weight, level_id,
1822                         params, error);
1823                 if (status)
1824                         return status;
1825                 break;
1826
1827         case TM_NODE_LEVEL_PIPE:
1828                 status = node_add_check_pipe(dev, node_id,
1829                         parent_node_id, priority, weight, level_id,
1830                         params, error);
1831                 if (status)
1832                         return status;
1833                 break;
1834
1835         case TM_NODE_LEVEL_TC:
1836                 status = node_add_check_tc(dev, node_id,
1837                         parent_node_id, priority, weight, level_id,
1838                         params, error);
1839                 if (status)
1840                         return status;
1841                 break;
1842
1843         case TM_NODE_LEVEL_QUEUE:
1844                 status = node_add_check_queue(dev, node_id,
1845                         parent_node_id, priority, weight, level_id,
1846                         params, error);
1847                 if (status)
1848                         return status;
1849                 break;
1850
1851         default:
1852                 return -rte_tm_error_set(error,
1853                         EINVAL,
1854                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1855                         NULL,
1856                         rte_strerror(EINVAL));
1857         }
1858
1859         return 0;
1860 }
1861
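/* Illustrative usage sketch (not part of this driver): an application builds
 * the hierarchy top-down through the generic rte_tm API before committing it.
 * The node IDs and the shaper profile ID below are arbitrary example values.
 *
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = example_shaper_id,
 *		.nonleaf.n_sp_priorities = 1,
 *	};
 *	struct rte_tm_error err;
 *
 *	// Root (port level): no parent, priority 0, weight 1
 *	rte_tm_node_add(port_id, 1000000, RTE_TM_NODE_ID_NULL, 0, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *	// First subport, child of the root
 *	rte_tm_node_add(port_id, 900000, 1000000, 0, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 */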
1862 /* Traffic manager node add */
1863 static int
1864 pmd_tm_node_add(struct rte_eth_dev *dev,
1865         uint32_t node_id,
1866         uint32_t parent_node_id,
1867         uint32_t priority,
1868         uint32_t weight,
1869         uint32_t level_id,
1870         struct rte_tm_node_params *params,
1871         struct rte_tm_error *error)
1872 {
1873         struct pmd_internals *p = dev->data->dev_private;
1874         struct tm_node_list *nl = &p->soft.tm.h.nodes;
1875         struct tm_node *n;
1876         uint32_t i;
1877         int status;
1878
1879         /* Checks */
1880         if (p->soft.tm.hierarchy_frozen)
1881                 return -rte_tm_error_set(error,
1882                         EBUSY,
1883                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1884                         NULL,
1885                         rte_strerror(EBUSY));
1886
1887         status = node_add_check(dev, node_id, parent_node_id, priority, weight,
1888                 level_id, params, error);
1889         if (status)
1890                 return status;
1891
1892         /* Memory allocation */
1893         n = calloc(1, sizeof(struct tm_node));
1894         if (n == NULL)
1895                 return -rte_tm_error_set(error,
1896                         ENOMEM,
1897                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1898                         NULL,
1899                         rte_strerror(ENOMEM));
1900
1901         /* Fill in */
1902         n->node_id = node_id;
1903         n->parent_node_id = parent_node_id;
1904         n->priority = priority;
1905         n->weight = weight;
1906
1907         if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1908                 n->parent_node = tm_node_search(dev, parent_node_id);
1909                 n->level = n->parent_node->level + 1;
1910         }
1911
1912         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1913                 n->shaper_profile = tm_shaper_profile_search(dev,
1914                         params->shaper_profile_id);
1915
1916         if (n->level == TM_NODE_LEVEL_QUEUE &&
1917                 params->leaf.cman == RTE_TM_CMAN_WRED)
1918                 n->wred_profile = tm_wred_profile_search(dev,
1919                         params->leaf.wred.wred_profile_id);
1920
1921         memcpy(&n->params, params, sizeof(n->params));
1922
1923         /* Add to list */
1924         TAILQ_INSERT_TAIL(nl, n, node);
1925         p->soft.tm.h.n_nodes++;
1926
1927         /* Update dependencies */
1928         if (n->parent_node)
1929                 n->parent_node->n_children++;
1930
1931         if (n->shaper_profile)
1932                 n->shaper_profile->n_users++;
1933
1934         for (i = 0; i < params->n_shared_shapers; i++) {
1935                 struct tm_shared_shaper *ss;
1936
1937                 ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
1938                 ss->n_users++;
1939         }
1940
1941         if (n->wred_profile)
1942                 n->wred_profile->n_users++;
1943
1944         p->soft.tm.h.n_tm_nodes[n->level]++;
1945
1946         return 0;
1947 }
1948
1949 /* Traffic manager node delete */
1950 static int
1951 pmd_tm_node_delete(struct rte_eth_dev *dev,
1952         uint32_t node_id,
1953         struct rte_tm_error *error)
1954 {
1955         struct pmd_internals *p = dev->data->dev_private;
1956         struct tm_node *n;
1957         uint32_t i;
1958
1959         /* Check hierarchy changes are currently allowed */
1960         if (p->soft.tm.hierarchy_frozen)
1961                 return -rte_tm_error_set(error,
1962                         EBUSY,
1963                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1964                         NULL,
1965                         rte_strerror(EBUSY));
1966
1967         /* Check existing */
1968         n = tm_node_search(dev, node_id);
1969         if (n == NULL)
1970                 return -rte_tm_error_set(error,
1971                         EINVAL,
1972                         RTE_TM_ERROR_TYPE_NODE_ID,
1973                         NULL,
1974                         rte_strerror(EINVAL));
1975
1976         /* Check unused */
1977         if (n->n_children)
1978                 return -rte_tm_error_set(error,
1979                         EBUSY,
1980                         RTE_TM_ERROR_TYPE_NODE_ID,
1981                         NULL,
1982                         rte_strerror(EBUSY));
1983
1984         /* Update dependencies */
1985         p->soft.tm.h.n_tm_nodes[n->level]--;
1986
1987         if (n->wred_profile)
1988                 n->wred_profile->n_users--;
1989
1990         for (i = 0; i < n->params.n_shared_shapers; i++) {
1991                 struct tm_shared_shaper *ss;
1992
1993                 ss = tm_shared_shaper_search(dev,
1994                                 n->params.shared_shaper_id[i]);
1995                 ss->n_users--;
1996         }
1997
1998         if (n->shaper_profile)
1999                 n->shaper_profile->n_users--;
2000
2001         if (n->parent_node)
2002                 n->parent_node->n_children--;
2003
2004         /* Remove from list */
2005         TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
2006         p->soft.tm.h.n_nodes--;
2007         free(n);
2008
2009         return 0;
2010 }
2011
2012
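/* Build an rte_sched pipe profile from a PIPE-level node: token bucket rate
 * and size come from the pipe's shaper profile, tc_ov_weight from the pipe's
 * TM weight, per-TC rates from the child TC nodes' shaper profiles, and WRR
 * weights from the queues of the best-effort TC.
 */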
2013 static void
2014 pipe_profile_build(struct rte_eth_dev *dev,
2015         struct tm_node *np,
2016         struct rte_sched_pipe_params *pp)
2017 {
2018         struct pmd_internals *p = dev->data->dev_private;
2019         struct tm_hierarchy *h = &p->soft.tm.h;
2020         struct tm_node_list *nl = &h->nodes;
2021         struct tm_node *nt, *nq;
2022
2023         memset(pp, 0, sizeof(*pp));
2024
2025         /* Pipe */
2026         pp->tb_rate = np->shaper_profile->params.peak.rate;
2027         pp->tb_size = np->shaper_profile->params.peak.size;
2028
2029         /* Traffic Class (TC) */
2030         pp->tc_period = PIPE_TC_PERIOD;
2031
2032         pp->tc_ov_weight = np->weight;
2033
2034         TAILQ_FOREACH(nt, nl, node) {
2035                 uint32_t queue_id = 0;
2036
2037                 if (nt->level != TM_NODE_LEVEL_TC ||
2038                         nt->parent_node_id != np->node_id)
2039                         continue;
2040
2041                 pp->tc_rate[nt->priority] =
2042                         nt->shaper_profile->params.peak.rate;
2043
2044                 /* Queue */
2045                 TAILQ_FOREACH(nq, nl, node) {
2046
2047                         if (nq->level != TM_NODE_LEVEL_QUEUE ||
2048                                 nq->parent_node_id != nt->node_id)
2049                                 continue;
2050
2051                         if (nt->priority == RTE_SCHED_TRAFFIC_CLASS_BE)
2052                                 pp->wrr_weights[queue_id] = nq->weight;
2053
2054                         queue_id++;
2055                 }
2056         }
2057 }
2058
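/* Return 1 and the next free profile slot if the pipe profile table is not
 * full, 0 otherwise.
 */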
2059 static int
2060 pipe_profile_free_exists(struct rte_eth_dev *dev,
2061         uint32_t *pipe_profile_id)
2062 {
2063         struct pmd_internals *p = dev->data->dev_private;
2064         struct tm_params *t = &p->soft.tm.params;
2065
2066         if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) {
2067                 *pipe_profile_id = t->n_pipe_profiles;
2068                 return 1;
2069         }
2070
2071         return 0;
2072 }
2073
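/* Return 1 (and optionally the matching profile ID) if an identical pipe
 * profile is already installed, 0 otherwise.
 */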
2074 static int
2075 pipe_profile_exists(struct rte_eth_dev *dev,
2076         struct rte_sched_pipe_params *pp,
2077         uint32_t *pipe_profile_id)
2078 {
2079         struct pmd_internals *p = dev->data->dev_private;
2080         struct tm_params *t = &p->soft.tm.params;
2081         uint32_t i;
2082
2083         for (i = 0; i < t->n_pipe_profiles; i++)
2084                 if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
2085                         if (pipe_profile_id)
2086                                 *pipe_profile_id = i;
2087                         return 1;
2088                 }
2089
2090         return 0;
2091 }
2092
2093 static void
2094 pipe_profile_install(struct rte_eth_dev *dev,
2095         struct rte_sched_pipe_params *pp,
2096         uint32_t pipe_profile_id)
2097 {
2098         struct pmd_internals *p = dev->data->dev_private;
2099         struct tm_params *t = &p->soft.tm.params;
2100
2101         memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
2102         t->n_pipe_profiles++;
2103 }
2104
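/* Record the pipe profile used by a given (subport, pipe) position in the
 * pipe_to_profile table.
 */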
2105 static void
2106 pipe_profile_mark(struct rte_eth_dev *dev,
2107         uint32_t subport_id,
2108         uint32_t pipe_id,
2109         uint32_t pipe_profile_id)
2110 {
2111         struct pmd_internals *p = dev->data->dev_private;
2112         struct tm_hierarchy *h = &p->soft.tm.h;
2113         struct tm_params *t = &p->soft.tm.params;
2114         uint32_t n_pipes_per_subport, pos;
2115
2116         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2117                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2118         pos = subport_id * n_pipes_per_subport + pipe_id;
2119
2120         t->pipe_to_profile[pos] = pipe_profile_id;
2121 }
2122
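/* Look up the installed pipe profile for the pipe node's (subport, pipe)
 * position via the pipe_to_profile table.
 */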
2123 static struct rte_sched_pipe_params *
2124 pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
2125 {
2126         struct pmd_internals *p = dev->data->dev_private;
2127         struct tm_hierarchy *h = &p->soft.tm.h;
2128         struct tm_params *t = &p->soft.tm.params;
2129         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2130                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2131
2132         uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
2133         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2134
2135         uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
2136         uint32_t pipe_profile_id = t->pipe_to_profile[pos];
2137
2138         return &t->pipe_profiles[pipe_profile_id];
2139 }
2140
2141 static int
2142 pipe_profiles_generate(struct rte_eth_dev *dev)
2143 {
2144         struct pmd_internals *p = dev->data->dev_private;
2145         struct tm_hierarchy *h = &p->soft.tm.h;
2146         struct tm_node_list *nl = &h->nodes;
2147         struct tm_node *ns, *np;
2148         uint32_t subport_id;
2149
2150         /* Objective: Fill in the following fields in struct tm_params:
2151          *    - pipe_profiles
2152          *    - n_pipe_profiles
2153          *    - pipe_to_profile
2154          */
2155
2156         subport_id = 0;
2157         TAILQ_FOREACH(ns, nl, node) {
2158                 uint32_t pipe_id;
2159
2160                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2161                         continue;
2162
2163                 pipe_id = 0;
2164                 TAILQ_FOREACH(np, nl, node) {
2165                         struct rte_sched_pipe_params pp;
2166                         uint32_t pos;
2167
2168                         if (np->level != TM_NODE_LEVEL_PIPE ||
2169                                 np->parent_node_id != ns->node_id)
2170                                 continue;
2171
2172                         pipe_profile_build(dev, np, &pp);
2173
2174                         if (!pipe_profile_exists(dev, &pp, &pos)) {
2175                                 if (!pipe_profile_free_exists(dev, &pos))
2176                                         return -1;
2177
2178                                 pipe_profile_install(dev, &pp, pos);
2179                         }
2180
2181                         pipe_profile_mark(dev, subport_id, pipe_id, pos);
2182
2183                         pipe_id++;
2184                 }
2185
2186                 subport_id++;
2187         }
2188
2189         return 0;
2190 }
2191
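/* Return the WRED profile of the first queue found whose parent TC has the
 * given priority (hierarchy_commit_check() enforces that all queues of a TC
 * use the same WRED profile).
 */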
2192 static struct tm_wred_profile *
2193 tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
2194 {
2195         struct pmd_internals *p = dev->data->dev_private;
2196         struct tm_hierarchy *h = &p->soft.tm.h;
2197         struct tm_node_list *nl = &h->nodes;
2198         struct tm_node *nq;
2199
2200         TAILQ_FOREACH(nq, nl, node) {
2201                 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2202                         nq->parent_node->priority != tc_id)
2203                         continue;
2204
2205                 return nq->wred_profile;
2206         }
2207
2208         return NULL;
2209 }
2210
2211 #ifdef RTE_SCHED_RED
2212
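/* Copy the per-TC, per-color RED parameters from the TM WRED profiles into
 * the rte_sched port parameters (only compiled in when RTE_SCHED_RED is
 * enabled).
 */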
2213 static void
2214 wred_profiles_set(struct rte_eth_dev *dev)
2215 {
2216         struct pmd_internals *p = dev->data->dev_private;
2217         struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
2218
2219         uint32_t tc_id;
2220         enum rte_color color;
2221
2222         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
2223                 for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
2224                         struct rte_red_params *dst =
2225                                 &pp->red_params[tc_id][color];
2226                         struct tm_wred_profile *src_wp =
2227                                 tm_tc_wred_profile_get(dev, tc_id);
2228                         struct rte_tm_red_params *src =
2229                                 &src_wp->params.red_params[color];
2230
2231                         memcpy(dst, src, sizeof(*dst));
2232                 }
2233 }
2234
2235 #else
2236
2237 #define wred_profiles_set(dev)
2238
2239 #endif
2240
2241 static struct tm_shared_shaper *
2242 tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
2243 {
2244         return (tc_node->params.n_shared_shapers) ?
2245                 tm_shared_shaper_search(dev,
2246                         tc_node->params.shared_shaper_id[0]) :
2247                 NULL;
2248 }
2249
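/* Find the shared shaper used for traffic class tc_id under the given
 * subport by locating any TC node with that priority below the subport.
 */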
2250 static struct tm_shared_shaper *
2251 tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
2252         struct tm_node *subport_node,
2253         uint32_t tc_id)
2254 {
2255         struct pmd_internals *p = dev->data->dev_private;
2256         struct tm_node_list *nl = &p->soft.tm.h.nodes;
2257         struct tm_node *n;
2258
2259         TAILQ_FOREACH(n, nl, node) {
2260                 if (n->level != TM_NODE_LEVEL_TC ||
2261                         n->parent_node->parent_node_id !=
2262                                 subport_node->node_id ||
2263                         n->priority != tc_id)
2264                         continue;
2265
2266                 return tm_tc_shared_shaper_get(dev, n);
2267         }
2268
2269         return NULL;
2270 }
2271
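/* Validate the complete hierarchy before it is frozen: root and subports
 * present, same number of pipes per subport, one TC per priority per pipe,
 * expected queue counts per TC, consistent shared shaper usage per subport
 * and TC, a bounded number of pipe profiles, and consistent WRED profile
 * usage across leaf nodes.
 */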
2272 static int
2273 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2274 {
2275         struct pmd_internals *p = dev->data->dev_private;
2276         struct tm_hierarchy *h = &p->soft.tm.h;
2277         struct tm_node_list *nl = &h->nodes;
2278         struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2279         struct tm_wred_profile_list *wpl = &h->wred_profiles;
2280         struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2281         struct tm_shared_shaper *ss;
2282
2283         uint32_t n_pipes_per_subport;
2284
2285         /* Root node exists. */
2286         if (nr == NULL)
2287                 return -rte_tm_error_set(error,
2288                         EINVAL,
2289                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2290                         NULL,
2291                         rte_strerror(EINVAL));
2292
2293         /* There is at least one subport, max is not exceeded. */
2294         if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2295                 return -rte_tm_error_set(error,
2296                         EINVAL,
2297                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2298                         NULL,
2299                         rte_strerror(EINVAL));
2300
2301         /* There is at least one pipe. */
2302         if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2303                 return -rte_tm_error_set(error,
2304                         EINVAL,
2305                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2306                         NULL,
2307                         rte_strerror(EINVAL));
2308
2309         /* Number of pipes is the same for all subports. Maximum number of pipes
2310          * per subport is not exceeded.
2311          */
2312         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2313                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2314
2315         if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2316                 return -rte_tm_error_set(error,
2317                         EINVAL,
2318                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2319                         NULL,
2320                         rte_strerror(EINVAL));
2321
2322         TAILQ_FOREACH(ns, nl, node) {
2323                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2324                         continue;
2325
2326                 if (ns->n_children != n_pipes_per_subport)
2327                         return -rte_tm_error_set(error,
2328                                 EINVAL,
2329                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2330                                 NULL,
2331                                 rte_strerror(EINVAL));
2332         }
2333
2334         /* Each pipe has exactly 13 TCs, with exactly one TC for each priority */
2335         TAILQ_FOREACH(np, nl, node) {
2336                 uint32_t mask = 0, mask_expected =
2337                         RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2338                                 uint32_t);
2339
2340                 if (np->level != TM_NODE_LEVEL_PIPE)
2341                         continue;
2342
2343                 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2344                         return -rte_tm_error_set(error,
2345                                 EINVAL,
2346                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2347                                 NULL,
2348                                 rte_strerror(EINVAL));
2349
2350                 TAILQ_FOREACH(nt, nl, node) {
2351                         if (nt->level != TM_NODE_LEVEL_TC ||
2352                                 nt->parent_node_id != np->node_id)
2353                                 continue;
2354
2355                         mask |= 1 << nt->priority;
2356                 }
2357
2358                 if (mask != mask_expected)
2359                         return -rte_tm_error_set(error,
2360                                 EINVAL,
2361                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2362                                 NULL,
2363                                 rte_strerror(EINVAL));
2364         }
2365
2366         /** Each strict priority TC has exactly one packet queue, while the
2367          *      lowest priority TC (best-effort) has four queues.
2368          */
2369         TAILQ_FOREACH(nt, nl, node) {
2370                 if (nt->level != TM_NODE_LEVEL_TC)
2371                         continue;
2372
2373                 if (nt->n_children != 1 && nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE)
2374                         return -rte_tm_error_set(error,
2375                                 EINVAL,
2376                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2377                                 NULL,
2378                                 rte_strerror(EINVAL));
2379         }
2380
2381         /**
2382          * Shared shapers:
2383          *    -For each TC #i, all pipes in the same subport use the same
2384          *     shared shaper (or no shared shaper) for their TC#i.
2385          *    -Each shared shaper needs to have at least one user. All its
2386          *     users have to be TC nodes with the same priority and the same
2387          *     subport.
2388          */
2389         TAILQ_FOREACH(ns, nl, node) {
2390                 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2391                 uint32_t id;
2392
2393                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2394                         continue;
2395
2396                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2397                         s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2398
2399                 TAILQ_FOREACH(nt, nl, node) {
2400                         struct tm_shared_shaper *subport_ss, *tc_ss;
2401
2402                         if (nt->level != TM_NODE_LEVEL_TC ||
2403                                 nt->parent_node->parent_node_id !=
2404                                         ns->node_id)
2405                                 continue;
2406
2407                         subport_ss = s[nt->priority];
2408                         tc_ss = tm_tc_shared_shaper_get(dev, nt);
2409
2410                         if (subport_ss == NULL && tc_ss == NULL)
2411                                 continue;
2412
2413                         if ((subport_ss == NULL && tc_ss != NULL) ||
2414                                 (subport_ss != NULL && tc_ss == NULL) ||
2415                                 subport_ss->shared_shaper_id !=
2416                                         tc_ss->shared_shaper_id)
2417                                 return -rte_tm_error_set(error,
2418                                         EINVAL,
2419                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2420                                         NULL,
2421                                         rte_strerror(EINVAL));
2422                 }
2423         }
2424
2425         TAILQ_FOREACH(ss, ssl, node) {
2426                 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2427                 uint32_t n_users = 0;
2428
2429                 if (nt_any != NULL)
2430                         TAILQ_FOREACH(nt, nl, node) {
2431                                 if (nt->level != TM_NODE_LEVEL_TC ||
2432                                         nt->priority != nt_any->priority ||
2433                                         nt->parent_node->parent_node_id !=
2434                                         nt_any->parent_node->parent_node_id)
2435                                         continue;
2436
2437                                 n_users++;
2438                         }
2439
2440                 if (ss->n_users == 0 || ss->n_users != n_users)
2441                         return -rte_tm_error_set(error,
2442                                 EINVAL,
2443                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2444                                 NULL,
2445                                 rte_strerror(EINVAL));
2446         }
2447
2448         /* Not too many pipe profiles. */
2449         if (pipe_profiles_generate(dev))
2450                 return -rte_tm_error_set(error,
2451                         EINVAL,
2452                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2453                         NULL,
2454                         rte_strerror(EINVAL));
2455
2456         /**
2457          * WRED (when used, i.e. at least one WRED profile defined):
2458          *    -Each WRED profile must have at least one user.
2459          *    -All leaf nodes must have their private WRED context enabled.
2460          *    -For each TC #i, all leaf nodes must use the same WRED profile
2461          *     for their private WRED context.
2462          */
2463         if (h->n_wred_profiles) {
2464                 struct tm_wred_profile *wp;
2465                 struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2466                 uint32_t id;
2467
2468                 TAILQ_FOREACH(wp, wpl, node)
2469                         if (wp->n_users == 0)
2470                                 return -rte_tm_error_set(error,
2471                                         EINVAL,
2472                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2473                                         NULL,
2474                                         rte_strerror(EINVAL));
2475
2476                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
2477                         w[id] = tm_tc_wred_profile_get(dev, id);
2478
2479                         if (w[id] == NULL)
2480                                 return -rte_tm_error_set(error,
2481                                         EINVAL,
2482                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2483                                         NULL,
2484                                         rte_strerror(EINVAL));
2485                 }
2486
2487                 TAILQ_FOREACH(nq, nl, node) {
2488                         uint32_t id;
2489
2490                         if (nq->level != TM_NODE_LEVEL_QUEUE)
2491                                 continue;
2492
2493                         id = nq->parent_node->priority;
2494
2495                         if (nq->wred_profile == NULL ||
2496                                 nq->wred_profile->wred_profile_id !=
2497                                         w[id]->wred_profile_id)
2498                                 return -rte_tm_error_set(error,
2499                                         EINVAL,
2500                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2501                                         NULL,
2502                                         rte_strerror(EINVAL));
2503                 }
2504         }
2505
2506         return 0;
2507 }
2508
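/* Translate the committed TM hierarchy into the rte_sched port and subport
 * parameter blueprints that softnic_tmgr_port_create() later uses to
 * configure the scheduler.
 */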
2509 static void
2510 hierarchy_blueprints_create(struct rte_eth_dev *dev)
2511 {
2512         struct pmd_internals *p = dev->data->dev_private;
2513         struct tm_params *t = &p->soft.tm.params;
2514         struct tm_hierarchy *h = &p->soft.tm.h;
2515
2516         struct tm_node_list *nl = &h->nodes;
2517         struct tm_node *root = tm_root_node_present(dev), *n;
2518
2519         uint32_t subport_id;
2520
2521         t->port_params = (struct rte_sched_port_params) {
2522                 .name = dev->data->name,
2523                 .socket = dev->data->numa_node,
2524                 .rate = root->shaper_profile->params.peak.rate,
2525                 .mtu = dev->data->mtu,
2526                 .frame_overhead =
2527                         root->shaper_profile->params.pkt_length_adjust,
2528                 .n_subports_per_port = root->n_children,
2529                 .n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2530                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
2531                 .qsize = {p->params.tm.qsize[0],
2532                         p->params.tm.qsize[1],
2533                         p->params.tm.qsize[2],
2534                         p->params.tm.qsize[3],
2535                         p->params.tm.qsize[4],
2536                         p->params.tm.qsize[5],
2537                         p->params.tm.qsize[6],
2538                         p->params.tm.qsize[7],
2539                         p->params.tm.qsize[8],
2540                         p->params.tm.qsize[9],
2541                         p->params.tm.qsize[10],
2542                         p->params.tm.qsize[11],
2543                         p->params.tm.qsize[12],
2544                 },
2545                 .pipe_profiles = t->pipe_profiles,
2546                 .n_pipe_profiles = t->n_pipe_profiles,
2547                 .n_max_pipe_profiles = TM_MAX_PIPE_PROFILE,
2548         };
2549
2550         wred_profiles_set(dev);
2551
2552         subport_id = 0;
2553         TAILQ_FOREACH(n, nl, node) {
2554                 uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2555                 uint32_t i;
2556
2557                 if (n->level != TM_NODE_LEVEL_SUBPORT)
2558                         continue;
2559
2560                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
2561                         struct tm_shared_shaper *ss;
2562                         struct tm_shaper_profile *sp;
2563
2564                         ss = tm_subport_tc_shared_shaper_get(dev, n, i);
2565                         sp = (ss) ? tm_shaper_profile_search(dev,
2566                                 ss->shaper_profile_id) :
2567                                 n->shaper_profile;
2568                         tc_rate[i] = sp->params.peak.rate;
2569                 }
2570
2571                 t->subport_params[subport_id] =
2572                         (struct rte_sched_subport_params) {
2573                                 .tb_rate = n->shaper_profile->params.peak.rate,
2574                                 .tb_size = n->shaper_profile->params.peak.size,
2575
2576                                 .tc_rate = {tc_rate[0],
2577                                         tc_rate[1],
2578                                         tc_rate[2],
2579                                         tc_rate[3],
2580                                         tc_rate[4],
2581                                         tc_rate[5],
2582                                         tc_rate[6],
2583                                         tc_rate[7],
2584                                         tc_rate[8],
2585                                         tc_rate[9],
2586                                         tc_rate[10],
2587                                         tc_rate[11],
2588                                         tc_rate[12],
2589                                 },
2590                                 .tc_period = SUBPORT_TC_PERIOD,
2591                 };
2592
2593                 subport_id++;
2594         }
2595 }
2596
2597 /* Traffic manager hierarchy commit */
2598 static int
2599 pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
2600         int clear_on_fail,
2601         struct rte_tm_error *error)
2602 {
2603         struct pmd_internals *p = dev->data->dev_private;
2604         int status;
2605
2606         /* Checks */
2607         if (p->soft.tm.hierarchy_frozen)
2608                 return -rte_tm_error_set(error,
2609                         EBUSY,
2610                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2611                         NULL,
2612                         rte_strerror(EBUSY));
2613
2614         status = hierarchy_commit_check(dev, error);
2615         if (status) {
2616                 if (clear_on_fail)
2617                         tm_hierarchy_free(p);
2618
2619                 return status;
2620         }
2621
2622         /* Create blueprints */
2623         hierarchy_blueprints_create(dev);
2624
2625         /* Freeze hierarchy */
2626         p->soft.tm.hierarchy_frozen = 1;
2627
2628         return 0;
2629 }
2630
2631 #ifdef RTE_SCHED_SUBPORT_TC_OV
2632
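/* Update the TC oversubscription weight of a pipe by switching it to an
 * already installed pipe profile that differs only in tc_ov_weight.
 */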
2633 static int
2634 update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
2635 {
2636         struct pmd_internals *p = dev->data->dev_private;
2637         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2638
2639         struct tm_node *ns = np->parent_node;
2640         uint32_t subport_id = tm_node_subport_id(dev, ns);
2641
2642         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2643         struct rte_sched_pipe_params profile1;
2644         uint32_t pipe_profile_id;
2645
2646         /* Derive new pipe profile. */
2647         memcpy(&profile1, profile0, sizeof(profile1));
2648         profile1.tc_ov_weight = (uint8_t)weight;
2649
2650         /* Since the implementation does not allow adding more pipe profiles
2651          * after port configuration, the pipe configuration can be successfully
2652          * updated only if the new profile is also part of the existing set of
2653          * pipe profiles.
2654          */
2655         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2656                 return -1;
2657
2658         /* Update the pipe profile used by the current pipe. */
2659         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2660                 (int32_t)pipe_profile_id))
2661                 return -1;
2662
2663         /* Commit changes. */
2664         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2665         np->weight = weight;
2666
2667         return 0;
2668 }
2669
2670 #endif
2671
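/* Update the WRR weight of a (best-effort) queue by switching its pipe to an
 * already installed pipe profile that differs only in that queue's WRR
 * weight.
 */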
2672 static int
2673 update_queue_weight(struct rte_eth_dev *dev,
2674         struct tm_node *nq, uint32_t weight)
2675 {
2676         struct pmd_internals *p = dev->data->dev_private;
2677         uint32_t queue_id = tm_node_queue_id(dev, nq);
2678
2679         struct tm_node *nt = nq->parent_node;
2680
2681         struct tm_node *np = nt->parent_node;
2682         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2683
2684         struct tm_node *ns = np->parent_node;
2685         uint32_t subport_id = tm_node_subport_id(dev, ns);
2686
2687         uint32_t pipe_be_queue_id =
2688                 queue_id - RTE_SCHED_TRAFFIC_CLASS_BE;
2689
2690         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2691         struct rte_sched_pipe_params profile1;
2692         uint32_t pipe_profile_id;
2693
2694         /* Derive new pipe profile. */
2695         memcpy(&profile1, profile0, sizeof(profile1));
2696         profile1.wrr_weights[pipe_be_queue_id] = (uint8_t)weight;
2697
2698         /* Since the implementation does not allow adding more pipe profiles
2699          * after port configuration, the pipe configuration can be successfully
2700          * updated only if the new profile is also part of the existing set
2701          * of pipe profiles.
2702          */
2703         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2704                 return -1;
2705
2706         /* Update the pipe profile used by the current pipe. */
2707         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2708                 (int32_t)pipe_profile_id))
2709                 return -1;
2710
2711         /* Commit changes. */
2712         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2713         nq->weight = weight;
2714
2715         return 0;
2716 }
2717
2718 /* Traffic manager node parent update */
2719 static int
2720 pmd_tm_node_parent_update(struct rte_eth_dev *dev,
2721         uint32_t node_id,
2722         uint32_t parent_node_id,
2723         uint32_t priority,
2724         uint32_t weight,
2725         struct rte_tm_error *error)
2726 {
2727         struct tm_node *n;
2728
2729         /* Port must be started and TM used. */
2730         if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
2731                 return -rte_tm_error_set(error,
2732                         EBUSY,
2733                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2734                         NULL,
2735                         rte_strerror(EBUSY));
2736
2737         /* Node must be valid */
2738         n = tm_node_search(dev, node_id);
2739         if (n == NULL)
2740                 return -rte_tm_error_set(error,
2741                         EINVAL,
2742                         RTE_TM_ERROR_TYPE_NODE_ID,
2743                         NULL,
2744                         rte_strerror(EINVAL));
2745
2746         /* Parent node must be the same */
2747         if (n->parent_node_id != parent_node_id)
2748                 return -rte_tm_error_set(error,
2749                         EINVAL,
2750                         RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
2751                         NULL,
2752                         rte_strerror(EINVAL));
2753
2754         /* Priority must be the same */
2755         if (n->priority != priority)
2756                 return -rte_tm_error_set(error,
2757                         EINVAL,
2758                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
2759                         NULL,
2760                         rte_strerror(EINVAL));
2761
2762         /* weight: must be non-zero and strictly less than UINT8_MAX */
2763         if (weight == 0 || weight >= UINT8_MAX)
2764                 return -rte_tm_error_set(error,
2765                         EINVAL,
2766                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2767                         NULL,
2768                         rte_strerror(EINVAL));
2769
2770         switch (n->level) {
2771         case TM_NODE_LEVEL_PORT:
2772                 return -rte_tm_error_set(error,
2773                         EINVAL,
2774                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2775                         NULL,
2776                         rte_strerror(EINVAL));
2777                 /* fall-through */
2778         case TM_NODE_LEVEL_SUBPORT:
2779                 return -rte_tm_error_set(error,
2780                         EINVAL,
2781                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2782                         NULL,
2783                         rte_strerror(EINVAL));
2784                 /* fall-through */
2785         case TM_NODE_LEVEL_PIPE:
2786 #ifdef RTE_SCHED_SUBPORT_TC_OV
2787                 if (update_pipe_weight(dev, n, weight))
2788                         return -rte_tm_error_set(error,
2789                                 EINVAL,
2790                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2791                                 NULL,
2792                                 rte_strerror(EINVAL));
2793                 return 0;
2794 #else
2795                 return -rte_tm_error_set(error,
2796                         EINVAL,
2797                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2798                         NULL,
2799                         rte_strerror(EINVAL));
2800 #endif
2801                 /* fall-through */
2802         case TM_NODE_LEVEL_TC:
2803                 return -rte_tm_error_set(error,
2804                         EINVAL,
2805                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2806                         NULL,
2807                         rte_strerror(EINVAL));
2808                 /* fall-through */
2809         case TM_NODE_LEVEL_QUEUE:
2810                 /* fall-through */
2811         default:
2812                 if (update_queue_weight(dev, n, weight))
2813                         return -rte_tm_error_set(error,
2814                                 EINVAL,
2815                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2816                                 NULL,
2817                                 rte_strerror(EINVAL));
2818                 return 0;
2819         }
2820 }
2821
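/* Re-run rte_sched_subport_config() with the new token bucket rate and size
 * and, on success, move the subport node to the new shaper profile.
 */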
2822 static int
2823 update_subport_rate(struct rte_eth_dev *dev,
2824         struct tm_node *ns,
2825         struct tm_shaper_profile *sp)
2826 {
2827         struct pmd_internals *p = dev->data->dev_private;
2828         uint32_t subport_id = tm_node_subport_id(dev, ns);
2829
2830         struct rte_sched_subport_params subport_params;
2831
2832         /* Derive new subport configuration. */
2833         memcpy(&subport_params,
2834                 &p->soft.tm.params.subport_params[subport_id],
2835                 sizeof(subport_params));
2836         subport_params.tb_rate = sp->params.peak.rate;
2837         subport_params.tb_size = sp->params.peak.size;
2838
2839         /* Update the subport configuration. */
2840         if (rte_sched_subport_config(SCHED(p), subport_id,
2841                 &subport_params))
2842                 return -1;
2843
2844         /* Commit changes. */
2845         ns->shaper_profile->n_users--;
2846
2847         ns->shaper_profile = sp;
2848         ns->params.shaper_profile_id = sp->shaper_profile_id;
2849         sp->n_users++;
2850
2851         memcpy(&p->soft.tm.params.subport_params[subport_id],
2852                 &subport_params,
2853                 sizeof(subport_params));
2854
2855         return 0;
2856 }
2857
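/* Update a pipe's token bucket rate and size by switching it to an already
 * installed pipe profile with the new values and moving the node to the new
 * shaper profile.
 */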
2858 static int
2859 update_pipe_rate(struct rte_eth_dev *dev,
2860         struct tm_node *np,
2861         struct tm_shaper_profile *sp)
2862 {
2863         struct pmd_internals *p = dev->data->dev_private;
2864         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2865
2866         struct tm_node *ns = np->parent_node;
2867         uint32_t subport_id = tm_node_subport_id(dev, ns);
2868
2869         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2870         struct rte_sched_pipe_params profile1;
2871         uint32_t pipe_profile_id;
2872
2873         /* Derive new pipe profile. */
2874         memcpy(&profile1, profile0, sizeof(profile1));
2875         profile1.tb_rate = sp->params.peak.rate;
2876         profile1.tb_size = sp->params.peak.size;
2877
2878         /* Since the implementation does not allow adding more pipe profiles
2879          * after port configuration, the pipe configuration can be successfully
2880          * updated only if the new profile is also part of the existing set of
2881          * pipe profiles.
2882          */
2883         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2884                 return -1;
2885
2886         /* Update the pipe profile used by the current pipe. */
2887         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2888                 (int32_t)pipe_profile_id))
2889                 return -1;
2890
2891         /* Commit changes. */
2892         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2893         np->shaper_profile->n_users--;
2894         np->shaper_profile = sp;
2895         np->params.shaper_profile_id = sp->shaper_profile_id;
2896         sp->n_users++;
2897
2898         return 0;
2899 }
2900
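/* Update a traffic class rate by switching its pipe to an already installed
 * pipe profile with the new tc_rate and moving the TC node to the new shaper
 * profile.
 */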
2901 static int
2902 update_tc_rate(struct rte_eth_dev *dev,
2903         struct tm_node *nt,
2904         struct tm_shaper_profile *sp)
2905 {
2906         struct pmd_internals *p = dev->data->dev_private;
2907         uint32_t tc_id = tm_node_tc_id(dev, nt);
2908
2909         struct tm_node *np = nt->parent_node;
2910         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2911
2912         struct tm_node *ns = np->parent_node;
2913         uint32_t subport_id = tm_node_subport_id(dev, ns);
2914
2915         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2916         struct rte_sched_pipe_params profile1;
2917         uint32_t pipe_profile_id;
2918
2919         /* Derive new pipe profile. */
2920         memcpy(&profile1, profile0, sizeof(profile1));
2921         profile1.tc_rate[tc_id] = sp->params.peak.rate;
2922
2923         /* Since the implementation does not allow adding more pipe profiles
2924          * after port configuration, the pipe configuration can be successfully
2925          * updated only if the new profile is also part of the existing set of
2926          * pipe profiles.
2927          */
2928         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2929                 return -1;
2930
2931         /* Update the pipe profile used by the current pipe. */
2932         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2933                 (int32_t)pipe_profile_id))
2934                 return -1;
2935
2936         /* Commit changes. */
2937         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2938         nt->shaper_profile->n_users--;
2939         nt->shaper_profile = sp;
2940         nt->params.shaper_profile_id = sp->shaper_profile_id;
2941         sp->n_users++;
2942
2943         return 0;
2944 }
2945
2946 /* Traffic manager node shaper update */
2947 static int
2948 pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
2949         uint32_t node_id,
2950         uint32_t shaper_profile_id,
2951         struct rte_tm_error *error)
2952 {
2953         struct tm_node *n;
2954         struct tm_shaper_profile *sp;
2955
2956         /* Port must be started and TM used. */
2957         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
2958                 return -rte_tm_error_set(error,
2959                         EBUSY,
2960                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2961                         NULL,
2962                         rte_strerror(EBUSY));
2963
2964         /* Node must be valid */
2965         n = tm_node_search(dev, node_id);
2966         if (n == NULL)
2967                 return -rte_tm_error_set(error,
2968                         EINVAL,
2969                         RTE_TM_ERROR_TYPE_NODE_ID,
2970                         NULL,
2971                         rte_strerror(EINVAL));
2972
2973         /* Shaper profile must be valid. */
2974         sp = tm_shaper_profile_search(dev, shaper_profile_id);
2975         if (sp == NULL)
2976                 return -rte_tm_error_set(error,
2977                         EINVAL,
2978                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
2979                         NULL,
2980                         rte_strerror(EINVAL));
2981
2982         switch (n->level) {
2983         case TM_NODE_LEVEL_PORT:
2984                 return -rte_tm_error_set(error,
2985                         EINVAL,
2986                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2987                         NULL,
2988                         rte_strerror(EINVAL));
2989
2990         case TM_NODE_LEVEL_SUBPORT:
2991                 if (update_subport_rate(dev, n, sp))
2992                         return -rte_tm_error_set(error,
2993                                 EINVAL,
2994                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2995                                 NULL,
2996                                 rte_strerror(EINVAL));
2997                 return 0;
2998
2999         case TM_NODE_LEVEL_PIPE:
3000                 if (update_pipe_rate(dev, n, sp))
3001                         return -rte_tm_error_set(error,
3002                                 EINVAL,
3003                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3004                                 NULL,
3005                                 rte_strerror(EINVAL));
3006                 return 0;
3007
3008         case TM_NODE_LEVEL_TC:
3009                 if (update_tc_rate(dev, n, sp))
3010                         return -rte_tm_error_set(error,
3011                                 EINVAL,
3012                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3013                                 NULL,
3014                                 rte_strerror(EINVAL));
3015                 return 0;
3016
3017         case TM_NODE_LEVEL_QUEUE:
3018                 /* fall-through */
3019         default:
3020                 return -rte_tm_error_set(error,
3021                         EINVAL,
3022                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3023                         NULL,
3024                         rte_strerror(EINVAL));
3025         }
3026 }
3027
3028 static inline uint32_t
3029 tm_port_queue_id(struct rte_eth_dev *dev,
3030         uint32_t port_subport_id,
3031         uint32_t subport_pipe_id,
3032         uint32_t pipe_tc_id,
3033         uint32_t tc_queue_id)
3034 {
3035         struct pmd_internals *p = dev->data->dev_private;
3036         struct tm_hierarchy *h = &p->soft.tm.h;
3037         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
3038                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3039
3040         uint32_t port_pipe_id =
3041                 port_subport_id * n_pipes_per_subport + subport_pipe_id;
3042
3043         uint32_t port_queue_id =
3044                 port_pipe_id * RTE_SCHED_QUEUES_PER_PIPE + pipe_tc_id + tc_queue_id;
3045
3046         return port_queue_id;
3047 }
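/* Example (illustrative only, assuming RTE_SCHED_QUEUES_PER_PIPE == 16,
 * RTE_SCHED_TRAFFIC_CLASS_BE == 12 and 4096 pipes per subport):
 * subport 1, pipe 2, best-effort queue 3 maps to
 *   port pipe id  = 1 * 4096 + 2 = 4098
 *   port queue id = 4098 * 16 + 12 + 3 = 65583
 */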
3048
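/* Port-level stats are the sum of the per-traffic-class counters of every
 * subport. The scheduler reports these drop counters without a color, so all
 * drops are accounted against RTE_COLOR_GREEN here.
 */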
3049 static int
3050 read_port_stats(struct rte_eth_dev *dev,
3051         struct tm_node *nr,
3052         struct rte_tm_node_stats *stats,
3053         uint64_t *stats_mask,
3054         int clear)
3055 {
3056         struct pmd_internals *p = dev->data->dev_private;
3057         struct tm_hierarchy *h = &p->soft.tm.h;
3058         uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3059         uint32_t subport_id;
3060
3061         for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
3062                 struct rte_sched_subport_stats s;
3063                 uint32_t tc_ov, id;
3064
3065                 /* Stats read */
3066                 int status = rte_sched_subport_read_stats(SCHED(p),
3067                         subport_id,
3068                         &s,
3069                         &tc_ov);
3070                 if (status)
3071                         return status;
3072
3073                 /* Stats accumulate */
3074                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
3075                         nr->stats.n_pkts +=
3076                                 s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
3077                         nr->stats.n_bytes +=
3078                                 s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
3079                         nr->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
3080                                 s.n_pkts_tc_dropped[id];
3081                         nr->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3082                                 s.n_bytes_tc_dropped[id];
3083                 }
3084         }
3085
3086         /* Stats copy */
3087         if (stats)
3088                 memcpy(stats, &nr->stats, sizeof(*stats));
3089
3090         if (stats_mask)
3091                 *stats_mask = STATS_MASK_DEFAULT;
3092
3093         /* Stats clear */
3094         if (clear)
3095                 memset(&nr->stats, 0, sizeof(nr->stats));
3096
3097         return 0;
3098 }
3099
3100 static int
3101 read_subport_stats(struct rte_eth_dev *dev,
3102         struct tm_node *ns,
3103         struct rte_tm_node_stats *stats,
3104         uint64_t *stats_mask,
3105         int clear)
3106 {
3107         struct pmd_internals *p = dev->data->dev_private;
3108         uint32_t subport_id = tm_node_subport_id(dev, ns);
3109         struct rte_sched_subport_stats s;
3110         uint32_t tc_ov, tc_id;
3111
3112         /* Stats read */
3113         int status = rte_sched_subport_read_stats(SCHED(p),
3114                 subport_id,
3115                 &s,
3116                 &tc_ov);
3117         if (status)
3118                 return status;
3119
3120         /* Stats accumulate */
3121         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
3122                 ns->stats.n_pkts +=
3123                         s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
3124                 ns->stats.n_bytes +=
3125                         s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
3126                 ns->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
3127                         s.n_pkts_tc_dropped[tc_id];
3128                 ns->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3129                         s.n_bytes_tc_dropped[tc_id];
3130         }
3131
3132         /* Stats copy */
3133         if (stats)
3134                 memcpy(stats, &ns->stats, sizeof(*stats));
3135
3136         if (stats_mask)
3137                 *stats_mask = STATS_MASK_DEFAULT;
3138
3139         /* Stats clear */
3140         if (clear)
3141                 memset(&ns->stats, 0, sizeof(ns->stats));
3142
3143         return 0;
3144 }
3145
3146 static int
3147 read_pipe_stats(struct rte_eth_dev *dev,
3148         struct tm_node *np,
3149         struct rte_tm_node_stats *stats,
3150         uint64_t *stats_mask,
3151         int clear)
3152 {
3153         struct pmd_internals *p = dev->data->dev_private;
3154
3155         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3156
3157         struct tm_node *ns = np->parent_node;
3158         uint32_t subport_id = tm_node_subport_id(dev, ns);
3159         uint32_t tc_id, queue_id;
3160         uint32_t i;
3161
3162         /* Stats read */
3163         for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
3164                 struct rte_sched_queue_stats s;
3165                 uint16_t qlen;
3166
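                /* Queues 0 .. RTE_SCHED_TRAFFIC_CLASS_BE - 1 are the single
                 * queues of the strict-priority traffic classes; the remaining
                 * queues all belong to the best-effort traffic class, with
                 * queue_id giving the index within that class.
                 */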
3167                 if (i < RTE_SCHED_TRAFFIC_CLASS_BE) {
3168                         tc_id = i;
3169                         queue_id = 0;
3170                 } else {
3171                         tc_id = RTE_SCHED_TRAFFIC_CLASS_BE;
3172                         queue_id = i - tc_id;
3173                 }
3174
3175                 uint32_t qid = tm_port_queue_id(dev,
3176                         subport_id,
3177                         pipe_id,
3178                         tc_id,
3179                         queue_id);
3180
3181                 int status = rte_sched_queue_read_stats(SCHED(p),
3182                         qid,
3183                         &s,
3184                         &qlen);
3185                 if (status)
3186                         return status;
3187
3188                 /* Stats accumulate */
3189                 np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3190                 np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3191                 np->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3192                 np->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3193                         s.n_bytes_dropped;
3194                 np->stats.leaf.n_pkts_queued = qlen;
3195         }
3196
3197         /* Stats copy */
3198         if (stats)
3199                 memcpy(stats, &np->stats, sizeof(*stats));
3200
3201         if (stats_mask)
3202                 *stats_mask = STATS_MASK_DEFAULT;
3203
3204         /* Stats clear */
3205         if (clear)
3206                 memset(&np->stats, 0, sizeof(np->stats));
3207
3208         return 0;
3209 }
3210
3211 static int
3212 read_tc_stats(struct rte_eth_dev *dev,
3213         struct tm_node *nt,
3214         struct rte_tm_node_stats *stats,
3215         uint64_t *stats_mask,
3216         int clear)
3217 {
3218         struct pmd_internals *p = dev->data->dev_private;
3219
3220         uint32_t tc_id = tm_node_tc_id(dev, nt);
3221
3222         struct tm_node *np = nt->parent_node;
3223         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3224
3225         struct tm_node *ns = np->parent_node;
3226         uint32_t subport_id = tm_node_subport_id(dev, ns);
3227         struct rte_sched_queue_stats s;
3228         uint32_t qid, i;
3229         uint16_t qlen;
3230         int status;
3231
3232         /* Stats read */
3233         if (tc_id < RTE_SCHED_TRAFFIC_CLASS_BE) {
3234                 qid = tm_port_queue_id(dev,
3235                         subport_id,
3236                         pipe_id,
3237                         tc_id,
3238                         0);
3239
3240                 status = rte_sched_queue_read_stats(SCHED(p),
3241                         qid,
3242                         &s,
3243                         &qlen);
3244                 if (status)
3245                         return status;
3246
3247                 /* Stats accumulate */
3248                 nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3249                 nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3250                 nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3251                 nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3252                         s.n_bytes_dropped;
3253                 nt->stats.leaf.n_pkts_queued = qlen;
3254         } else {
3255                 for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
3256                         qid = tm_port_queue_id(dev,
3257                                 subport_id,
3258                                 pipe_id,
3259                                 tc_id,
3260                                 i);
3261
3262                         status = rte_sched_queue_read_stats(SCHED(p),
3263                                 qid,
3264                                 &s,
3265                                 &qlen);
3266                         if (status)
3267                                 return status;
3268
3269                         /* Stats accumulate */
3270                         nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3271                         nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3272                         nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
3273                                 s.n_pkts_dropped;
3274                         nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3275                                 s.n_bytes_dropped;
3276                         nt->stats.leaf.n_pkts_queued = qlen;
3277                 }
3278         }
3279
3280         /* Stats copy */
3281         if (stats)
3282                 memcpy(stats, &nt->stats, sizeof(*stats));
3283
3284         if (stats_mask)
3285                 *stats_mask = STATS_MASK_DEFAULT;
3286
3287         /* Stats clear */
3288         if (clear)
3289                 memset(&nt->stats, 0, sizeof(nt->stats));
3290
3291         return 0;
3292 }
3293
3294 static int
3295 read_queue_stats(struct rte_eth_dev *dev,
3296         struct tm_node *nq,
3297         struct rte_tm_node_stats *stats,
3298         uint64_t *stats_mask,
3299         int clear)
3300 {
3301         struct pmd_internals *p = dev->data->dev_private;
3302         struct rte_sched_queue_stats s;
3303         uint16_t qlen;
3304
3305         uint32_t queue_id = tm_node_queue_id(dev, nq);
3306
3307         struct tm_node *nt = nq->parent_node;
3308         uint32_t tc_id = tm_node_tc_id(dev, nt);
3309
3310         struct tm_node *np = nt->parent_node;
3311         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3312
3313         struct tm_node *ns = np->parent_node;
3314         uint32_t subport_id = tm_node_subport_id(dev, ns);
3315
3316         /* Stats read */
3317         uint32_t qid = tm_port_queue_id(dev,
3318                 subport_id,
3319                 pipe_id,
3320                 tc_id,
3321                 queue_id);
3322
3323         int status = rte_sched_queue_read_stats(SCHED(p),
3324                 qid,
3325                 &s,
3326                 &qlen);
3327         if (status)
3328                 return status;
3329
3330         /* Stats accumulate */
3331         nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3332         nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3333         nq->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3334         nq->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3335                 s.n_bytes_dropped;
3336         nq->stats.leaf.n_pkts_queued = qlen;
3337
3338         /* Stats copy */
3339         if (stats)
3340                 memcpy(stats, &nq->stats, sizeof(*stats));
3341
3342         if (stats_mask)
3343                 *stats_mask = STATS_MASK_QUEUE;
3344
3345         /* Stats clear */
3346         if (clear)
3347                 memset(&nq->stats, 0, sizeof(nq->stats));
3348
3349         return 0;
3350 }
3351
3352 /* Traffic manager read stats counters for a specific node */
3353 static int
3354 pmd_tm_node_stats_read(struct rte_eth_dev *dev,
3355         uint32_t node_id,
3356         struct rte_tm_node_stats *stats,
3357         uint64_t *stats_mask,
3358         int clear,
3359         struct rte_tm_error *error)
3360 {
3361         struct tm_node *n;
3362
3363         /* Port must be started and TM used. */
3364         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
3365                 return -rte_tm_error_set(error,
3366                         EBUSY,
3367                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3368                         NULL,
3369                         rte_strerror(EBUSY));
3370
3371         /* Node must be valid */
3372         n = tm_node_search(dev, node_id);
3373         if (n == NULL)
3374                 return -rte_tm_error_set(error,
3375                         EINVAL,
3376                         RTE_TM_ERROR_TYPE_NODE_ID,
3377                         NULL,
3378                         rte_strerror(EINVAL));
3379
3380         switch (n->level) {
3381         case TM_NODE_LEVEL_PORT:
3382                 if (read_port_stats(dev, n, stats, stats_mask, clear))
3383                         return -rte_tm_error_set(error,
3384                                 EINVAL,
3385                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3386                                 NULL,
3387                                 rte_strerror(EINVAL));
3388                 return 0;
3389
3390         case TM_NODE_LEVEL_SUBPORT:
3391                 if (read_subport_stats(dev, n, stats, stats_mask, clear))
3392                         return -rte_tm_error_set(error,
3393                                 EINVAL,
3394                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3395                                 NULL,
3396                                 rte_strerror(EINVAL));
3397                 return 0;
3398
3399         case TM_NODE_LEVEL_PIPE:
3400                 if (read_pipe_stats(dev, n, stats, stats_mask, clear))
3401                         return -rte_tm_error_set(error,
3402                                 EINVAL,
3403                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3404                                 NULL,
3405                                 rte_strerror(EINVAL));
3406                 return 0;
3407
3408         case TM_NODE_LEVEL_TC:
3409                 if (read_tc_stats(dev, n, stats, stats_mask, clear))
3410                         return -rte_tm_error_set(error,
3411                                 EINVAL,
3412                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3413                                 NULL,
3414                                 rte_strerror(EINVAL));
3415                 return 0;
3416
3417         case TM_NODE_LEVEL_QUEUE:
3418         default:
3419                 if (read_queue_stats(dev, n, stats, stats_mask, clear))
3420                         return -rte_tm_error_set(error,
3421                                 EINVAL,
3422                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3423                                 NULL,
3424                                 rte_strerror(EINVAL));
3425                 return 0;
3426         }
3427 }
3428
3429 const struct rte_tm_ops pmd_tm_ops = {
3430         .node_type_get = pmd_tm_node_type_get,
3431         .capabilities_get = pmd_tm_capabilities_get,
3432         .level_capabilities_get = pmd_tm_level_capabilities_get,
3433         .node_capabilities_get = pmd_tm_node_capabilities_get,
3434
3435         .wred_profile_add = pmd_tm_wred_profile_add,
3436         .wred_profile_delete = pmd_tm_wred_profile_delete,
3437         .shared_wred_context_add_update = NULL,
3438         .shared_wred_context_delete = NULL,
3439
3440         .shaper_profile_add = pmd_tm_shaper_profile_add,
3441         .shaper_profile_delete = pmd_tm_shaper_profile_delete,
3442         .shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
3443         .shared_shaper_delete = pmd_tm_shared_shaper_delete,
3444
3445         .node_add = pmd_tm_node_add,
3446         .node_delete = pmd_tm_node_delete,
3447         .node_suspend = NULL,
3448         .node_resume = NULL,
3449         .hierarchy_commit = pmd_tm_hierarchy_commit,
3450
3451         .node_parent_update = pmd_tm_node_parent_update,
3452         .node_shaper_update = pmd_tm_node_shaper_update,
3453         .node_shared_shaper_update = NULL,
3454         .node_stats_update = NULL,
3455         .node_wfq_weight_mode_update = NULL,
3456         .node_cman_update = NULL,
3457         .node_wred_context_update = NULL,
3458         .node_shared_wred_context_update = NULL,
3459
3460         .node_stats_read = pmd_tm_node_stats_read,
3461 };
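
/* Illustrative sketch (not part of this driver): an application reaches the
 * callbacks above through the generic rte_tm API using the softnic port id,
 * for example:
 *
 *     struct rte_tm_node_stats stats;
 *     struct rte_tm_error error;
 *     uint64_t stats_mask;
 *
 *     rte_tm_node_shaper_update(port_id, node_id, shaper_profile_id, &error);
 *     rte_tm_node_stats_read(port_id, node_id, &stats, &stats_mask, 0, &error);
 *
 * port_id, node_id and shaper_profile_id stand for values set up by the
 * application; return code checking is omitted for brevity.
 */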