drivers/net/softnic/rte_eth_softnic_tm.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_malloc.h>
#include <rte_string_fns.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"

#define SUBPORT_TC_PERIOD	10
#define PIPE_TC_PERIOD		40

int
softnic_tmgr_init(struct pmd_internals *p)
{
	TAILQ_INIT(&p->tmgr_port_list);

	return 0;
}

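/* Drain the traffic manager port list, freeing each port's underlying
 * rte_sched object together with the list node itself.
 */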
void
softnic_tmgr_free(struct pmd_internals *p)
{
	for ( ; ; ) {
		struct softnic_tmgr_port *tmgr_port;

		tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
		if (tmgr_port == NULL)
			break;

		TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
		rte_sched_port_free(tmgr_port->s);
		free(tmgr_port);
	}
}

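/* Look up a traffic manager port by name; returns NULL when the name is
 * NULL or no port with that name is on the list.
 */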
struct softnic_tmgr_port *
softnic_tmgr_port_find(struct pmd_internals *p,
	const char *name)
{
	struct softnic_tmgr_port *tmgr_port;

	if (name == NULL)
		return NULL;

	TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
		if (strcmp(tmgr_port->name, name) == 0)
			return tmgr_port;

	return NULL;
}

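/* Create a traffic manager port from the frozen TM hierarchy: configure the
 * rte_sched port, then every subport and every pipe that has a profile
 * assigned, and finally add the named port to the list.
 */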
struct softnic_tmgr_port *
softnic_tmgr_port_create(struct pmd_internals *p,
	const char *name)
{
	struct softnic_tmgr_port *tmgr_port;
	struct tm_params *t = &p->soft.tm.params;
	struct rte_sched_port *sched;
	uint32_t n_subports, subport_id;

	/* Check input params */
	if (name == NULL ||
		softnic_tmgr_port_find(p, name))
		return NULL;

	/*
	 * Resource
	 */

	/* Is hierarchy frozen? */
	if (p->soft.tm.hierarchy_frozen == 0)
		return NULL;

	/* Port */
	sched = rte_sched_port_config(&t->port_params);
	if (sched == NULL)
		return NULL;

	/* Subport */
	n_subports = t->port_params.n_subports_per_port;
	for (subport_id = 0; subport_id < n_subports; subport_id++) {
		uint32_t n_pipes_per_subport = t->port_params.n_pipes_per_subport;
		uint32_t pipe_id;
		int status;

		status = rte_sched_subport_config(sched,
			subport_id,
			&t->subport_params[subport_id]);
		if (status) {
			rte_sched_port_free(sched);
			return NULL;
		}

		/* Pipe */
		for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
			int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
			int profile_id = t->pipe_to_profile[pos];

			if (profile_id < 0)
				continue;

			status = rte_sched_pipe_config(sched,
				subport_id,
				pipe_id,
				profile_id);
			if (status) {
				rte_sched_port_free(sched);
				return NULL;
			}
		}
	}

	/* Node allocation */
	tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
	if (tmgr_port == NULL) {
		rte_sched_port_free(sched);
		return NULL;
	}

	/* Node fill in */
	strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
	tmgr_port->s = sched;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);

	return tmgr_port;
}

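/* Return the rte_sched scheduler of the traffic manager port named "TMGR",
 * or NULL if that port has not been created yet.
 */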
static struct rte_sched_port *
SCHED(struct pmd_internals *p)
{
	struct softnic_tmgr_port *tmgr_port;

	tmgr_port = softnic_tmgr_port_find(p, "TMGR");
	if (tmgr_port == NULL)
		return NULL;

	return tmgr_port->s;
}

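/* Reset the TM hierarchy state and (re)initialize the per-device object
 * lists: shaper profiles, shared shapers, WRED profiles and TM nodes.
 */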
void
tm_hierarchy_init(struct pmd_internals *p)
{
	memset(&p->soft.tm, 0, sizeof(p->soft.tm));

	/* Initialize shaper profile list */
	TAILQ_INIT(&p->soft.tm.h.shaper_profiles);

	/* Initialize shared shaper list */
	TAILQ_INIT(&p->soft.tm.h.shared_shapers);

	/* Initialize wred profile list */
	TAILQ_INIT(&p->soft.tm.h.wred_profiles);

	/* Initialize TM node list */
	TAILQ_INIT(&p->soft.tm.h.nodes);
}

void
tm_hierarchy_free(struct pmd_internals *p)
{
	/* Remove all nodes */
	for ( ; ; ) {
		struct tm_node *tm_node;

		tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
		if (tm_node == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
		free(tm_node);
	}

	/* Remove all WRED profiles */
	for ( ; ; ) {
		struct tm_wred_profile *wred_profile;

		wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
		if (wred_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
		free(wred_profile);
	}

	/* Remove all shared shapers */
	for ( ; ; ) {
		struct tm_shared_shaper *shared_shaper;

		shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
		if (shared_shaper == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
		free(shared_shaper);
	}

	/* Remove all shaper profiles */
	for ( ; ; ) {
		struct tm_shaper_profile *shaper_profile;

		shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
		if (shaper_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
			shaper_profile, node);
		free(shaper_profile);
	}

	tm_hierarchy_init(p);
}

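/* Lookup helpers: each searches the corresponding per-device list and
 * returns the matching object, or NULL if the ID is not found.
 */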
static struct tm_shaper_profile *
tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;

	TAILQ_FOREACH(sp, spl, node)
		if (shaper_profile_id == sp->shaper_profile_id)
			return sp;

	return NULL;
}

static struct tm_shared_shaper *
tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
	struct tm_shared_shaper *ss;

	TAILQ_FOREACH(ss, ssl, node)
		if (shared_shaper_id == ss->shared_shaper_id)
			return ss;

	return NULL;
}

static struct tm_wred_profile *
tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;

	TAILQ_FOREACH(wp, wpl, node)
		if (wred_profile_id == wp->wred_profile_id)
			return wp;

	return NULL;
}

static struct tm_node *
tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->node_id == node_id)
			return n;

	return NULL;
}

static struct tm_node *
tm_root_node_present(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
			return n;

	return NULL;
}

static uint32_t
tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *ns;
	uint32_t subport_id;

	subport_id = 0;
	TAILQ_FOREACH(ns, nl, node) {
		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		if (ns->node_id == subport_node->node_id)
			return subport_id;

		subport_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *np;
	uint32_t pipe_id;

	pipe_id = 0;
	TAILQ_FOREACH(np, nl, node) {
		if (np->level != TM_NODE_LEVEL_PIPE ||
			np->parent_node_id != pipe_node->parent_node_id)
			continue;

		if (np->node_id == pipe_node->node_id)
			return pipe_id;

		pipe_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
{
	return tc_node->priority;
}

static uint32_t
tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *nq;
	uint32_t queue_id;

	queue_id = 0;
	TAILQ_FOREACH(nq, nl, node) {
		if (nq->level != TM_NODE_LEVEL_QUEUE ||
			nq->parent_node_id != queue_node->parent_node_id)
			continue;

		if (nq->node_id == queue_node->node_id)
			return queue_id;

		queue_id++;
	}

	return UINT32_MAX;
}

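/* Upper bound on the number of nodes per hierarchy level, derived from the
 * configured queue count: queues are grouped into TCs, TCs into pipes, the
 * subport bound equals the pipe bound, and there is a single root node.
 * For example, assuming 4 queues per TC and 4 TCs per pipe, 64 queues
 * yield 16 TCs, 4 pipes and 4 subports.
 */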
static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t n_queues_max = p->params.tm.n_queues;
	uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
	uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
	uint32_t n_subports_max = n_pipes_max;
	uint32_t n_root_max = 1;

	switch (level) {
	case TM_NODE_LEVEL_PORT:
		return n_root_max;
	case TM_NODE_LEVEL_SUBPORT:
		return n_subports_max;
	case TM_NODE_LEVEL_PIPE:
		return n_pipes_max;
	case TM_NODE_LEVEL_TC:
		return n_tc_max;
	case TM_NODE_LEVEL_QUEUE:
	default:
		return n_queues_max;
	}
}

/* Traffic manager node type get */
static int
pmd_tm_node_type_get(struct rte_eth_dev *dev,
	uint32_t node_id,
	int *is_leaf,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (is_leaf == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	if (node_id == RTE_TM_NODE_ID_NULL ||
		(tm_node_search(dev, node_id) == NULL))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	*is_leaf = node_id < p->params.tm.n_queues;

	return 0;
}

#ifdef RTE_SCHED_RED
#define WRED_SUPPORTED	1
#else
#define WRED_SUPPORTED	0
#endif

#define STATS_MASK_DEFAULT \
	(RTE_TM_STATS_N_PKTS | \
	RTE_TM_STATS_N_BYTES | \
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE \
	(STATS_MASK_DEFAULT | \
	RTE_TM_STATS_N_PKTS_QUEUED)

static const struct rte_tm_capabilities tm_cap = {
	.n_nodes_max = UINT32_MAX,
	.n_levels_max = TM_NODE_LEVEL_MAX,

	.non_leaf_nodes_identical = 0,
	.leaf_nodes_identical = 1,

	.shaper_n_max = UINT32_MAX,
	.shaper_private_n_max = UINT32_MAX,
	.shaper_private_dual_rate_n_max = 0,
	.shaper_private_rate_min = 1,
	.shaper_private_rate_max = UINT32_MAX,

	.shaper_shared_n_max = UINT32_MAX,
	.shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
	.shaper_shared_n_shapers_per_node_max = 1,
	.shaper_shared_dual_rate_n_max = 0,
	.shaper_shared_rate_min = 1,
	.shaper_shared_rate_max = UINT32_MAX,

	.shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
	.shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,

	.sched_n_children_max = UINT32_MAX,
	.sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
	.sched_wfq_n_children_per_group_max = UINT32_MAX,
	.sched_wfq_n_groups_max = 1,
	.sched_wfq_weight_max = UINT32_MAX,

	.cman_wred_packet_mode_supported = WRED_SUPPORTED,
	.cman_wred_byte_mode_supported = 0,
	.cman_head_drop_supported = 0,
	.cman_wred_context_n_max = 0,
	.cman_wred_context_private_n_max = 0,
	.cman_wred_context_shared_n_max = 0,
	.cman_wred_context_shared_n_nodes_per_context_max = 0,
	.cman_wred_context_shared_n_contexts_per_node_max = 0,

	.mark_vlan_dei_supported = {0, 0, 0},
	.mark_ip_ecn_tcp_supported = {0, 0, 0},
	.mark_ip_ecn_sctp_supported = {0, 0, 0},
	.mark_ip_dscp_supported = {0, 0, 0},

	.dynamic_update_mask = 0,

	.stats_mask = STATS_MASK_QUEUE,
};

/* Traffic manager capabilities get */
static int
pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	struct rte_tm_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_cap, sizeof(*cap));

	cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->shaper_private_n_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);

	cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);

	cap->shaper_n_max = cap->shaper_private_n_max +
		cap->shaper_shared_n_max;

	cap->shaper_shared_n_nodes_per_shaper_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);

	cap->sched_n_children_max = RTE_MAX(
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
		(uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);

	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;

	if (WRED_SUPPORTED)
		cap->cman_wred_context_private_n_max =
			tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
		cap->cman_wred_context_shared_n_max;

	return 0;
}

static const struct rte_tm_level_capabilities tm_level_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.n_nodes_max = 1,
		.n_nodes_nonleaf_max = 1,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
#ifdef RTE_SCHED_SUBPORT_TC_OV
			.sched_wfq_weight_max = UINT32_MAX,
#else
			.sched_wfq_weight_max = 1,
#endif
			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_PIPE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_TC] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 1,

			.sched_n_children_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = 0,
		.n_nodes_leaf_max = UINT32_MAX,
		.non_leaf_nodes_identical = 0,
		.leaf_nodes_identical = 1,

		{.leaf = {
			.shaper_private_supported = 0,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 0,
			.shaper_private_rate_max = 0,
			.shaper_shared_n_max = 0,

			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,

			.stats_mask = STATS_MASK_QUEUE,
		} },
	},
};

/* Traffic manager level capabilities get */
static int
pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	uint32_t level_id,
	struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	if (level_id >= TM_NODE_LEVEL_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));

	switch (level_id) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_SUBPORT);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_PIPE);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_TC:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_TC);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_QUEUE:
	default:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_QUEUE);
		cap->n_nodes_leaf_max = cap->n_nodes_max;
		break;
	}

	return 0;
}

static const struct rte_tm_node_capabilities tm_node_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		{.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		{.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_PIPE] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		{.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_TC] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 1,

		{.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.shaper_private_supported = 0,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 0,
		.shaper_private_rate_max = 0,
		.shaper_shared_n_max = 0,

		{.leaf = {
			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,
		} },

		.stats_mask = STATS_MASK_QUEUE,
	},
};

/* Traffic manager node capabilities get */
static int
pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	uint32_t node_id,
	struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
{
	struct tm_node *tm_node;

	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	tm_node = tm_node_search(dev, node_id);
	if (tm_node == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));

	switch (tm_node->level) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
	case TM_NODE_LEVEL_TC:
	case TM_NODE_LEVEL_QUEUE:
	default:
		break;
	}

	return 0;
}

static int
shaper_profile_check(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct tm_shaper_profile *sp;

	/* Shaper profile ID must not be NONE. */
	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must not exist. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak rate: non-zero, 32-bit */
	if (profile->peak.rate == 0 ||
		profile->peak.rate >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak size: non-zero, 32-bit */
	if (profile->peak.size == 0 ||
		profile->peak.size >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
			NULL,
			rte_strerror(EINVAL));

	/* Dual-rate profiles are not supported. */
	if (profile->committed.rate != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Packet length adjust: 24 bytes */
	if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

/* Traffic manager shaper profile add */
static int
pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;
	int status;

	/* Check input params */
	status = shaper_profile_check(dev, shaper_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	sp = calloc(1, sizeof(struct tm_shaper_profile));
	if (sp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	sp->shaper_profile_id = shaper_profile_id;
	memcpy(&sp->params, profile, sizeof(sp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(spl, sp, node);
	p->soft.tm.h.n_shaper_profiles++;

	return 0;
}

/* Traffic manager shaper profile delete */
static int
pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp;

	/* Check existing */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (sp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
	p->soft.tm.h.n_shaper_profiles--;
	free(sp);

	return 0;
}

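/* Return the first TC-level node whose first shared shaper is the given
 * shared shaper, or NULL if no TC node references it.
 */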
static struct tm_node *
tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
	struct tm_shared_shaper *ss)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	/* Subport: each TC uses shared shaper */
	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_TC ||
			n->params.n_shared_shapers == 0 ||
			n->params.shared_shaper_id[0] != ss->shared_shaper_id)
			continue;

		return n;
	}

	return NULL;
}

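/* Apply a new shaper profile to a subport TC at run time: rebuild the
 * subport configuration with the new TC rate, push it to the scheduler and,
 * on success, move the shared shaper from the old profile to the new one.
 */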
static int
update_subport_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shared_shaper *ss,
	struct tm_shaper_profile *sp_new)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_params subport_params;

	struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
		ss->shaper_profile_id);

	/* Derive new subport configuration. */
	memcpy(&subport_params,
		&p->soft.tm.params.subport_params[subport_id],
		sizeof(subport_params));
	subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(SCHED(p),
		subport_id, &subport_params))
		return -1;

	/* Commit changes. */
	sp_old->n_users--;

	ss->shaper_profile_id = sp_new->shaper_profile_id;
	sp_new->n_users++;

	memcpy(&p->soft.tm.params.subport_params[subport_id],
		&subport_params,
		sizeof(subport_params));

	return 0;
}

/* Traffic manager shared shaper add/update */
static int
pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;
	struct tm_shaper_profile *sp;
	struct tm_node *nt;

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/**
	 * Add new shared shaper
	 */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL) {
		struct tm_shared_shaper_list *ssl =
			&p->soft.tm.h.shared_shapers;

		/* Hierarchy must not be frozen */
		if (p->soft.tm.hierarchy_frozen)
			return -rte_tm_error_set(error,
				EBUSY,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EBUSY));

		/* Memory allocation */
		ss = calloc(1, sizeof(struct tm_shared_shaper));
		if (ss == NULL)
			return -rte_tm_error_set(error,
				ENOMEM,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(ENOMEM));

		/* Fill in */
		ss->shared_shaper_id = shared_shaper_id;
		ss->shaper_profile_id = shaper_profile_id;

		/* Add to list */
		TAILQ_INSERT_TAIL(ssl, ss, node);
		p->soft.tm.h.n_shared_shapers++;

		return 0;
	}

	/**
	 * Update existing shared shaper
	 */
	/* Hierarchy must be frozen (run-time update) */
	if (p->soft.tm.hierarchy_frozen == 0)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Propagate change. */
	nt = tm_shared_shaper_get_tc(dev, ss);
	if (update_subport_tc_rate(dev, nt, ss, sp))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

/* Traffic manager shared shaper delete */
static int
pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;

	/* Check existing */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (ss->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
	p->soft.tm.h.n_shared_shapers--;
	free(ss);

	return 0;
}

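/* Validate a WRED profile: the ID must not be NONE or already in use, the
 * profile must be packet-mode, and for every color min_th <= max_th with
 * max_th non-zero and both thresholds within 16 bits.
 */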
static int
wred_profile_check(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct tm_wred_profile *wp;
	enum rte_color color;

	/* WRED profile ID must not be NONE. */
	if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile must not exist. */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile should be in packet mode */
	if (profile->packet_mode == 0)
		return -rte_tm_error_set(error,
			ENOTSUP,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(ENOTSUP));

	/* min_th <= max_th, max_th > 0 */
	for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
		uint32_t min_th = profile->red_params[color].min_th;
		uint32_t max_th = profile->red_params[color].max_th;

		if (min_th > max_th ||
			max_th == 0 ||
			min_th > UINT16_MAX ||
			max_th > UINT16_MAX)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_WRED_PROFILE,
				NULL,
				rte_strerror(EINVAL));
	}

	return 0;
}

/* Traffic manager WRED profile add */
static int
pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;
	int status;

	/* Check input params */
	status = wred_profile_check(dev, wred_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	wp = calloc(1, sizeof(struct tm_wred_profile));
	if (wp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	wp->wred_profile_id = wred_profile_id;
	memcpy(&wp->params, profile, sizeof(wp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(wpl, wp, node);
	p->soft.tm.h.n_wred_profiles++;

	return 0;
}

/* Traffic manager WRED profile delete */
static int
pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile *wp;

	/* Check existing */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (wp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
	p->soft.tm.h.n_wred_profiles--;
	free(wp);

	return 0;
}

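/* Per-level validation helpers for node add: each checks the node ID range,
 * priority, weight, shaper profile, shared shapers, SP priorities and stats
 * mask constraints that apply to that level of the hierarchy.
 */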
static int
node_add_check_port(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
		params->shaper_profile_id);

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

static int
node_add_check_subport(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

static int
node_add_check_pipe(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight __rte_unused,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 4 */
	if (params->nonleaf.n_sp_priorities !=
		RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* WFQ mode must be byte mode */
	if (params->nonleaf.wfq_weight_mode != NULL &&
		params->nonleaf.wfq_weight_mode[0] != 0 &&
		params->nonleaf.wfq_weight_mode[1] != 0 &&
		params->nonleaf.wfq_weight_mode[2] != 0 &&
		params->nonleaf.wfq_weight_mode[3] != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

static int
node_add_check_tc(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority __rte_unused,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Single valid shared shaper */
1593         if (params->n_shared_shapers > 1)
1594                 return -rte_tm_error_set(error,
1595                         EINVAL,
1596                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1597                         NULL,
1598                         rte_strerror(EINVAL));
1599
1600         if (params->n_shared_shapers == 1 &&
1601                 (params->shared_shaper_id == NULL ||
1602                 (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
1603                 return -rte_tm_error_set(error,
1604                         EINVAL,
1605                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
1606                         NULL,
1607                         rte_strerror(EINVAL));
1608
1609	/* Number of SP priorities must be 1 */
1610         if (params->nonleaf.n_sp_priorities != 1)
1611                 return -rte_tm_error_set(error,
1612                         EINVAL,
1613                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1614                         NULL,
1615                         rte_strerror(EINVAL));
1616
1617         /* Stats */
1618         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1619                 return -rte_tm_error_set(error,
1620                         EINVAL,
1621                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1622                         NULL,
1623                         rte_strerror(EINVAL));
1624
1625         return 0;
1626 }
1627
1628 static int
1629 node_add_check_queue(struct rte_eth_dev *dev,
1630         uint32_t node_id,
1631         uint32_t parent_node_id __rte_unused,
1632         uint32_t priority,
1633         uint32_t weight __rte_unused,
1634         uint32_t level_id __rte_unused,
1635         struct rte_tm_node_params *params,
1636         struct rte_tm_error *error)
1637 {
1638         struct pmd_internals *p = dev->data->dev_private;
1639
1640         /* node type: leaf */
1641         if (node_id >= p->params.tm.n_queues)
1642                 return -rte_tm_error_set(error,
1643                         EINVAL,
1644                         RTE_TM_ERROR_TYPE_NODE_ID,
1645                         NULL,
1646                         rte_strerror(EINVAL));
1647
1648         /* Priority must be 0 */
1649         if (priority != 0)
1650                 return -rte_tm_error_set(error,
1651                         EINVAL,
1652                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1653                         NULL,
1654                         rte_strerror(EINVAL));
1655
1656         /* No shaper */
1657         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1658                 return -rte_tm_error_set(error,
1659                         EINVAL,
1660                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1661                         NULL,
1662                         rte_strerror(EINVAL));
1663
1664         /* No shared shapers */
1665         if (params->n_shared_shapers != 0)
1666                 return -rte_tm_error_set(error,
1667                         EINVAL,
1668                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1669                         NULL,
1670                         rte_strerror(EINVAL));
1671
1672         /* Congestion management must not be head drop */
1673         if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
1674                 return -rte_tm_error_set(error,
1675                         EINVAL,
1676                         RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
1677                         NULL,
1678                         rte_strerror(EINVAL));
1679
1680         /* Congestion management set to WRED */
1681         if (params->leaf.cman == RTE_TM_CMAN_WRED) {
1682                 uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
1683                 struct tm_wred_profile *wp = tm_wred_profile_search(dev,
1684                         wred_profile_id);
1685
1686                 /* WRED profile (for private WRED context) must be valid */
1687                 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
1688                         wp == NULL)
1689                         return -rte_tm_error_set(error,
1690                                 EINVAL,
1691                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
1692                                 NULL,
1693                                 rte_strerror(EINVAL));
1694
1695                 /* No shared WRED contexts */
1696                 if (params->leaf.wred.n_shared_wred_contexts != 0)
1697                         return -rte_tm_error_set(error,
1698                                 EINVAL,
1699                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
1700                                 NULL,
1701                                 rte_strerror(EINVAL));
1702         }
1703
1704         /* Stats */
1705         if (params->stats_mask & ~STATS_MASK_QUEUE)
1706                 return -rte_tm_error_set(error,
1707                         EINVAL,
1708                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1709                         NULL,
1710                         rte_strerror(EINVAL));
1711
1712         return 0;
1713 }
1714
1715 static int
1716 node_add_check(struct rte_eth_dev *dev,
1717         uint32_t node_id,
1718         uint32_t parent_node_id,
1719         uint32_t priority,
1720         uint32_t weight,
1721         uint32_t level_id,
1722         struct rte_tm_node_params *params,
1723         struct rte_tm_error *error)
1724 {
1725         struct tm_node *pn;
1726         uint32_t level;
1727         int status;
1728
1729         /* node_id, parent_node_id:
1730          *    -node_id must not be RTE_TM_NODE_ID_NULL
1731          *    -node_id must not be in use
1732          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1733          *        -root node must not exist
1734          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1735          *        -parent_node_id must be valid
1736          */
1737         if (node_id == RTE_TM_NODE_ID_NULL)
1738                 return -rte_tm_error_set(error,
1739                         EINVAL,
1740                         RTE_TM_ERROR_TYPE_NODE_ID,
1741                         NULL,
1742                         rte_strerror(EINVAL));
1743
1744         if (tm_node_search(dev, node_id))
1745                 return -rte_tm_error_set(error,
1746                         EEXIST,
1747                         RTE_TM_ERROR_TYPE_NODE_ID,
1748                         NULL,
1749                         rte_strerror(EEXIST));
1750
1751         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
1752                 pn = NULL;
1753                 if (tm_root_node_present(dev))
1754                         return -rte_tm_error_set(error,
1755                                 EEXIST,
1756                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1757                                 NULL,
1758                                 rte_strerror(EEXIST));
1759         } else {
1760                 pn = tm_node_search(dev, parent_node_id);
1761                 if (pn == NULL)
1762                         return -rte_tm_error_set(error,
1763                                 EINVAL,
1764                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1765                                 NULL,
1766                                 rte_strerror(EINVAL));
1767         }
1768
1769         /* priority: must be 0 .. 3 */
1770         if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1771                 return -rte_tm_error_set(error,
1772                         EINVAL,
1773                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1774                         NULL,
1775                         rte_strerror(EINVAL));
1776
1777	/* weight: must be non-zero and below UINT8_MAX (1 .. 254) */
1778         if (weight == 0 || weight >= UINT8_MAX)
1779                 return -rte_tm_error_set(error,
1780                         EINVAL,
1781                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1782                         NULL,
1783                         rte_strerror(EINVAL));
1784
1785         /* level_id: if valid, then
1786          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1787          *        -level_id must be zero
1788          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1789          *        -level_id must be parent level ID plus one
1790          */
1791         level = (pn == NULL) ? 0 : pn->level + 1;
1792         if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
1793                 return -rte_tm_error_set(error,
1794                         EINVAL,
1795                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1796                         NULL,
1797                         rte_strerror(EINVAL));
1798
1799         /* params: must not be NULL */
1800         if (params == NULL)
1801                 return -rte_tm_error_set(error,
1802                         EINVAL,
1803                         RTE_TM_ERROR_TYPE_NODE_PARAMS,
1804                         NULL,
1805                         rte_strerror(EINVAL));
1806
1807         /* params: per level checks */
1808         switch (level) {
1809         case TM_NODE_LEVEL_PORT:
1810                 status = node_add_check_port(dev, node_id,
1811                         parent_node_id, priority, weight, level_id,
1812                         params, error);
1813                 if (status)
1814                         return status;
1815                 break;
1816
1817         case TM_NODE_LEVEL_SUBPORT:
1818                 status = node_add_check_subport(dev, node_id,
1819                         parent_node_id, priority, weight, level_id,
1820                         params, error);
1821                 if (status)
1822                         return status;
1823                 break;
1824
1825         case TM_NODE_LEVEL_PIPE:
1826                 status = node_add_check_pipe(dev, node_id,
1827                         parent_node_id, priority, weight, level_id,
1828                         params, error);
1829                 if (status)
1830                         return status;
1831                 break;
1832
1833         case TM_NODE_LEVEL_TC:
1834                 status = node_add_check_tc(dev, node_id,
1835                         parent_node_id, priority, weight, level_id,
1836                         params, error);
1837                 if (status)
1838                         return status;
1839                 break;
1840
1841         case TM_NODE_LEVEL_QUEUE:
1842                 status = node_add_check_queue(dev, node_id,
1843                         parent_node_id, priority, weight, level_id,
1844                         params, error);
1845                 if (status)
1846                         return status;
1847                 break;
1848
1849         default:
1850                 return -rte_tm_error_set(error,
1851                         EINVAL,
1852                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1853                         NULL,
1854                         rte_strerror(EINVAL));
1855         }
1856
1857         return 0;
1858 }
1859
1860 /* Traffic manager node add */
1861 static int
1862 pmd_tm_node_add(struct rte_eth_dev *dev,
1863         uint32_t node_id,
1864         uint32_t parent_node_id,
1865         uint32_t priority,
1866         uint32_t weight,
1867         uint32_t level_id,
1868         struct rte_tm_node_params *params,
1869         struct rte_tm_error *error)
1870 {
1871         struct pmd_internals *p = dev->data->dev_private;
1872         struct tm_node_list *nl = &p->soft.tm.h.nodes;
1873         struct tm_node *n;
1874         uint32_t i;
1875         int status;
1876
1877         /* Checks */
1878         if (p->soft.tm.hierarchy_frozen)
1879                 return -rte_tm_error_set(error,
1880                         EBUSY,
1881                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1882                         NULL,
1883                         rte_strerror(EBUSY));
1884
1885         status = node_add_check(dev, node_id, parent_node_id, priority, weight,
1886                 level_id, params, error);
1887         if (status)
1888                 return status;
1889
1890         /* Memory allocation */
1891         n = calloc(1, sizeof(struct tm_node));
1892         if (n == NULL)
1893                 return -rte_tm_error_set(error,
1894                         ENOMEM,
1895                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1896                         NULL,
1897                         rte_strerror(ENOMEM));
1898
1899         /* Fill in */
1900         n->node_id = node_id;
1901         n->parent_node_id = parent_node_id;
1902         n->priority = priority;
1903         n->weight = weight;
1904
1905         if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1906                 n->parent_node = tm_node_search(dev, parent_node_id);
1907                 n->level = n->parent_node->level + 1;
1908         }
1909
1910         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1911                 n->shaper_profile = tm_shaper_profile_search(dev,
1912                         params->shaper_profile_id);
1913
1914         if (n->level == TM_NODE_LEVEL_QUEUE &&
1915                 params->leaf.cman == RTE_TM_CMAN_WRED)
1916                 n->wred_profile = tm_wred_profile_search(dev,
1917                         params->leaf.wred.wred_profile_id);
1918
1919         memcpy(&n->params, params, sizeof(n->params));
1920
1921         /* Add to list */
1922         TAILQ_INSERT_TAIL(nl, n, node);
1923         p->soft.tm.h.n_nodes++;
1924
1925         /* Update dependencies */
1926         if (n->parent_node)
1927                 n->parent_node->n_children++;
1928
1929         if (n->shaper_profile)
1930                 n->shaper_profile->n_users++;
1931
1932         for (i = 0; i < params->n_shared_shapers; i++) {
1933                 struct tm_shared_shaper *ss;
1934
1935                 ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
1936                 ss->n_users++;
1937         }
1938
1939         if (n->wred_profile)
1940                 n->wred_profile->n_users++;
1941
1942         p->soft.tm.h.n_tm_nodes[n->level]++;
1943
1944         return 0;
1945 }
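
/*
 * Illustrative sketch (editorial addition, not part of the driver): how an
 * application reaches pmd_tm_node_add() through the generic rte_tm API.
 * The port_id, node IDs and shaper profile ID are hypothetical example
 * values; the constraints (priority 0, weight 1, one SP priority, a valid
 * shaper profile) mirror node_add_check_port()/node_add_check_subport().
 *
 *	struct rte_tm_node_params np;
 *	struct rte_tm_error err;
 *	uint16_t port_id = 0;               // hypothetical ethdev port
 *
 *	memset(&np, 0, sizeof(np));
 *	np.shaper_profile_id = 10;          // hypothetical, created earlier
 *	np.nonleaf.n_sp_priorities = 1;
 *
 *	// Root (port level): parent is RTE_TM_NODE_ID_NULL, level inferred as 0
 *	rte_tm_node_add(port_id, 1000000, RTE_TM_NODE_ID_NULL,
 *		0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *
 *	// Subport level: parent is the root node just added
 *	rte_tm_node_add(port_id, 900000, 1000000,
 *		0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 */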
1946
1947 /* Traffic manager node delete */
1948 static int
1949 pmd_tm_node_delete(struct rte_eth_dev *dev,
1950         uint32_t node_id,
1951         struct rte_tm_error *error)
1952 {
1953         struct pmd_internals *p = dev->data->dev_private;
1954         struct tm_node *n;
1955         uint32_t i;
1956
1957         /* Check hierarchy changes are currently allowed */
1958         if (p->soft.tm.hierarchy_frozen)
1959                 return -rte_tm_error_set(error,
1960                         EBUSY,
1961                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1962                         NULL,
1963                         rte_strerror(EBUSY));
1964
1965         /* Check existing */
1966         n = tm_node_search(dev, node_id);
1967         if (n == NULL)
1968                 return -rte_tm_error_set(error,
1969                         EINVAL,
1970                         RTE_TM_ERROR_TYPE_NODE_ID,
1971                         NULL,
1972                         rte_strerror(EINVAL));
1973
1974         /* Check unused */
1975         if (n->n_children)
1976                 return -rte_tm_error_set(error,
1977                         EBUSY,
1978                         RTE_TM_ERROR_TYPE_NODE_ID,
1979                         NULL,
1980                         rte_strerror(EBUSY));
1981
1982         /* Update dependencies */
1983         p->soft.tm.h.n_tm_nodes[n->level]--;
1984
1985         if (n->wred_profile)
1986                 n->wred_profile->n_users--;
1987
1988         for (i = 0; i < n->params.n_shared_shapers; i++) {
1989                 struct tm_shared_shaper *ss;
1990
1991                 ss = tm_shared_shaper_search(dev,
1992                                 n->params.shared_shaper_id[i]);
1993                 ss->n_users--;
1994         }
1995
1996         if (n->shaper_profile)
1997                 n->shaper_profile->n_users--;
1998
1999         if (n->parent_node)
2000                 n->parent_node->n_children--;
2001
2002         /* Remove from list */
2003         TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
2004         p->soft.tm.h.n_nodes--;
2005         free(n);
2006
2007         return 0;
2008 }
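
/*
 * Editorial note with an illustrative sketch: a node with children is
 * rejected with EBUSY, so an application tearing down a hierarchy with
 * rte_tm_node_delete() must work leaf to root (and only while the hierarchy
 * is not frozen). Node IDs below are hypothetical example values.
 *
 *	rte_tm_node_delete(port_id, queue_node_id, &err);    // leaves first
 *	rte_tm_node_delete(port_id, tc_node_id, &err);
 *	rte_tm_node_delete(port_id, pipe_node_id, &err);
 *	rte_tm_node_delete(port_id, subport_node_id, &err);
 *	rte_tm_node_delete(port_id, root_node_id, &err);     // root last
 */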
2009
2010
2011 static void
2012 pipe_profile_build(struct rte_eth_dev *dev,
2013         struct tm_node *np,
2014         struct rte_sched_pipe_params *pp)
2015 {
2016         struct pmd_internals *p = dev->data->dev_private;
2017         struct tm_hierarchy *h = &p->soft.tm.h;
2018         struct tm_node_list *nl = &h->nodes;
2019         struct tm_node *nt, *nq;
2020
2021         memset(pp, 0, sizeof(*pp));
2022
2023         /* Pipe */
2024         pp->tb_rate = np->shaper_profile->params.peak.rate;
2025         pp->tb_size = np->shaper_profile->params.peak.size;
2026
2027         /* Traffic Class (TC) */
2028         pp->tc_period = PIPE_TC_PERIOD;
2029
2030 #ifdef RTE_SCHED_SUBPORT_TC_OV
2031         pp->tc_ov_weight = np->weight;
2032 #endif
2033
2034         TAILQ_FOREACH(nt, nl, node) {
2035                 uint32_t queue_id = 0;
2036
2037                 if (nt->level != TM_NODE_LEVEL_TC ||
2038                         nt->parent_node_id != np->node_id)
2039                         continue;
2040
2041                 pp->tc_rate[nt->priority] =
2042                         nt->shaper_profile->params.peak.rate;
2043
2044                 /* Queue */
2045                 TAILQ_FOREACH(nq, nl, node) {
2046                         uint32_t pipe_queue_id;
2047
2048                         if (nq->level != TM_NODE_LEVEL_QUEUE ||
2049                                 nq->parent_node_id != nt->node_id)
2050                                 continue;
2051
2052                         pipe_queue_id = nt->priority *
2053                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
2054                         pp->wrr_weights[pipe_queue_id] = nq->weight;
2055
2056                         queue_id++;
2057                 }
2058         }
2059 }
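
/*
 * Worked example (editorial addition): with RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS
 * equal to 4, the queue node that is child #1 (in list order) of the TC node
 * with priority 2 lands in pp->wrr_weights[2 * 4 + 1], i.e. index 9. The
 * queue_id counter above simply numbers each TC's queue children in the order
 * they appear in the node list.
 */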
2060
2061 static int
2062 pipe_profile_free_exists(struct rte_eth_dev *dev,
2063         uint32_t *pipe_profile_id)
2064 {
2065         struct pmd_internals *p = dev->data->dev_private;
2066         struct tm_params *t = &p->soft.tm.params;
2067
2068         if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
2069                 *pipe_profile_id = t->n_pipe_profiles;
2070                 return 1;
2071         }
2072
2073         return 0;
2074 }
2075
2076 static int
2077 pipe_profile_exists(struct rte_eth_dev *dev,
2078         struct rte_sched_pipe_params *pp,
2079         uint32_t *pipe_profile_id)
2080 {
2081         struct pmd_internals *p = dev->data->dev_private;
2082         struct tm_params *t = &p->soft.tm.params;
2083         uint32_t i;
2084
2085         for (i = 0; i < t->n_pipe_profiles; i++)
2086                 if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
2087                         if (pipe_profile_id)
2088                                 *pipe_profile_id = i;
2089                         return 1;
2090                 }
2091
2092         return 0;
2093 }
2094
2095 static void
2096 pipe_profile_install(struct rte_eth_dev *dev,
2097         struct rte_sched_pipe_params *pp,
2098         uint32_t pipe_profile_id)
2099 {
2100         struct pmd_internals *p = dev->data->dev_private;
2101         struct tm_params *t = &p->soft.tm.params;
2102
2103         memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
2104         t->n_pipe_profiles++;
2105 }
2106
2107 static void
2108 pipe_profile_mark(struct rte_eth_dev *dev,
2109         uint32_t subport_id,
2110         uint32_t pipe_id,
2111         uint32_t pipe_profile_id)
2112 {
2113         struct pmd_internals *p = dev->data->dev_private;
2114         struct tm_hierarchy *h = &p->soft.tm.h;
2115         struct tm_params *t = &p->soft.tm.params;
2116         uint32_t n_pipes_per_subport, pos;
2117
2118         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2119                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2120         pos = subport_id * n_pipes_per_subport + pipe_id;
2121
2122         t->pipe_to_profile[pos] = pipe_profile_id;
2123 }
2124
2125 static struct rte_sched_pipe_params *
2126 pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
2127 {
2128         struct pmd_internals *p = dev->data->dev_private;
2129         struct tm_hierarchy *h = &p->soft.tm.h;
2130         struct tm_params *t = &p->soft.tm.params;
2131         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2132                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2133
2134         uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
2135         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2136
2137         uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
2138         uint32_t pipe_profile_id = t->pipe_to_profile[pos];
2139
2140         return &t->pipe_profiles[pipe_profile_id];
2141 }
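
/*
 * Worked example (editorial addition): pipe_to_profile[] is a flat array
 * indexed by pos = subport_id * n_pipes_per_subport + pipe_id. With, say,
 * 2 subports of 8 pipes each (hypothetical numbers), pipe 3 of subport 1
 * sits at pos = 1 * 8 + 3 = 11; pipe_profile_mark() writes that slot and
 * pipe_profile_get() reads it back to find the installed profile.
 */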
2142
2143 static int
2144 pipe_profiles_generate(struct rte_eth_dev *dev)
2145 {
2146         struct pmd_internals *p = dev->data->dev_private;
2147         struct tm_hierarchy *h = &p->soft.tm.h;
2148         struct tm_node_list *nl = &h->nodes;
2149         struct tm_node *ns, *np;
2150         uint32_t subport_id;
2151
2152         /* Objective: Fill in the following fields in struct tm_params:
2153          *    - pipe_profiles
2154          *    - n_pipe_profiles
2155          *    - pipe_to_profile
2156          */
2157
2158         subport_id = 0;
2159         TAILQ_FOREACH(ns, nl, node) {
2160                 uint32_t pipe_id;
2161
2162                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2163                         continue;
2164
2165                 pipe_id = 0;
2166                 TAILQ_FOREACH(np, nl, node) {
2167                         struct rte_sched_pipe_params pp;
2168                         uint32_t pos;
2169
2170                         if (np->level != TM_NODE_LEVEL_PIPE ||
2171                                 np->parent_node_id != ns->node_id)
2172                                 continue;
2173
2174                         pipe_profile_build(dev, np, &pp);
2175
2176                         if (!pipe_profile_exists(dev, &pp, &pos)) {
2177                                 if (!pipe_profile_free_exists(dev, &pos))
2178                                         return -1;
2179
2180                                 pipe_profile_install(dev, &pp, pos);
2181                         }
2182
2183                         pipe_profile_mark(dev, subport_id, pipe_id, pos);
2184
2185                         pipe_id++;
2186                 }
2187
2188                 subport_id++;
2189         }
2190
2191         return 0;
2192 }
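
/*
 * Illustrative note (editorial addition): profiles are de-duplicated. If every
 * pipe node is built from identical shaper and weight settings, the loop above
 * installs a single profile: pipe_profile_exists() matches all later pipes
 * against pipe_profiles[0], n_pipe_profiles stays at 1, and only
 * pipe_to_profile[] gains one mapping per (subport, pipe) pair.
 */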
2193
2194 static struct tm_wred_profile *
2195 tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
2196 {
2197         struct pmd_internals *p = dev->data->dev_private;
2198         struct tm_hierarchy *h = &p->soft.tm.h;
2199         struct tm_node_list *nl = &h->nodes;
2200         struct tm_node *nq;
2201
2202         TAILQ_FOREACH(nq, nl, node) {
2203                 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2204                         nq->parent_node->priority != tc_id)
2205                         continue;
2206
2207                 return nq->wred_profile;
2208         }
2209
2210         return NULL;
2211 }
2212
2213 #ifdef RTE_SCHED_RED
2214
2215 static void
2216 wred_profiles_set(struct rte_eth_dev *dev)
2217 {
2218         struct pmd_internals *p = dev->data->dev_private;
2219         struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
2220         uint32_t tc_id;
2221         enum rte_color color;
2222
2223         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
2224                 for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
2225                         struct rte_red_params *dst =
2226                                 &pp->red_params[tc_id][color];
2227                         struct tm_wred_profile *src_wp =
2228                                 tm_tc_wred_profile_get(dev, tc_id);
2229                         struct rte_tm_red_params *src =
2230                                 &src_wp->params.red_params[color];
2231
2232                         memcpy(dst, src, sizeof(*dst));
2233                 }
2234 }
2235
2236 #else
2237
2238 #define wred_profiles_set(dev)
2239
2240 #endif
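
/*
 * Worked example (editorial addition): when RTE_SCHED_RED is enabled,
 * wred_profiles_set() fills every (traffic class, color) slot of
 * port_params.red_params, i.e. 4 TCs x 3 colors = 12 copies. Taking the WRED
 * profile of any queue under the TC is valid because hierarchy_commit_check()
 * below enforces that all queues of a given TC share the same profile.
 */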
2241
2242 static struct tm_shared_shaper *
2243 tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
2244 {
2245         return (tc_node->params.n_shared_shapers) ?
2246                 tm_shared_shaper_search(dev,
2247                         tc_node->params.shared_shaper_id[0]) :
2248                 NULL;
2249 }
2250
2251 static struct tm_shared_shaper *
2252 tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
2253         struct tm_node *subport_node,
2254         uint32_t tc_id)
2255 {
2256         struct pmd_internals *p = dev->data->dev_private;
2257         struct tm_node_list *nl = &p->soft.tm.h.nodes;
2258         struct tm_node *n;
2259
2260         TAILQ_FOREACH(n, nl, node) {
2261                 if (n->level != TM_NODE_LEVEL_TC ||
2262                         n->parent_node->parent_node_id !=
2263                                 subport_node->node_id ||
2264                         n->priority != tc_id)
2265                         continue;
2266
2267                 return tm_tc_shared_shaper_get(dev, n);
2268         }
2269
2270         return NULL;
2271 }
2272
2273 static int
2274 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2275 {
2276         struct pmd_internals *p = dev->data->dev_private;
2277         struct tm_hierarchy *h = &p->soft.tm.h;
2278         struct tm_node_list *nl = &h->nodes;
2279         struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2280         struct tm_wred_profile_list *wpl = &h->wred_profiles;
2281         struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2282         struct tm_shared_shaper *ss;
2283
2284         uint32_t n_pipes_per_subport;
2285
2286         /* Root node exists. */
2287         if (nr == NULL)
2288                 return -rte_tm_error_set(error,
2289                         EINVAL,
2290                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2291                         NULL,
2292                         rte_strerror(EINVAL));
2293
2294         /* There is at least one subport, max is not exceeded. */
2295         if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2296                 return -rte_tm_error_set(error,
2297                         EINVAL,
2298                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2299                         NULL,
2300                         rte_strerror(EINVAL));
2301
2302         /* There is at least one pipe. */
2303         if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2304                 return -rte_tm_error_set(error,
2305                         EINVAL,
2306                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2307                         NULL,
2308                         rte_strerror(EINVAL));
2309
2310         /* Number of pipes is the same for all subports. Maximum number of pipes
2311          * per subport is not exceeded.
2312          */
2313         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2314                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2315
2316         if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2317                 return -rte_tm_error_set(error,
2318                         EINVAL,
2319                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2320                         NULL,
2321                         rte_strerror(EINVAL));
2322
2323         TAILQ_FOREACH(ns, nl, node) {
2324                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2325                         continue;
2326
2327                 if (ns->n_children != n_pipes_per_subport)
2328                         return -rte_tm_error_set(error,
2329                                 EINVAL,
2330                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2331                                 NULL,
2332                                 rte_strerror(EINVAL));
2333         }
2334
2335         /* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
2336         TAILQ_FOREACH(np, nl, node) {
2337                 uint32_t mask = 0, mask_expected =
2338                         RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2339                                 uint32_t);
2340
2341                 if (np->level != TM_NODE_LEVEL_PIPE)
2342                         continue;
2343
2344                 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2345                         return -rte_tm_error_set(error,
2346                                 EINVAL,
2347                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2348                                 NULL,
2349                                 rte_strerror(EINVAL));
2350
2351                 TAILQ_FOREACH(nt, nl, node) {
2352                         if (nt->level != TM_NODE_LEVEL_TC ||
2353                                 nt->parent_node_id != np->node_id)
2354                                 continue;
2355
2356                         mask |= 1 << nt->priority;
2357                 }
2358
2359                 if (mask != mask_expected)
2360                         return -rte_tm_error_set(error,
2361                                 EINVAL,
2362                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2363                                 NULL,
2364                                 rte_strerror(EINVAL));
2365         }
2366
2367         /* Each TC has exactly 4 packet queues. */
2368         TAILQ_FOREACH(nt, nl, node) {
2369                 if (nt->level != TM_NODE_LEVEL_TC)
2370                         continue;
2371
2372                 if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
2373                         return -rte_tm_error_set(error,
2374                                 EINVAL,
2375                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2376                                 NULL,
2377                                 rte_strerror(EINVAL));
2378         }
2379
2380         /**
2381          * Shared shapers:
2382          *    -For each TC #i, all pipes in the same subport use the same
2383          *     shared shaper (or no shared shaper) for their TC#i.
2384          *    -Each shared shaper needs to have at least one user. All its
2385          *     users have to be TC nodes with the same priority and the same
2386          *     subport.
2387          */
2388         TAILQ_FOREACH(ns, nl, node) {
2389                 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2390                 uint32_t id;
2391
2392                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2393                         continue;
2394
2395                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2396                         s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2397
2398                 TAILQ_FOREACH(nt, nl, node) {
2399                         struct tm_shared_shaper *subport_ss, *tc_ss;
2400
2401                         if (nt->level != TM_NODE_LEVEL_TC ||
2402                                 nt->parent_node->parent_node_id !=
2403                                         ns->node_id)
2404                                 continue;
2405
2406                         subport_ss = s[nt->priority];
2407                         tc_ss = tm_tc_shared_shaper_get(dev, nt);
2408
2409                         if (subport_ss == NULL && tc_ss == NULL)
2410                                 continue;
2411
2412                         if ((subport_ss == NULL && tc_ss != NULL) ||
2413                                 (subport_ss != NULL && tc_ss == NULL) ||
2414                                 subport_ss->shared_shaper_id !=
2415                                         tc_ss->shared_shaper_id)
2416                                 return -rte_tm_error_set(error,
2417                                         EINVAL,
2418                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2419                                         NULL,
2420                                         rte_strerror(EINVAL));
2421                 }
2422         }
2423
2424         TAILQ_FOREACH(ss, ssl, node) {
2425                 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2426                 uint32_t n_users = 0;
2427
2428                 if (nt_any != NULL)
2429                         TAILQ_FOREACH(nt, nl, node) {
2430                                 if (nt->level != TM_NODE_LEVEL_TC ||
2431                                         nt->priority != nt_any->priority ||
2432                                         nt->parent_node->parent_node_id !=
2433                                         nt_any->parent_node->parent_node_id)
2434                                         continue;
2435
2436                                 n_users++;
2437                         }
2438
2439                 if (ss->n_users == 0 || ss->n_users != n_users)
2440                         return -rte_tm_error_set(error,
2441                                 EINVAL,
2442                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2443                                 NULL,
2444                                 rte_strerror(EINVAL));
2445         }
2446
2447	/* Pipe profiles can be generated without exceeding the maximum count. */
2448         if (pipe_profiles_generate(dev))
2449                 return -rte_tm_error_set(error,
2450                         EINVAL,
2451                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2452                         NULL,
2453                         rte_strerror(EINVAL));
2454
2455         /**
2456          * WRED (when used, i.e. at least one WRED profile defined):
2457          *    -Each WRED profile must have at least one user.
2458          *    -All leaf nodes must have their private WRED context enabled.
2459          *    -For each TC #i, all leaf nodes must use the same WRED profile
2460          *     for their private WRED context.
2461          */
2462         if (h->n_wred_profiles) {
2463                 struct tm_wred_profile *wp;
2464                 struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2465                 uint32_t id;
2466
2467                 TAILQ_FOREACH(wp, wpl, node)
2468                         if (wp->n_users == 0)
2469                                 return -rte_tm_error_set(error,
2470                                         EINVAL,
2471                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2472                                         NULL,
2473                                         rte_strerror(EINVAL));
2474
2475                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
2476                         w[id] = tm_tc_wred_profile_get(dev, id);
2477
2478                         if (w[id] == NULL)
2479                                 return -rte_tm_error_set(error,
2480                                         EINVAL,
2481                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2482                                         NULL,
2483                                         rte_strerror(EINVAL));
2484                 }
2485
2486                 TAILQ_FOREACH(nq, nl, node) {
2487                         uint32_t id;
2488
2489                         if (nq->level != TM_NODE_LEVEL_QUEUE)
2490                                 continue;
2491
2492                         id = nq->parent_node->priority;
2493
2494                         if (nq->wred_profile == NULL ||
2495                                 nq->wred_profile->wred_profile_id !=
2496                                         w[id]->wred_profile_id)
2497                                 return -rte_tm_error_set(error,
2498                                         EINVAL,
2499                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2500                                         NULL,
2501                                         rte_strerror(EINVAL));
2502                 }
2503         }
2504
2505         return 0;
2506 }
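
/*
 * Worked example (editorial addition): the checks above pin the hierarchy to a
 * fixed shape. With hypothetical numbers of 1 subport and 16 pipes, and the
 * 4 TCs x 4 queues layout enforced above, a valid hierarchy contains
 * 16 * 4 = 64 TC nodes and 64 * 4 = 256 queue (leaf) nodes.
 */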
2507
2508 static void
2509 hierarchy_blueprints_create(struct rte_eth_dev *dev)
2510 {
2511         struct pmd_internals *p = dev->data->dev_private;
2512         struct tm_params *t = &p->soft.tm.params;
2513         struct tm_hierarchy *h = &p->soft.tm.h;
2514
2515         struct tm_node_list *nl = &h->nodes;
2516         struct tm_node *root = tm_root_node_present(dev), *n;
2517
2518         uint32_t subport_id;
2519
2520         t->port_params = (struct rte_sched_port_params) {
2521                 .name = dev->data->name,
2522                 .socket = dev->data->numa_node,
2523                 .rate = root->shaper_profile->params.peak.rate,
2524                 .mtu = dev->data->mtu,
2525                 .frame_overhead =
2526                         root->shaper_profile->params.pkt_length_adjust,
2527                 .n_subports_per_port = root->n_children,
2528                 .n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2529                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
2530                 .qsize = {p->params.tm.qsize[0],
2531                         p->params.tm.qsize[1],
2532                         p->params.tm.qsize[2],
2533                         p->params.tm.qsize[3],
2534                 },
2535                 .pipe_profiles = t->pipe_profiles,
2536                 .n_pipe_profiles = t->n_pipe_profiles,
2537         };
2538
2539         wred_profiles_set(dev);
2540
2541         subport_id = 0;
2542         TAILQ_FOREACH(n, nl, node) {
2543                 uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2544                 uint32_t i;
2545
2546                 if (n->level != TM_NODE_LEVEL_SUBPORT)
2547                         continue;
2548
2549                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
2550                         struct tm_shared_shaper *ss;
2551                         struct tm_shaper_profile *sp;
2552
2553                         ss = tm_subport_tc_shared_shaper_get(dev, n, i);
2554                         sp = (ss) ? tm_shaper_profile_search(dev,
2555                                 ss->shaper_profile_id) :
2556                                 n->shaper_profile;
2557                         tc_rate[i] = sp->params.peak.rate;
2558                 }
2559
2560                 t->subport_params[subport_id] =
2561                         (struct rte_sched_subport_params) {
2562                                 .tb_rate = n->shaper_profile->params.peak.rate,
2563                                 .tb_size = n->shaper_profile->params.peak.size,
2564
2565                                 .tc_rate = {tc_rate[0],
2566                                         tc_rate[1],
2567                                         tc_rate[2],
2568                                         tc_rate[3],
2569                         },
2570                         .tc_period = SUBPORT_TC_PERIOD,
2571                 };
2572
2573                 subport_id++;
2574         }
2575 }
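
/*
 * Illustrative note (editorial addition): the blueprint maps TM shapers onto
 * rte_sched parameters: the root node's shaper peak rate and packet length
 * adjust become port_params.rate and frame_overhead, each subport's own shaper
 * provides tb_rate and tb_size, and tc_rate[i] falls back to that same subport
 * shaper rate whenever TC #i has no shared shaper.
 */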
2576
2577 /* Traffic manager hierarchy commit */
2578 static int
2579 pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
2580         int clear_on_fail,
2581         struct rte_tm_error *error)
2582 {
2583         struct pmd_internals *p = dev->data->dev_private;
2584         int status;
2585
2586         /* Checks */
2587         if (p->soft.tm.hierarchy_frozen)
2588                 return -rte_tm_error_set(error,
2589                         EBUSY,
2590                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2591                         NULL,
2592                         rte_strerror(EBUSY));
2593
2594         status = hierarchy_commit_check(dev, error);
2595         if (status) {
2596                 if (clear_on_fail)
2597                         tm_hierarchy_free(p);
2598
2599                 return status;
2600         }
2601
2602         /* Create blueprints */
2603         hierarchy_blueprints_create(dev);
2604
2605         /* Freeze hierarchy */
2606         p->soft.tm.hierarchy_frozen = 1;
2607
2608         return 0;
2609 }
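
/*
 * Illustrative sketch (editorial addition): application-side commit through
 * the generic API, with clear_on_fail set so that a rejected hierarchy is
 * freed instead of being left half-built. port_id is a hypothetical value.
 *
 *	struct rte_tm_error err = { 0 };
 *
 *	if (rte_tm_hierarchy_commit(port_id, 1, &err) != 0)
 *		printf("TM commit failed: %s\n",
 *			err.message ? err.message : "unspecified");
 */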
2610
2611 #ifdef RTE_SCHED_SUBPORT_TC_OV
2612
2613 static int
2614 update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
2615 {
2616         struct pmd_internals *p = dev->data->dev_private;
2617         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2618
2619         struct tm_node *ns = np->parent_node;
2620         uint32_t subport_id = tm_node_subport_id(dev, ns);
2621
2622         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2623         struct rte_sched_pipe_params profile1;
2624         uint32_t pipe_profile_id;
2625
2626         /* Derive new pipe profile. */
2627         memcpy(&profile1, profile0, sizeof(profile1));
2628         profile1.tc_ov_weight = (uint8_t)weight;
2629
2630	/* Since the implementation does not allow adding more pipe profiles after
2631          * port configuration, the pipe configuration can be successfully
2632          * updated only if the new profile is also part of the existing set of
2633          * pipe profiles.
2634          */
2635         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2636                 return -1;
2637
2638         /* Update the pipe profile used by the current pipe. */
2639         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2640                 (int32_t)pipe_profile_id))
2641                 return -1;
2642
2643         /* Commit changes. */
2644         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2645         np->weight = weight;
2646
2647         return 0;
2648 }
2649
2650 #endif
2651
2652 static int
2653 update_queue_weight(struct rte_eth_dev *dev,
2654         struct tm_node *nq, uint32_t weight)
2655 {
2656         struct pmd_internals *p = dev->data->dev_private;
2657         uint32_t queue_id = tm_node_queue_id(dev, nq);
2658
2659         struct tm_node *nt = nq->parent_node;
2660         uint32_t tc_id = tm_node_tc_id(dev, nt);
2661
2662         struct tm_node *np = nt->parent_node;
2663         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2664
2665         struct tm_node *ns = np->parent_node;
2666         uint32_t subport_id = tm_node_subport_id(dev, ns);
2667
2668         uint32_t pipe_queue_id =
2669                 tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
2670
2671         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2672         struct rte_sched_pipe_params profile1;
2673         uint32_t pipe_profile_id;
2674
2675         /* Derive new pipe profile. */
2676         memcpy(&profile1, profile0, sizeof(profile1));
2677         profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;
2678
2679	/* Since the implementation does not allow adding more pipe profiles after
2680          * port configuration, the pipe configuration can be successfully
2681          * updated only if the new profile is also part of the existing set
2682          * of pipe profiles.
2683          */
2684         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2685                 return -1;
2686
2687         /* Update the pipe profile used by the current pipe. */
2688         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2689                 (int32_t)pipe_profile_id))
2690                 return -1;
2691
2692         /* Commit changes. */
2693         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2694         nq->weight = weight;
2695
2696         return 0;
2697 }
2698
2699 /* Traffic manager node parent update */
2700 static int
2701 pmd_tm_node_parent_update(struct rte_eth_dev *dev,
2702         uint32_t node_id,
2703         uint32_t parent_node_id,
2704         uint32_t priority,
2705         uint32_t weight,
2706         struct rte_tm_error *error)
2707 {
2708         struct tm_node *n;
2709
2710         /* Port must be started and TM used. */
2711         if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
2712                 return -rte_tm_error_set(error,
2713                         EBUSY,
2714                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2715                         NULL,
2716                         rte_strerror(EBUSY));
2717
2718         /* Node must be valid */
2719         n = tm_node_search(dev, node_id);
2720         if (n == NULL)
2721                 return -rte_tm_error_set(error,
2722                         EINVAL,
2723                         RTE_TM_ERROR_TYPE_NODE_ID,
2724                         NULL,
2725                         rte_strerror(EINVAL));
2726
2727         /* Parent node must be the same */
2728         if (n->parent_node_id != parent_node_id)
2729                 return -rte_tm_error_set(error,
2730                         EINVAL,
2731                         RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
2732                         NULL,
2733                         rte_strerror(EINVAL));
2734
2735         /* Priority must be the same */
2736         if (n->priority != priority)
2737                 return -rte_tm_error_set(error,
2738                         EINVAL,
2739                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
2740                         NULL,
2741                         rte_strerror(EINVAL));
2742
2743	/* weight: must be non-zero and below UINT8_MAX (1 .. 254) */
2744         if (weight == 0 || weight >= UINT8_MAX)
2745                 return -rte_tm_error_set(error,
2746                         EINVAL,
2747                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2748                         NULL,
2749                         rte_strerror(EINVAL));
2750
2751         switch (n->level) {
2752         case TM_NODE_LEVEL_PORT:
2753                 return -rte_tm_error_set(error,
2754                         EINVAL,
2755                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2756                         NULL,
2757                         rte_strerror(EINVAL));
2758                 /* fall-through */
2759         case TM_NODE_LEVEL_SUBPORT:
2760                 return -rte_tm_error_set(error,
2761                         EINVAL,
2762                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2763                         NULL,
2764                         rte_strerror(EINVAL));
2765                 /* fall-through */
2766         case TM_NODE_LEVEL_PIPE:
2767 #ifdef RTE_SCHED_SUBPORT_TC_OV
2768                 if (update_pipe_weight(dev, n, weight))
2769                         return -rte_tm_error_set(error,
2770                                 EINVAL,
2771                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2772                                 NULL,
2773                                 rte_strerror(EINVAL));
2774                 return 0;
2775 #else
2776                 return -rte_tm_error_set(error,
2777                         EINVAL,
2778                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2779                         NULL,
2780                         rte_strerror(EINVAL));
2781 #endif
2782                 /* fall-through */
2783         case TM_NODE_LEVEL_TC:
2784                 return -rte_tm_error_set(error,
2785                         EINVAL,
2786                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2787                         NULL,
2788                         rte_strerror(EINVAL));
2789                 /* fall-through */
2790         case TM_NODE_LEVEL_QUEUE:
2791                 /* fall-through */
2792         default:
2793                 if (update_queue_weight(dev, n, weight))
2794                         return -rte_tm_error_set(error,
2795                                 EINVAL,
2796                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2797                                 NULL,
2798                                 rte_strerror(EINVAL));
2799                 return 0;
2800         }
2801 }
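
/*
 * Illustrative sketch (editorial addition): in this driver,
 * rte_tm_node_parent_update() is effectively a weight update; the parent node
 * ID and priority must be passed back unchanged, and only pipe (when
 * RTE_SCHED_SUBPORT_TC_OV is enabled) and queue nodes accept it. Example with
 * hypothetical IDs, setting a queue node's WRR weight to 32:
 *
 *	rte_tm_node_parent_update(port_id, queue_node_id,
 *		current_parent_node_id, 0, 32, &err);
 *
 * The call succeeds only if a pipe profile with the resulting weights was
 * already installed at commit time (see update_queue_weight() above).
 */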
2802
2803 static int
2804 update_subport_rate(struct rte_eth_dev *dev,
2805         struct tm_node *ns,
2806         struct tm_shaper_profile *sp)
2807 {
2808         struct pmd_internals *p = dev->data->dev_private;
2809         uint32_t subport_id = tm_node_subport_id(dev, ns);
2810
2811         struct rte_sched_subport_params subport_params;
2812
2813         /* Derive new subport configuration. */
2814         memcpy(&subport_params,
2815                 &p->soft.tm.params.subport_params[subport_id],
2816                 sizeof(subport_params));
2817         subport_params.tb_rate = sp->params.peak.rate;
2818         subport_params.tb_size = sp->params.peak.size;
2819
2820         /* Update the subport configuration. */
2821         if (rte_sched_subport_config(SCHED(p), subport_id,
2822                 &subport_params))
2823                 return -1;
2824
2825         /* Commit changes. */
2826         ns->shaper_profile->n_users--;
2827
2828         ns->shaper_profile = sp;
2829         ns->params.shaper_profile_id = sp->shaper_profile_id;
2830         sp->n_users++;
2831
2832         memcpy(&p->soft.tm.params.subport_params[subport_id],
2833                 &subport_params,
2834                 sizeof(subport_params));
2835
2836         return 0;
2837 }
2838
2839 static int
2840 update_pipe_rate(struct rte_eth_dev *dev,
2841         struct tm_node *np,
2842         struct tm_shaper_profile *sp)
2843 {
2844         struct pmd_internals *p = dev->data->dev_private;
2845         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2846
2847         struct tm_node *ns = np->parent_node;
2848         uint32_t subport_id = tm_node_subport_id(dev, ns);
2849
2850         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2851         struct rte_sched_pipe_params profile1;
2852         uint32_t pipe_profile_id;
2853
2854         /* Derive new pipe profile. */
2855         memcpy(&profile1, profile0, sizeof(profile1));
2856         profile1.tb_rate = sp->params.peak.rate;
2857         profile1.tb_size = sp->params.peak.size;
2858
2859	/* Since the implementation does not allow adding more pipe profiles after
2860          * port configuration, the pipe configuration can be successfully
2861          * updated only if the new profile is also part of the existing set of
2862          * pipe profiles.
2863          */
2864         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2865                 return -1;
2866
2867         /* Update the pipe profile used by the current pipe. */
2868         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2869                 (int32_t)pipe_profile_id))
2870                 return -1;
2871
2872         /* Commit changes. */
2873         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2874         np->shaper_profile->n_users--;
2875         np->shaper_profile = sp;
2876         np->params.shaper_profile_id = sp->shaper_profile_id;
2877         sp->n_users++;
2878
2879         return 0;
2880 }
2881
2882 static int
2883 update_tc_rate(struct rte_eth_dev *dev,
2884         struct tm_node *nt,
2885         struct tm_shaper_profile *sp)
2886 {
2887         struct pmd_internals *p = dev->data->dev_private;
2888         uint32_t tc_id = tm_node_tc_id(dev, nt);
2889
2890         struct tm_node *np = nt->parent_node;
2891         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2892
2893         struct tm_node *ns = np->parent_node;
2894         uint32_t subport_id = tm_node_subport_id(dev, ns);
2895
2896         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2897         struct rte_sched_pipe_params profile1;
2898         uint32_t pipe_profile_id;
2899
2900         /* Derive new pipe profile. */
2901         memcpy(&profile1, profile0, sizeof(profile1));
2902         profile1.tc_rate[tc_id] = sp->params.peak.rate;
2903
2904	/* Since the implementation does not allow adding more pipe profiles after
2905          * port configuration, the pipe configuration can be successfully
2906          * updated only if the new profile is also part of the existing set of
2907          * pipe profiles.
2908          */
2909         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2910                 return -1;
2911
2912         /* Update the pipe profile used by the current pipe. */
2913         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2914                 (int32_t)pipe_profile_id))
2915                 return -1;
2916
2917         /* Commit changes. */
2918         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2919         nt->shaper_profile->n_users--;
2920         nt->shaper_profile = sp;
2921         nt->params.shaper_profile_id = sp->shaper_profile_id;
2922         sp->n_users++;
2923
2924         return 0;
2925 }
2926
2927 /* Traffic manager node shaper update */
2928 static int
2929 pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
2930         uint32_t node_id,
2931         uint32_t shaper_profile_id,
2932         struct rte_tm_error *error)
2933 {
2934         struct tm_node *n;
2935         struct tm_shaper_profile *sp;
2936
2937         /* Port must be started and TM used. */
2938         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
2939                 return -rte_tm_error_set(error,
2940                         EBUSY,
2941                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2942                         NULL,
2943                         rte_strerror(EBUSY));
2944
2945         /* Node must be valid */
2946         n = tm_node_search(dev, node_id);
2947         if (n == NULL)
2948                 return -rte_tm_error_set(error,
2949                         EINVAL,
2950                         RTE_TM_ERROR_TYPE_NODE_ID,
2951                         NULL,
2952                         rte_strerror(EINVAL));
2953
2954         /* Shaper profile must be valid. */
2955         sp = tm_shaper_profile_search(dev, shaper_profile_id);
2956         if (sp == NULL)
2957                 return -rte_tm_error_set(error,
2958                         EINVAL,
2959                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
2960                         NULL,
2961                         rte_strerror(EINVAL));
2962
2963         switch (n->level) {
2964         case TM_NODE_LEVEL_PORT:
2965                 return -rte_tm_error_set(error,
2966                         EINVAL,
2967                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2968                         NULL,
2969                         rte_strerror(EINVAL));
2971         case TM_NODE_LEVEL_SUBPORT:
2972                 if (update_subport_rate(dev, n, sp))
2973                         return -rte_tm_error_set(error,
2974                                 EINVAL,
2975                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2976                                 NULL,
2977                                 rte_strerror(EINVAL));
2978                 return 0;
2980         case TM_NODE_LEVEL_PIPE:
2981                 if (update_pipe_rate(dev, n, sp))
2982                         return -rte_tm_error_set(error,
2983                                 EINVAL,
2984                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2985                                 NULL,
2986                                 rte_strerror(EINVAL));
2987                 return 0;
2989         case TM_NODE_LEVEL_TC:
2990                 if (update_tc_rate(dev, n, sp))
2991                         return -rte_tm_error_set(error,
2992                                 EINVAL,
2993                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2994                                 NULL,
2995                                 rte_strerror(EINVAL));
2996                 return 0;
2998         case TM_NODE_LEVEL_QUEUE:
2999                 /* fall-through */
3000         default:
3001                 return -rte_tm_error_set(error,
3002                         EINVAL,
3003                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3004                         NULL,
3005                         rte_strerror(EINVAL));
3006         }
3007 }
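
/*
 * Usage sketch (application side, not part of the driver): updating the rate of
 * a pipe-level node at run time through the generic rte_tm API, which ends up
 * in pmd_tm_node_shaper_update() above. Assumes <rte_tm.h>; the port id, node
 * id, profile id and rate values are hypothetical. Per the constraint
 * documented above, the new profile must map onto one of the pipe profiles
 * known before hierarchy commit, otherwise the update is rejected.
 */
static int
example_update_pipe_node_rate(uint16_t port_id, uint32_t pipe_node_id)
{
	struct rte_tm_shaper_params profile = {
		.committed = { .rate = 1000000, .size = 100000 },  /* bytes/s, bytes */
		.peak = { .rate = 10000000, .size = 1000000 },     /* bytes/s, bytes */
		.pkt_length_adjust = 24, /* Per-packet framing overhead */
	};
	struct rte_tm_error error;
	uint32_t profile_id = 100; /* Hypothetical, must be a free profile id */
	int status;

	status = rte_tm_shaper_profile_add(port_id, profile_id, &profile, &error);
	if (status)
		return status;

	return rte_tm_node_shaper_update(port_id, pipe_node_id, profile_id,
		&error);
}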
3008
3009 static inline uint32_t
3010 tm_port_queue_id(struct rte_eth_dev *dev,
3011         uint32_t port_subport_id,
3012         uint32_t subport_pipe_id,
3013         uint32_t pipe_tc_id,
3014         uint32_t tc_queue_id)
3015 {
3016         struct pmd_internals *p = dev->data->dev_private;
3017         struct tm_hierarchy *h = &p->soft.tm.h;
3018         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
3019                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3020
3021         uint32_t port_pipe_id =
3022                 port_subport_id * n_pipes_per_subport + subport_pipe_id;
3023         uint32_t port_tc_id =
3024                 port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
3025         uint32_t port_queue_id =
3026                 port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;
3027
3028         return port_queue_id;
3029 }
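
/*
 * Worked example (illustration only): with n_pipes_per_subport = 4096 and the
 * classic rte_sched layout of 4 traffic classes per pipe and 4 queues per
 * traffic class (the actual values come from the RTE_SCHED_* macros of the
 * DPDK release in use), the coordinates subport 1, pipe 2, tc 3, queue 0 map to:
 *
 *   port_pipe_id  = 1 * 4096 + 2  = 4098
 *   port_tc_id    = 4098 * 4 + 3  = 16395
 *   port_queue_id = 16395 * 4 + 0 = 65580
 */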
3030
3031 static int
3032 read_port_stats(struct rte_eth_dev *dev,
3033         struct tm_node *nr,
3034         struct rte_tm_node_stats *stats,
3035         uint64_t *stats_mask,
3036         int clear)
3037 {
3038         struct pmd_internals *p = dev->data->dev_private;
3039         struct tm_hierarchy *h = &p->soft.tm.h;
3040         uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3041         uint32_t subport_id;
3042
3043         for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
3044                 struct rte_sched_subport_stats s;
3045                 uint32_t tc_ov, id;
3046
3047                 /* Stats read */
3048                 int status = rte_sched_subport_read_stats(SCHED(p),
3049                         subport_id,
3050                         &s,
3051                         &tc_ov);
3052                 if (status)
3053                         return status;
3054
3055                 /* Stats accumulate */
3056                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
3057                         nr->stats.n_pkts +=
3058                                 s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
3059                         nr->stats.n_bytes +=
3060                                 s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
3061                         nr->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
3062                                 s.n_pkts_tc_dropped[id];
3063                         nr->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3064                                 s.n_bytes_tc_dropped[id];
3065                 }
3066         }
3067
3068         /* Stats copy */
3069         if (stats)
3070                 memcpy(stats, &nr->stats, sizeof(*stats));
3071
3072         if (stats_mask)
3073                 *stats_mask = STATS_MASK_DEFAULT;
3074
3075         /* Stats clear */
3076         if (clear)
3077                 memset(&nr->stats, 0, sizeof(nr->stats));
3078
3079         return 0;
3080 }
3081
3082 static int
3083 read_subport_stats(struct rte_eth_dev *dev,
3084         struct tm_node *ns,
3085         struct rte_tm_node_stats *stats,
3086         uint64_t *stats_mask,
3087         int clear)
3088 {
3089         struct pmd_internals *p = dev->data->dev_private;
3090         uint32_t subport_id = tm_node_subport_id(dev, ns);
3091         struct rte_sched_subport_stats s;
3092         uint32_t tc_ov, tc_id;
3093
3094         /* Stats read */
3095         int status = rte_sched_subport_read_stats(SCHED(p),
3096                 subport_id,
3097                 &s,
3098                 &tc_ov);
3099         if (status)
3100                 return status;
3101
3102         /* Stats accumulate */
3103         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
3104                 ns->stats.n_pkts +=
3105                         s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
3106                 ns->stats.n_bytes +=
3107                         s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
3108                 ns->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
3109                         s.n_pkts_tc_dropped[tc_id];
3110                 ns->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3111                         s.n_bytes_tc_dropped[tc_id];
3112         }
3113
3114         /* Stats copy */
3115         if (stats)
3116                 memcpy(stats, &ns->stats, sizeof(*stats));
3117
3118         if (stats_mask)
3119                 *stats_mask = STATS_MASK_DEFAULT;
3120
3121         /* Stats clear */
3122         if (clear)
3123                 memset(&ns->stats, 0, sizeof(ns->stats));
3124
3125         return 0;
3126 }
3127
3128 static int
3129 read_pipe_stats(struct rte_eth_dev *dev,
3130         struct tm_node *np,
3131         struct rte_tm_node_stats *stats,
3132         uint64_t *stats_mask,
3133         int clear)
3134 {
3135         struct pmd_internals *p = dev->data->dev_private;
3136
3137         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3138
3139         struct tm_node *ns = np->parent_node;
3140         uint32_t subport_id = tm_node_subport_id(dev, ns);
3141
3142         uint32_t i;
3143
3144         /* Stats read */
3145         for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
3146                 struct rte_sched_queue_stats s;
3147                 uint16_t qlen;
3148
3149                 uint32_t qid = tm_port_queue_id(dev,
3150                         subport_id,
3151                         pipe_id,
3152                         i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
3153                         i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
3154
3155                 int status = rte_sched_queue_read_stats(SCHED(p),
3156                         qid,
3157                         &s,
3158                         &qlen);
3159                 if (status)
3160                         return status;
3161
3162                 /* Stats accumulate */
3163                 np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3164                 np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3165                 np->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3166                 np->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3167                         s.n_bytes_dropped;
3168                 np->stats.leaf.n_pkts_queued = qlen;
3169         }
3170
3171         /* Stats copy */
3172         if (stats)
3173                 memcpy(stats, &np->stats, sizeof(*stats));
3174
3175         if (stats_mask)
3176                 *stats_mask = STATS_MASK_DEFAULT;
3177
3178         /* Stats clear */
3179         if (clear)
3180                 memset(&np->stats, 0, sizeof(np->stats));
3181
3182         return 0;
3183 }
3184
3185 static int
3186 read_tc_stats(struct rte_eth_dev *dev,
3187         struct tm_node *nt,
3188         struct rte_tm_node_stats *stats,
3189         uint64_t *stats_mask,
3190         int clear)
3191 {
3192         struct pmd_internals *p = dev->data->dev_private;
3193
3194         uint32_t tc_id = tm_node_tc_id(dev, nt);
3195
3196         struct tm_node *np = nt->parent_node;
3197         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3198
3199         struct tm_node *ns = np->parent_node;
3200         uint32_t subport_id = tm_node_subport_id(dev, ns);
3201
3202         uint32_t i;
3203
3204         /* Stats read */
3205         for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
3206                 struct rte_sched_queue_stats s;
3207                 uint16_t qlen;
3208
3209                 uint32_t qid = tm_port_queue_id(dev,
3210                         subport_id,
3211                         pipe_id,
3212                         tc_id,
3213                         i);
3214
3215                 int status = rte_sched_queue_read_stats(SCHED(p),
3216                         qid,
3217                         &s,
3218                         &qlen);
3219                 if (status)
3220                         return status;
3221
3222                 /* Stats accumulate */
3223                 nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3224                 nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3225                 nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3226                 nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3227                         s.n_bytes_dropped;
3228                 nt->stats.leaf.n_pkts_queued = qlen;
3229         }
3230
3231         /* Stats copy */
3232         if (stats)
3233                 memcpy(stats, &nt->stats, sizeof(*stats));
3234
3235         if (stats_mask)
3236                 *stats_mask = STATS_MASK_DEFAULT;
3237
3238         /* Stats clear */
3239         if (clear)
3240                 memset(&nt->stats, 0, sizeof(nt->stats));
3241
3242         return 0;
3243 }
3244
3245 static int
3246 read_queue_stats(struct rte_eth_dev *dev,
3247         struct tm_node *nq,
3248         struct rte_tm_node_stats *stats,
3249         uint64_t *stats_mask,
3250         int clear)
3251 {
3252         struct pmd_internals *p = dev->data->dev_private;
3253         struct rte_sched_queue_stats s;
3254         uint16_t qlen;
3255
3256         uint32_t queue_id = tm_node_queue_id(dev, nq);
3257
3258         struct tm_node *nt = nq->parent_node;
3259         uint32_t tc_id = tm_node_tc_id(dev, nt);
3260
3261         struct tm_node *np = nt->parent_node;
3262         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3263
3264         struct tm_node *ns = np->parent_node;
3265         uint32_t subport_id = tm_node_subport_id(dev, ns);
3266
3267         /* Stats read */
3268         uint32_t qid = tm_port_queue_id(dev,
3269                 subport_id,
3270                 pipe_id,
3271                 tc_id,
3272                 queue_id);
3273
3274         int status = rte_sched_queue_read_stats(SCHED(p),
3275                 qid,
3276                 &s,
3277                 &qlen);
3278         if (status)
3279                 return status;
3280
3281         /* Stats accumulate */
3282         nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3283         nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3284         nq->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3285         nq->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3286                 s.n_bytes_dropped;
3287         nq->stats.leaf.n_pkts_queued = qlen;
3288
3289         /* Stats copy */
3290         if (stats)
3291                 memcpy(stats, &nq->stats, sizeof(*stats));
3292
3293         if (stats_mask)
3294                 *stats_mask = STATS_MASK_QUEUE;
3295
3296         /* Stats clear */
3297         if (clear)
3298                 memset(&nq->stats, 0, sizeof(nq->stats));
3299
3300         return 0;
3301 }
3302
3303 /* Traffic manager: read stats counters for a specific node */
3304 static int
3305 pmd_tm_node_stats_read(struct rte_eth_dev *dev,
3306         uint32_t node_id,
3307         struct rte_tm_node_stats *stats,
3308         uint64_t *stats_mask,
3309         int clear,
3310         struct rte_tm_error *error)
3311 {
3312         struct tm_node *n;
3313
3314         /* Port must be started and TM used. */
3315         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
3316                 return -rte_tm_error_set(error,
3317                         EBUSY,
3318                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3319                         NULL,
3320                         rte_strerror(EBUSY));
3321
3322         /* Node must be valid */
3323         n = tm_node_search(dev, node_id);
3324         if (n == NULL)
3325                 return -rte_tm_error_set(error,
3326                         EINVAL,
3327                         RTE_TM_ERROR_TYPE_NODE_ID,
3328                         NULL,
3329                         rte_strerror(EINVAL));
3330
3331         switch (n->level) {
3332         case TM_NODE_LEVEL_PORT:
3333                 if (read_port_stats(dev, n, stats, stats_mask, clear))
3334                         return -rte_tm_error_set(error,
3335                                 EINVAL,
3336                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3337                                 NULL,
3338                                 rte_strerror(EINVAL));
3339                 return 0;
3340
3341         case TM_NODE_LEVEL_SUBPORT:
3342                 if (read_subport_stats(dev, n, stats, stats_mask, clear))
3343                         return -rte_tm_error_set(error,
3344                                 EINVAL,
3345                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3346                                 NULL,
3347                                 rte_strerror(EINVAL));
3348                 return 0;
3349
3350         case TM_NODE_LEVEL_PIPE:
3351                 if (read_pipe_stats(dev, n, stats, stats_mask, clear))
3352                         return -rte_tm_error_set(error,
3353                                 EINVAL,
3354                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3355                                 NULL,
3356                                 rte_strerror(EINVAL));
3357                 return 0;
3358
3359         case TM_NODE_LEVEL_TC:
3360                 if (read_tc_stats(dev, n, stats, stats_mask, clear))
3361                         return -rte_tm_error_set(error,
3362                                 EINVAL,
3363                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3364                                 NULL,
3365                                 rte_strerror(EINVAL));
3366                 return 0;
3367
3368         case TM_NODE_LEVEL_QUEUE:
3369         default:
3370                 if (read_queue_stats(dev, n, stats, stats_mask, clear))
3371                         return -rte_tm_error_set(error,
3372                                 EINVAL,
3373                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3374                                 NULL,
3375                                 rte_strerror(EINVAL));
3376                 return 0;
3377         }
3378 }
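
/*
 * Usage sketch (application side, not part of the driver): reading and clearing
 * the counters of a queue-level node through the generic rte_tm API, which ends
 * up in pmd_tm_node_stats_read() above. Assumes <stdio.h>, <inttypes.h> and
 * <rte_tm.h>; the port id and node id are hypothetical.
 */
static int
example_read_queue_node_stats(uint16_t port_id, uint32_t queue_node_id)
{
	struct rte_tm_node_stats stats;
	struct rte_tm_error error;
	uint64_t stats_mask = 0;
	int status;

	/* clear = 1: the node counters are reset after being read. */
	status = rte_tm_node_stats_read(port_id, queue_node_id, &stats,
		&stats_mask, 1, &error);
	if (status)
		return status;

	/* Only the fields flagged in stats_mask are meaningful. */
	if (stats_mask & RTE_TM_STATS_N_PKTS)
		printf("pkts: %" PRIu64 "\n", stats.n_pkts);
	if (stats_mask & RTE_TM_STATS_N_PKTS_QUEUED)
		printf("queued: %" PRIu64 "\n", stats.leaf.n_pkts_queued);

	return 0;
}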
3379
3380 const struct rte_tm_ops pmd_tm_ops = {
3381         .node_type_get = pmd_tm_node_type_get,
3382         .capabilities_get = pmd_tm_capabilities_get,
3383         .level_capabilities_get = pmd_tm_level_capabilities_get,
3384         .node_capabilities_get = pmd_tm_node_capabilities_get,
3385
3386         .wred_profile_add = pmd_tm_wred_profile_add,
3387         .wred_profile_delete = pmd_tm_wred_profile_delete,
3388         .shared_wred_context_add_update = NULL,
3389         .shared_wred_context_delete = NULL,
3390
3391         .shaper_profile_add = pmd_tm_shaper_profile_add,
3392         .shaper_profile_delete = pmd_tm_shaper_profile_delete,
3393         .shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
3394         .shared_shaper_delete = pmd_tm_shared_shaper_delete,
3395
3396         .node_add = pmd_tm_node_add,
3397         .node_delete = pmd_tm_node_delete,
3398         .node_suspend = NULL,
3399         .node_resume = NULL,
3400         .hierarchy_commit = pmd_tm_hierarchy_commit,
3401
3402         .node_parent_update = pmd_tm_node_parent_update,
3403         .node_shaper_update = pmd_tm_node_shaper_update,
3404         .node_shared_shaper_update = NULL,
3405         .node_stats_update = NULL,
3406         .node_wfq_weight_mode_update = NULL,
3407         .node_cman_update = NULL,
3408         .node_wred_context_update = NULL,
3409         .node_shared_wred_context_update = NULL,
3410
3411         .node_stats_read = pmd_tm_node_stats_read,
3412 };
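
/*
 * Usage sketch (application side, not part of the driver): the ops table above
 * is what the generic rte_tm layer dispatches to for this port, so applications
 * never call the pmd_tm_* functions directly. For example, querying the traffic
 * manager capabilities (assumes <rte_tm.h>; the port id is hypothetical):
 */
static int
example_query_tm_capabilities(uint16_t port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error error;
	int status;

	/* Resolves to pmd_tm_capabilities_get() through pmd_tm_ops. */
	status = rte_tm_capabilities_get(port_id, &cap, &error);
	if (status)
		return status;

	/* cap.n_levels_max, cap.n_nodes_max, etc. describe the hierarchy
	 * supported by this driver (port/subport/pipe/tc/queue levels).
	 */
	return 0;
}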