sched: update subport rate dynamically
dpdk.git: drivers/net/softnic/rte_eth_softnic_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdlib.h>
7 #include <string.h>
8
9 #include <rte_malloc.h>
10 #include <rte_string_fns.h>
11
12 #include "rte_eth_softnic_internals.h"
13 #include "rte_eth_softnic.h"
14
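/*
 * Default traffic class periods, presumably applied to the subport and
 * pipe profiles when the hierarchy is committed; per the rte_sched
 * tc_period convention these values are assumed to be in milliseconds.
 */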
15 #define SUBPORT_TC_PERIOD       10
16 #define PIPE_TC_PERIOD          40
17
18 int
19 softnic_tmgr_init(struct pmd_internals *p)
20 {
21         TAILQ_INIT(&p->tmgr_port_list);
22
23         return 0;
24 }
25
26 void
27 softnic_tmgr_free(struct pmd_internals *p)
28 {
29         for ( ; ; ) {
30                 struct softnic_tmgr_port *tmgr_port;
31
32                 tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
33                 if (tmgr_port == NULL)
34                         break;
35
36                 TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
37                 rte_sched_port_free(tmgr_port->s);
38                 free(tmgr_port);
39         }
40 }
41
42 struct softnic_tmgr_port *
43 softnic_tmgr_port_find(struct pmd_internals *p,
44         const char *name)
45 {
46         struct softnic_tmgr_port *tmgr_port;
47
48         if (name == NULL)
49                 return NULL;
50
51         TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
52                 if (strcmp(tmgr_port->name, name) == 0)
53                         return tmgr_port;
54
55         return NULL;
56 }
57
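/*
 * Create the rte_sched scheduler behind a tmgr port. The TM hierarchy
 * must already be frozen (committed): the port, subport and pipe
 * configuration below is derived entirely from the parameters computed
 * at hierarchy commit time.
 */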
58 struct softnic_tmgr_port *
59 softnic_tmgr_port_create(struct pmd_internals *p,
60         const char *name)
61 {
62         struct softnic_tmgr_port *tmgr_port;
63         struct tm_params *t = &p->soft.tm.params;
64         struct rte_sched_port *sched;
65         uint32_t n_subports, subport_id;
66
67         /* Check input params */
68         if (name == NULL ||
69                 softnic_tmgr_port_find(p, name))
70                 return NULL;
71
72         /*
73          * Resource creation
74          */
75
76         /* Is hierarchy frozen? */
77         if (p->soft.tm.hierarchy_frozen == 0)
78                 return NULL;
79
80         /* Port */
81         sched = rte_sched_port_config(&t->port_params);
82         if (sched == NULL)
83                 return NULL;
84
85         /* Subport */
86         n_subports = t->port_params.n_subports_per_port;
87         for (subport_id = 0; subport_id < n_subports; subport_id++) {
88                 uint32_t n_pipes_per_subport =
89                         t->subport_params[subport_id].n_pipes_per_subport_enabled;
90                 uint32_t pipe_id;
91                 int status;
92
93                 status = rte_sched_subport_config(sched,
94                         subport_id,
95                         &t->subport_params[subport_id], 0);
96                 if (status) {
97                         rte_sched_port_free(sched);
98                         return NULL;
99                 }
100
101                 /* Pipe */
102                 for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
103                         int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
104                         int profile_id = t->pipe_to_profile[pos];
105
106                         if (profile_id < 0)
107                                 continue;
108
109                         status = rte_sched_pipe_config(sched,
110                                 subport_id,
111                                 pipe_id,
112                                 profile_id);
113                         if (status) {
114                                 rte_sched_port_free(sched);
115                                 return NULL;
116                         }
117                 }
118         }
119
120         /* Node allocation */
121         tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
122         if (tmgr_port == NULL) {
123                 rte_sched_port_free(sched);
124                 return NULL;
125         }
126
127         /* Node fill in */
128         strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
129         tmgr_port->s = sched;
130
131         /* Node add to list */
132         TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);
133
134         return tmgr_port;
135 }
136
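/*
 * Look up the rte_sched port owned by the tmgr port named "TMGR";
 * run-time updates such as update_subport_tc_rate() below are applied
 * to this scheduler instance.
 */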
137 static struct rte_sched_port *
138 SCHED(struct pmd_internals *p)
139 {
140         struct softnic_tmgr_port *tmgr_port;
141
142         tmgr_port = softnic_tmgr_port_find(p, "TMGR");
143         if (tmgr_port == NULL)
144                 return NULL;
145
146         return tmgr_port->s;
147 }
148
149 void
150 tm_hierarchy_init(struct pmd_internals *p)
151 {
152         memset(&p->soft.tm, 0, sizeof(p->soft.tm));
153
154         /* Initialize shaper profile list */
155         TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
156
157         /* Initialize shared shaper list */
158         TAILQ_INIT(&p->soft.tm.h.shared_shapers);
159
160         /* Initialize wred profile list */
161         TAILQ_INIT(&p->soft.tm.h.wred_profiles);
162
163         /* Initialize TM node list */
164         TAILQ_INIT(&p->soft.tm.h.nodes);
165 }
166
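/*
 * Free the hierarchy in reverse dependency order (nodes, WRED profiles,
 * shared shapers, shaper profiles) and re-initialize the empty lists.
 */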
167 void
168 tm_hierarchy_free(struct pmd_internals *p)
169 {
170         /* Remove all nodes */
171         for ( ; ; ) {
172                 struct tm_node *tm_node;
173
174                 tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
175                 if (tm_node == NULL)
176                         break;
177
178                 TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
179                 free(tm_node);
180         }
181
182         /* Remove all WRED profiles */
183         for ( ; ; ) {
184                 struct tm_wred_profile *wred_profile;
185
186                 wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
187                 if (wred_profile == NULL)
188                         break;
189
190                 TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
191                 free(wred_profile);
192         }
193
194         /* Remove all shared shapers */
195         for ( ; ; ) {
196                 struct tm_shared_shaper *shared_shaper;
197
198                 shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
199                 if (shared_shaper == NULL)
200                         break;
201
202                 TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
203                 free(shared_shaper);
204         }
205
206         /* Remove all shaper profiles */
207         for ( ; ; ) {
208                 struct tm_shaper_profile *shaper_profile;
209
210                 shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
211                 if (shaper_profile == NULL)
212                         break;
213
214                 TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
215                         shaper_profile, node);
216                 free(shaper_profile);
217         }
218
219         tm_hierarchy_init(p);
220 }
221
222 static struct tm_shaper_profile *
223 tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
224 {
225         struct pmd_internals *p = dev->data->dev_private;
226         struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
227         struct tm_shaper_profile *sp;
228
229         TAILQ_FOREACH(sp, spl, node)
230                 if (shaper_profile_id == sp->shaper_profile_id)
231                         return sp;
232
233         return NULL;
234 }
235
236 static struct tm_shared_shaper *
237 tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
238 {
239         struct pmd_internals *p = dev->data->dev_private;
240         struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
241         struct tm_shared_shaper *ss;
242
243         TAILQ_FOREACH(ss, ssl, node)
244                 if (shared_shaper_id == ss->shared_shaper_id)
245                         return ss;
246
247         return NULL;
248 }
249
250 static struct tm_wred_profile *
251 tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
252 {
253         struct pmd_internals *p = dev->data->dev_private;
254         struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
255         struct tm_wred_profile *wp;
256
257         TAILQ_FOREACH(wp, wpl, node)
258                 if (wred_profile_id == wp->wred_profile_id)
259                         return wp;
260
261         return NULL;
262 }
263
264 static struct tm_node *
265 tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
266 {
267         struct pmd_internals *p = dev->data->dev_private;
268         struct tm_node_list *nl = &p->soft.tm.h.nodes;
269         struct tm_node *n;
270
271         TAILQ_FOREACH(n, nl, node)
272                 if (n->node_id == node_id)
273                         return n;
274
275         return NULL;
276 }
277
278 static struct tm_node *
279 tm_root_node_present(struct rte_eth_dev *dev)
280 {
281         struct pmd_internals *p = dev->data->dev_private;
282         struct tm_node_list *nl = &p->soft.tm.h.nodes;
283         struct tm_node *n;
284
285         TAILQ_FOREACH(n, nl, node)
286                 if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
287                         return n;
288
289         return NULL;
290 }
291
292 static uint32_t
293 tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
294 {
295         struct pmd_internals *p = dev->data->dev_private;
296         struct tm_node_list *nl = &p->soft.tm.h.nodes;
297         struct tm_node *ns;
298         uint32_t subport_id;
299
300         subport_id = 0;
301         TAILQ_FOREACH(ns, nl, node) {
302                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
303                         continue;
304
305                 if (ns->node_id == subport_node->node_id)
306                         return subport_id;
307
308                 subport_id++;
309         }
310
311         return UINT32_MAX;
312 }
313
314 static uint32_t
315 tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
316 {
317         struct pmd_internals *p = dev->data->dev_private;
318         struct tm_node_list *nl = &p->soft.tm.h.nodes;
319         struct tm_node *np;
320         uint32_t pipe_id;
321
322         pipe_id = 0;
323         TAILQ_FOREACH(np, nl, node) {
324                 if (np->level != TM_NODE_LEVEL_PIPE ||
325                         np->parent_node_id != pipe_node->parent_node_id)
326                         continue;
327
328                 if (np->node_id == pipe_node->node_id)
329                         return pipe_id;
330
331                 pipe_id++;
332         }
333
334         return UINT32_MAX;
335 }
336
337 static uint32_t
338 tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
339 {
340         return tc_node->priority;
341 }
342
343 static uint32_t
344 tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
345 {
346         struct pmd_internals *p = dev->data->dev_private;
347         struct tm_node_list *nl = &p->soft.tm.h.nodes;
348         struct tm_node *nq;
349         uint32_t queue_id;
350
351         queue_id = 0;
352         TAILQ_FOREACH(nq, nl, node) {
353                 if (nq->level != TM_NODE_LEVEL_QUEUE ||
354                         nq->parent_node_id != queue_node->parent_node_id)
355                         continue;
356
357                 if (nq->node_id == queue_node->node_id)
358                         return queue_id;
359
360                 queue_id++;
361         }
362
363         return UINT32_MAX;
364 }
365
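/*
 * Illustrative sizing (hypothetical n_queues = 4096), assuming the
 * current rte_sched layout of 16 queues and 13 traffic classes per pipe:
 * the function below yields 4096 queues, 4096 * 13 / 16 = 3328 TC nodes,
 * 3328 / 13 = 256 pipes, 256 subports and 1 port node.
 */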
366 static uint32_t
367 tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
368 {
369         struct pmd_internals *p = dev->data->dev_private;
370         uint32_t n_queues_max = p->params.tm.n_queues;
371         uint32_t n_tc_max =
372                 (n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
373                 / RTE_SCHED_QUEUES_PER_PIPE;
374         uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
375         uint32_t n_subports_max = n_pipes_max;
376         uint32_t n_root_max = 1;
377
378         switch (level) {
379         case TM_NODE_LEVEL_PORT:
380                 return n_root_max;
381         case TM_NODE_LEVEL_SUBPORT:
382                 return n_subports_max;
383         case TM_NODE_LEVEL_PIPE:
384                 return n_pipes_max;
385         case TM_NODE_LEVEL_TC:
386                 return n_tc_max;
387         case TM_NODE_LEVEL_QUEUE:
388         default:
389                 return n_queues_max;
390         }
391 }
392
393 /* Traffic manager node type get */
394 static int
395 pmd_tm_node_type_get(struct rte_eth_dev *dev,
396         uint32_t node_id,
397         int *is_leaf,
398         struct rte_tm_error *error)
399 {
400         struct pmd_internals *p = dev->data->dev_private;
401
402         if (is_leaf == NULL)
403                 return -rte_tm_error_set(error,
404                    EINVAL,
405                    RTE_TM_ERROR_TYPE_UNSPECIFIED,
406                    NULL,
407                    rte_strerror(EINVAL));
408
409         if (node_id == RTE_TM_NODE_ID_NULL ||
410                 (tm_node_search(dev, node_id) == NULL))
411                 return -rte_tm_error_set(error,
412                    EINVAL,
413                    RTE_TM_ERROR_TYPE_NODE_ID,
414                    NULL,
415                    rte_strerror(EINVAL));
416
417         *is_leaf = node_id < p->params.tm.n_queues;
418
419         return 0;
420 }
421
422 #ifdef RTE_SCHED_RED
423 #define WRED_SUPPORTED                                          1
424 #else
425 #define WRED_SUPPORTED                                          0
426 #endif
427
428 #define STATS_MASK_DEFAULT                                      \
429         (RTE_TM_STATS_N_PKTS |                                  \
430         RTE_TM_STATS_N_BYTES |                                  \
431         RTE_TM_STATS_N_PKTS_GREEN_DROPPED |                     \
432         RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
433
434 #define STATS_MASK_QUEUE                                                \
435         (STATS_MASK_DEFAULT |                                   \
436         RTE_TM_STATS_N_PKTS_QUEUED)
437
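/*
 * Port-wide capability template advertised through rte_tm; the limits
 * that depend on the device (node, shaper and WRED context counts) are
 * recomputed per device in pmd_tm_capabilities_get() below.
 */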
438 static const struct rte_tm_capabilities tm_cap = {
439         .n_nodes_max = UINT32_MAX,
440         .n_levels_max = TM_NODE_LEVEL_MAX,
441
442         .non_leaf_nodes_identical = 0,
443         .leaf_nodes_identical = 1,
444
445         .shaper_n_max = UINT32_MAX,
446         .shaper_private_n_max = UINT32_MAX,
447         .shaper_private_dual_rate_n_max = 0,
448         .shaper_private_rate_min = 1,
449         .shaper_private_rate_max = UINT32_MAX,
450         .shaper_private_packet_mode_supported = 0,
451         .shaper_private_byte_mode_supported = 1,
452
453         .shaper_shared_n_max = UINT32_MAX,
454         .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
455         .shaper_shared_n_shapers_per_node_max = 1,
456         .shaper_shared_dual_rate_n_max = 0,
457         .shaper_shared_rate_min = 1,
458         .shaper_shared_rate_max = UINT32_MAX,
459         .shaper_shared_packet_mode_supported = 0,
460         .shaper_shared_byte_mode_supported = 1,
461
462         .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
463         .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
464
465         .sched_n_children_max = UINT32_MAX,
466         .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
467         .sched_wfq_n_children_per_group_max = UINT32_MAX,
468         .sched_wfq_n_groups_max = 1,
469         .sched_wfq_weight_max = UINT32_MAX,
470         .sched_wfq_packet_mode_supported = 0,
471         .sched_wfq_byte_mode_supported = 1,
472
473         .cman_wred_packet_mode_supported = WRED_SUPPORTED,
474         .cman_wred_byte_mode_supported = 0,
475         .cman_head_drop_supported = 0,
476         .cman_wred_context_n_max = 0,
477         .cman_wred_context_private_n_max = 0,
478         .cman_wred_context_shared_n_max = 0,
479         .cman_wred_context_shared_n_nodes_per_context_max = 0,
480         .cman_wred_context_shared_n_contexts_per_node_max = 0,
481
482         .mark_vlan_dei_supported = {0, 0, 0},
483         .mark_ip_ecn_tcp_supported = {0, 0, 0},
484         .mark_ip_ecn_sctp_supported = {0, 0, 0},
485         .mark_ip_dscp_supported = {0, 0, 0},
486
487         .dynamic_update_mask = 0,
488
489         .stats_mask = STATS_MASK_QUEUE,
490 };
491
492 /* Traffic manager capabilities get */
493 static int
494 pmd_tm_capabilities_get(struct rte_eth_dev *dev,
495         struct rte_tm_capabilities *cap,
496         struct rte_tm_error *error)
497 {
498         if (cap == NULL)
499                 return -rte_tm_error_set(error,
500                    EINVAL,
501                    RTE_TM_ERROR_TYPE_CAPABILITIES,
502                    NULL,
503                    rte_strerror(EINVAL));
504
505         memcpy(cap, &tm_cap, sizeof(*cap));
506
507         cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
508                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
509                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
510                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
511                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
512
513         cap->shaper_private_n_max =
514                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
515                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
516                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
517                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);
518
519         cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
520                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);
521
522         cap->shaper_n_max = cap->shaper_private_n_max +
523                 cap->shaper_shared_n_max;
524
525         cap->shaper_shared_n_nodes_per_shaper_max =
526                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);
527
528         cap->sched_n_children_max = RTE_MAX(
529                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
530                 (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);
531
532         cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
533
534         if (WRED_SUPPORTED)
535                 cap->cman_wred_context_private_n_max =
536                         tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
537
538         cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
539                 cap->cman_wred_context_shared_n_max;
540
541         return 0;
542 }
543
544 static const struct rte_tm_level_capabilities tm_level_cap[] = {
545         [TM_NODE_LEVEL_PORT] = {
546                 .n_nodes_max = 1,
547                 .n_nodes_nonleaf_max = 1,
548                 .n_nodes_leaf_max = 0,
549                 .non_leaf_nodes_identical = 1,
550                 .leaf_nodes_identical = 0,
551
552                 {.nonleaf = {
553                         .shaper_private_supported = 1,
554                         .shaper_private_dual_rate_supported = 0,
555                         .shaper_private_rate_min = 1,
556                         .shaper_private_rate_max = UINT32_MAX,
557                         .shaper_private_packet_mode_supported = 0,
558                         .shaper_private_byte_mode_supported = 1,
559                         .shaper_shared_n_max = 0,
560                         .shaper_shared_packet_mode_supported = 0,
561                         .shaper_shared_byte_mode_supported = 0,
562
563                         .sched_n_children_max = UINT32_MAX,
564                         .sched_sp_n_priorities_max = 1,
565                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
566                         .sched_wfq_n_groups_max = 1,
567                         .sched_wfq_weight_max = 1,
568                         .sched_wfq_packet_mode_supported = 0,
569                         .sched_wfq_byte_mode_supported = 1,
570
571                         .stats_mask = STATS_MASK_DEFAULT,
572                 } },
573         },
574
575         [TM_NODE_LEVEL_SUBPORT] = {
576                 .n_nodes_max = UINT32_MAX,
577                 .n_nodes_nonleaf_max = UINT32_MAX,
578                 .n_nodes_leaf_max = 0,
579                 .non_leaf_nodes_identical = 1,
580                 .leaf_nodes_identical = 0,
581
582                 {.nonleaf = {
583                         .shaper_private_supported = 1,
584                         .shaper_private_dual_rate_supported = 0,
585                         .shaper_private_rate_min = 1,
586                         .shaper_private_rate_max = UINT32_MAX,
587                         .shaper_private_packet_mode_supported = 0,
588                         .shaper_private_byte_mode_supported = 1,
589                         .shaper_shared_n_max = 0,
590                         .shaper_shared_packet_mode_supported = 0,
591                         .shaper_shared_byte_mode_supported = 0,
592
593                         .sched_n_children_max = UINT32_MAX,
594                         .sched_sp_n_priorities_max = 1,
595                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
596                         .sched_wfq_n_groups_max = 1,
597 #ifdef RTE_SCHED_SUBPORT_TC_OV
598                         .sched_wfq_weight_max = UINT32_MAX,
599                         .sched_wfq_packet_mode_supported = 0,
600                         .sched_wfq_byte_mode_supported = 1,
601 #else
602                         .sched_wfq_weight_max = 1,
603                         .sched_wfq_packet_mode_supported = 0,
604                         .sched_wfq_byte_mode_supported = 1,
605 #endif
606
607                         .stats_mask = STATS_MASK_DEFAULT,
608                 } },
609         },
610
611         [TM_NODE_LEVEL_PIPE] = {
612                 .n_nodes_max = UINT32_MAX,
613                 .n_nodes_nonleaf_max = UINT32_MAX,
614                 .n_nodes_leaf_max = 0,
615                 .non_leaf_nodes_identical = 1,
616                 .leaf_nodes_identical = 0,
617
618                 {.nonleaf = {
619                         .shaper_private_supported = 1,
620                         .shaper_private_dual_rate_supported = 0,
621                         .shaper_private_rate_min = 1,
622                         .shaper_private_rate_max = UINT32_MAX,
623                         .shaper_private_packet_mode_supported = 0,
624                         .shaper_private_byte_mode_supported = 1,
625                         .shaper_shared_n_max = 0,
626                         .shaper_shared_packet_mode_supported = 0,
627                         .shaper_shared_byte_mode_supported = 0,
628
629                         .sched_n_children_max =
630                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
631                         .sched_sp_n_priorities_max =
632                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
633                         .sched_wfq_n_children_per_group_max = 1,
634                         .sched_wfq_n_groups_max = 0,
635                         .sched_wfq_weight_max = 1,
636                         .sched_wfq_packet_mode_supported = 0,
637                         .sched_wfq_byte_mode_supported = 0,
638
639                         .stats_mask = STATS_MASK_DEFAULT,
640                 } },
641         },
642
643         [TM_NODE_LEVEL_TC] = {
644                 .n_nodes_max = UINT32_MAX,
645                 .n_nodes_nonleaf_max = UINT32_MAX,
646                 .n_nodes_leaf_max = 0,
647                 .non_leaf_nodes_identical = 1,
648                 .leaf_nodes_identical = 0,
649
650                 {.nonleaf = {
651                         .shaper_private_supported = 1,
652                         .shaper_private_dual_rate_supported = 0,
653                         .shaper_private_rate_min = 1,
654                         .shaper_private_rate_max = UINT32_MAX,
655                         .shaper_private_packet_mode_supported = 0,
656                         .shaper_private_byte_mode_supported = 1,
657                         .shaper_shared_n_max = 1,
658                         .shaper_shared_packet_mode_supported = 0,
659                         .shaper_shared_byte_mode_supported = 1,
660
661                         .sched_n_children_max =
662                                 RTE_SCHED_BE_QUEUES_PER_PIPE,
663                         .sched_sp_n_priorities_max = 1,
664                         .sched_wfq_n_children_per_group_max =
665                                 RTE_SCHED_BE_QUEUES_PER_PIPE,
666                         .sched_wfq_n_groups_max = 1,
667                         .sched_wfq_weight_max = UINT32_MAX,
668                         .sched_wfq_packet_mode_supported = 0,
669                         .sched_wfq_byte_mode_supported = 1,
670
671                         .stats_mask = STATS_MASK_DEFAULT,
672                 } },
673         },
674
675         [TM_NODE_LEVEL_QUEUE] = {
676                 .n_nodes_max = UINT32_MAX,
677                 .n_nodes_nonleaf_max = 0,
678                 .n_nodes_leaf_max = UINT32_MAX,
679                 .non_leaf_nodes_identical = 0,
680                 .leaf_nodes_identical = 1,
681
682                 {.leaf = {
683                         .shaper_private_supported = 0,
684                         .shaper_private_dual_rate_supported = 0,
685                         .shaper_private_rate_min = 0,
686                         .shaper_private_rate_max = 0,
687                         .shaper_private_packet_mode_supported = 0,
688                         .shaper_private_byte_mode_supported = 0,
689                         .shaper_shared_n_max = 0,
690                         .shaper_shared_packet_mode_supported = 0,
691                         .shaper_shared_byte_mode_supported = 0,
692
693                         .cman_head_drop_supported = 0,
694                         .cman_wred_packet_mode_supported = WRED_SUPPORTED,
695                         .cman_wred_byte_mode_supported = 0,
696                         .cman_wred_context_private_supported = WRED_SUPPORTED,
697                         .cman_wred_context_shared_n_max = 0,
698
699                         .stats_mask = STATS_MASK_QUEUE,
700                 } },
701         },
702 };
703
704 /* Traffic manager level capabilities get */
705 static int
706 pmd_tm_level_capabilities_get(struct rte_eth_dev *dev,
707         uint32_t level_id,
708         struct rte_tm_level_capabilities *cap,
709         struct rte_tm_error *error)
710 {
711         if (cap == NULL)
712                 return -rte_tm_error_set(error,
713                    EINVAL,
714                    RTE_TM_ERROR_TYPE_CAPABILITIES,
715                    NULL,
716                    rte_strerror(EINVAL));
717
718         if (level_id >= TM_NODE_LEVEL_MAX)
719                 return -rte_tm_error_set(error,
720                    EINVAL,
721                    RTE_TM_ERROR_TYPE_LEVEL_ID,
722                    NULL,
723                    rte_strerror(EINVAL));
724
725         memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));
726
727         switch (level_id) {
728         case TM_NODE_LEVEL_PORT:
729                 cap->nonleaf.sched_n_children_max =
730                         tm_level_get_max_nodes(dev,
731                                 TM_NODE_LEVEL_SUBPORT);
732                 cap->nonleaf.sched_wfq_n_children_per_group_max =
733                         cap->nonleaf.sched_n_children_max;
734                 break;
735
736         case TM_NODE_LEVEL_SUBPORT:
737                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
738                         TM_NODE_LEVEL_SUBPORT);
739                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
740                 cap->nonleaf.sched_n_children_max =
741                         tm_level_get_max_nodes(dev,
742                                 TM_NODE_LEVEL_PIPE);
743                 cap->nonleaf.sched_wfq_n_children_per_group_max =
744                         cap->nonleaf.sched_n_children_max;
745                 break;
746
747         case TM_NODE_LEVEL_PIPE:
748                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
749                         TM_NODE_LEVEL_PIPE);
750                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
751                 break;
752
753         case TM_NODE_LEVEL_TC:
754                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
755                         TM_NODE_LEVEL_TC);
756                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
757                 break;
758
759         case TM_NODE_LEVEL_QUEUE:
760         default:
761                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
762                         TM_NODE_LEVEL_QUEUE);
763                 cap->n_nodes_leaf_max = cap->n_nodes_max;
764                 break;
765         }
766
767         return 0;
768 }
769
770 static const struct rte_tm_node_capabilities tm_node_cap[] = {
771         [TM_NODE_LEVEL_PORT] = {
772                 .shaper_private_supported = 1,
773                 .shaper_private_dual_rate_supported = 0,
774                 .shaper_private_rate_min = 1,
775                 .shaper_private_rate_max = UINT32_MAX,
776                 .shaper_private_packet_mode_supported = 0,
777                 .shaper_private_byte_mode_supported = 1,
778                 .shaper_shared_n_max = 0,
779                 .shaper_shared_packet_mode_supported = 0,
780                 .shaper_shared_byte_mode_supported = 0,
781
782                 {.nonleaf = {
783                         .sched_n_children_max = UINT32_MAX,
784                         .sched_sp_n_priorities_max = 1,
785                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
786                         .sched_wfq_n_groups_max = 1,
787                         .sched_wfq_weight_max = 1,
788                         .sched_wfq_packet_mode_supported = 0,
789                         .sched_wfq_byte_mode_supported = 1,
790                 } },
791
792                 .stats_mask = STATS_MASK_DEFAULT,
793         },
794
795         [TM_NODE_LEVEL_SUBPORT] = {
796                 .shaper_private_supported = 1,
797                 .shaper_private_dual_rate_supported = 0,
798                 .shaper_private_rate_min = 1,
799                 .shaper_private_rate_max = UINT32_MAX,
800                 .shaper_private_packet_mode_supported = 0,
801                 .shaper_private_byte_mode_supported = 1,
802                 .shaper_shared_n_max = 0,
803                 .shaper_shared_packet_mode_supported = 0,
804                 .shaper_shared_byte_mode_supported = 0,
805
806                 {.nonleaf = {
807                         .sched_n_children_max = UINT32_MAX,
808                         .sched_sp_n_priorities_max = 1,
809                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
810                         .sched_wfq_n_groups_max = 1,
811                         .sched_wfq_weight_max = UINT32_MAX,
812                         .sched_wfq_packet_mode_supported = 0,
813                         .sched_wfq_byte_mode_supported = 1,
814                 } },
815
816                 .stats_mask = STATS_MASK_DEFAULT,
817         },
818
819         [TM_NODE_LEVEL_PIPE] = {
820                 .shaper_private_supported = 1,
821                 .shaper_private_dual_rate_supported = 0,
822                 .shaper_private_rate_min = 1,
823                 .shaper_private_rate_max = UINT32_MAX,
824                 .shaper_private_packet_mode_supported = 0,
825                 .shaper_private_byte_mode_supported = 1,
826                 .shaper_shared_n_max = 0,
827                 .shaper_shared_packet_mode_supported = 0,
828                 .shaper_shared_byte_mode_supported = 0,
829
830                 {.nonleaf = {
831                         .sched_n_children_max =
832                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
833                         .sched_sp_n_priorities_max =
834                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
835                         .sched_wfq_n_children_per_group_max = 1,
836                         .sched_wfq_n_groups_max = 0,
837                         .sched_wfq_weight_max = 1,
838                         .sched_wfq_packet_mode_supported = 0,
839                         .sched_wfq_byte_mode_supported = 0,
840                 } },
841
842                 .stats_mask = STATS_MASK_DEFAULT,
843         },
844
845         [TM_NODE_LEVEL_TC] = {
846                 .shaper_private_supported = 1,
847                 .shaper_private_dual_rate_supported = 0,
848                 .shaper_private_rate_min = 1,
849                 .shaper_private_rate_max = UINT32_MAX,
850                 .shaper_private_packet_mode_supported = 0,
851                 .shaper_private_byte_mode_supported = 1,
852                 .shaper_shared_n_max = 1,
853                 .shaper_shared_packet_mode_supported = 0,
854                 .shaper_shared_byte_mode_supported = 1,
855
856                 {.nonleaf = {
857                         .sched_n_children_max =
858                                 RTE_SCHED_BE_QUEUES_PER_PIPE,
859                         .sched_sp_n_priorities_max = 1,
860                         .sched_wfq_n_children_per_group_max =
861                                 RTE_SCHED_BE_QUEUES_PER_PIPE,
862                         .sched_wfq_n_groups_max = 1,
863                         .sched_wfq_weight_max = UINT32_MAX,
864                         .sched_wfq_packet_mode_supported = 0,
865                         .sched_wfq_byte_mode_supported = 1,
866                 } },
867
868                 .stats_mask = STATS_MASK_DEFAULT,
869         },
870
871         [TM_NODE_LEVEL_QUEUE] = {
872                 .shaper_private_supported = 0,
873                 .shaper_private_dual_rate_supported = 0,
874                 .shaper_private_rate_min = 0,
875                 .shaper_private_rate_max = 0,
876                 .shaper_private_packet_mode_supported = 0,
877                 .shaper_private_byte_mode_supported = 0,
878                 .shaper_shared_n_max = 0,
879                 .shaper_shared_packet_mode_supported = 0,
880                 .shaper_shared_byte_mode_supported = 0,
881
882
883                 {.leaf = {
884                         .cman_head_drop_supported = 0,
885                         .cman_wred_packet_mode_supported = WRED_SUPPORTED,
886                         .cman_wred_byte_mode_supported = 0,
887                         .cman_wred_context_private_supported = WRED_SUPPORTED,
888                         .cman_wred_context_shared_n_max = 0,
889                 } },
890
891                 .stats_mask = STATS_MASK_QUEUE,
892         },
893 };
894
895 /* Traffic manager node capabilities get */
896 static int
897 pmd_tm_node_capabilities_get(struct rte_eth_dev *dev,
898         uint32_t node_id,
899         struct rte_tm_node_capabilities *cap,
900         struct rte_tm_error *error)
901 {
902         struct tm_node *tm_node;
903
904         if (cap == NULL)
905                 return -rte_tm_error_set(error,
906                    EINVAL,
907                    RTE_TM_ERROR_TYPE_CAPABILITIES,
908                    NULL,
909                    rte_strerror(EINVAL));
910
911         tm_node = tm_node_search(dev, node_id);
912         if (tm_node == NULL)
913                 return -rte_tm_error_set(error,
914                    EINVAL,
915                    RTE_TM_ERROR_TYPE_NODE_ID,
916                    NULL,
917                    rte_strerror(EINVAL));
918
919         memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));
920
921         switch (tm_node->level) {
922         case TM_NODE_LEVEL_PORT:
923                 cap->nonleaf.sched_n_children_max =
924                         tm_level_get_max_nodes(dev,
925                                 TM_NODE_LEVEL_SUBPORT);
926                 cap->nonleaf.sched_wfq_n_children_per_group_max =
927                         cap->nonleaf.sched_n_children_max;
928                 break;
929
930         case TM_NODE_LEVEL_SUBPORT:
931                 cap->nonleaf.sched_n_children_max =
932                         tm_level_get_max_nodes(dev,
933                                 TM_NODE_LEVEL_PIPE);
934                 cap->nonleaf.sched_wfq_n_children_per_group_max =
935                         cap->nonleaf.sched_n_children_max;
936                 break;
937
938         case TM_NODE_LEVEL_PIPE:
939         case TM_NODE_LEVEL_TC:
940         case TM_NODE_LEVEL_QUEUE:
941         default:
942                 break;
943         }
944
945         return 0;
946 }
947
948 static int
949 shaper_profile_check(struct rte_eth_dev *dev,
950         uint32_t shaper_profile_id,
951         struct rte_tm_shaper_params *profile,
952         struct rte_tm_error *error)
953 {
954         struct tm_shaper_profile *sp;
955
956         /* Shaper profile ID must not be NONE. */
957         if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
958                 return -rte_tm_error_set(error,
959                         EINVAL,
960                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
961                         NULL,
962                         rte_strerror(EINVAL));
963
964         /* Shaper profile must not exist. */
965         sp = tm_shaper_profile_search(dev, shaper_profile_id);
966         if (sp)
967                 return -rte_tm_error_set(error,
968                         EEXIST,
969                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
970                         NULL,
971                         rte_strerror(EEXIST));
972
973         /* Profile must not be NULL. */
974         if (profile == NULL)
975                 return -rte_tm_error_set(error,
976                         EINVAL,
977                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
978                         NULL,
979                         rte_strerror(EINVAL));
980
981         /* Peak rate: non-zero, 32-bit */
982         if (profile->peak.rate == 0 ||
983                 profile->peak.rate >= UINT32_MAX)
984                 return -rte_tm_error_set(error,
985                         EINVAL,
986                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
987                         NULL,
988                         rte_strerror(EINVAL));
989
990         /* Peak size: non-zero, 32-bit */
991         if (profile->peak.size == 0 ||
992                 profile->peak.size >= UINT32_MAX)
993                 return -rte_tm_error_set(error,
994                         EINVAL,
995                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
996                         NULL,
997                         rte_strerror(EINVAL));
998
999         /* Dual-rate profiles are not supported. */
1000         if (profile->committed.rate != 0)
1001                 return -rte_tm_error_set(error,
1002                         EINVAL,
1003                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
1004                         NULL,
1005                         rte_strerror(EINVAL));
1006
1007         /* Packet length adjust: 24 bytes */
1008         if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
1009                 return -rte_tm_error_set(error,
1010                         EINVAL,
1011                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
1012                         NULL,
1013                         rte_strerror(EINVAL));
1014
1015         /* Packet mode is not supported. */
1016         if (profile->packet_mode != 0)
1017                 return -rte_tm_error_set(error,
1018                         EINVAL,
1019                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE,
1020                         NULL,
1021                         rte_strerror(EINVAL));
1022         return 0;
1023 }
1024
1025 /* Traffic manager shaper profile add */
1026 static int
1027 pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
1028         uint32_t shaper_profile_id,
1029         struct rte_tm_shaper_params *profile,
1030         struct rte_tm_error *error)
1031 {
1032         struct pmd_internals *p = dev->data->dev_private;
1033         struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
1034         struct tm_shaper_profile *sp;
1035         int status;
1036
1037         /* Check input params */
1038         status = shaper_profile_check(dev, shaper_profile_id, profile, error);
1039         if (status)
1040                 return status;
1041
1042         /* Memory allocation */
1043         sp = calloc(1, sizeof(struct tm_shaper_profile));
1044         if (sp == NULL)
1045                 return -rte_tm_error_set(error,
1046                         ENOMEM,
1047                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1048                         NULL,
1049                         rte_strerror(ENOMEM));
1050
1051         /* Fill in */
1052         sp->shaper_profile_id = shaper_profile_id;
1053         memcpy(&sp->params, profile, sizeof(sp->params));
1054
1055         /* Add to list */
1056         TAILQ_INSERT_TAIL(spl, sp, node);
1057         p->soft.tm.h.n_shaper_profiles++;
1058
1059         return 0;
1060 }
1061
1062 /* Traffic manager shaper profile delete */
1063 static int
1064 pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
1065         uint32_t shaper_profile_id,
1066         struct rte_tm_error *error)
1067 {
1068         struct pmd_internals *p = dev->data->dev_private;
1069         struct tm_shaper_profile *sp;
1070
1071         /* Check existing */
1072         sp = tm_shaper_profile_search(dev, shaper_profile_id);
1073         if (sp == NULL)
1074                 return -rte_tm_error_set(error,
1075                         EINVAL,
1076                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1077                         NULL,
1078                         rte_strerror(EINVAL));
1079
1080         /* Check unused */
1081         if (sp->n_users)
1082                 return -rte_tm_error_set(error,
1083                         EBUSY,
1084                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1085                         NULL,
1086                         rte_strerror(EBUSY));
1087
1088         /* Remove from list */
1089         TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
1090         p->soft.tm.h.n_shaper_profiles--;
1091         free(sp);
1092
1093         return 0;
1094 }
1095
1096 static struct tm_node *
1097 tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
1098         struct tm_shared_shaper *ss)
1099 {
1100         struct pmd_internals *p = dev->data->dev_private;
1101         struct tm_node_list *nl = &p->soft.tm.h.nodes;
1102         struct tm_node *n;
1103
1104         /* Subport: each TC uses a shared shaper */
1105         TAILQ_FOREACH(n, nl, node) {
1106                 if (n->level != TM_NODE_LEVEL_TC ||
1107                         n->params.n_shared_shapers == 0 ||
1108                         n->params.shared_shaper_id[0] != ss->shared_shaper_id)
1109                         continue;
1110
1111                 return n;
1112         }
1113
1114         return NULL;
1115 }
1116
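/*
 * Run-time propagation of a shared shaper profile change: the owning
 * subport configuration is copied, the rate of the affected traffic
 * class is replaced with the new profile's peak rate, and the result is
 * re-applied through rte_sched_subport_config(); the trailing 0 is
 * assumed to select the default subport bandwidth profile.
 */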
1117 static int
1118 update_subport_tc_rate(struct rte_eth_dev *dev,
1119         struct tm_node *nt,
1120         struct tm_shared_shaper *ss,
1121         struct tm_shaper_profile *sp_new)
1122 {
1123         struct pmd_internals *p = dev->data->dev_private;
1124         uint32_t tc_id = tm_node_tc_id(dev, nt);
1125
1126         struct tm_node *np = nt->parent_node;
1127
1128         struct tm_node *ns = np->parent_node;
1129         uint32_t subport_id = tm_node_subport_id(dev, ns);
1130
1131         struct rte_sched_subport_params subport_params;
1132
1133         struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
1134                 ss->shaper_profile_id);
1135
1136         /* Derive new subport configuration. */
1137         memcpy(&subport_params,
1138                 &p->soft.tm.params.subport_params[subport_id],
1139                 sizeof(subport_params));
1140         subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
1141
1142         /* Update the subport configuration. */
1143         if (rte_sched_subport_config(SCHED(p),
1144                 subport_id, &subport_params, 0))
1145                 return -1;
1146
1147         /* Commit changes. */
1148         sp_old->n_users--;
1149
1150         ss->shaper_profile_id = sp_new->shaper_profile_id;
1151         sp_new->n_users++;
1152
1153         memcpy(&p->soft.tm.params.subport_params[subport_id],
1154                 &subport_params,
1155                 sizeof(subport_params));
1156
1157         return 0;
1158 }
1159
1160 /* Traffic manager shared shaper add/update */
1161 static int
1162 pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
1163         uint32_t shared_shaper_id,
1164         uint32_t shaper_profile_id,
1165         struct rte_tm_error *error)
1166 {
1167         struct pmd_internals *p = dev->data->dev_private;
1168         struct tm_shared_shaper *ss;
1169         struct tm_shaper_profile *sp;
1170         struct tm_node *nt;
1171
1172         /* Shaper profile must be valid. */
1173         sp = tm_shaper_profile_search(dev, shaper_profile_id);
1174         if (sp == NULL)
1175                 return -rte_tm_error_set(error,
1176                         EINVAL,
1177                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1178                         NULL,
1179                         rte_strerror(EINVAL));
1180
1181         /**
1182          * Add new shared shaper
1183          */
1184         ss = tm_shared_shaper_search(dev, shared_shaper_id);
1185         if (ss == NULL) {
1186                 struct tm_shared_shaper_list *ssl =
1187                         &p->soft.tm.h.shared_shapers;
1188
1189                 /* Hierarchy must not be frozen */
1190                 if (p->soft.tm.hierarchy_frozen)
1191                         return -rte_tm_error_set(error,
1192                                 EBUSY,
1193                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1194                                 NULL,
1195                                 rte_strerror(EBUSY));
1196
1197                 /* Memory allocation */
1198                 ss = calloc(1, sizeof(struct tm_shared_shaper));
1199                 if (ss == NULL)
1200                         return -rte_tm_error_set(error,
1201                                 ENOMEM,
1202                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1203                                 NULL,
1204                                 rte_strerror(ENOMEM));
1205
1206                 /* Fill in */
1207                 ss->shared_shaper_id = shared_shaper_id;
1208                 ss->shaper_profile_id = shaper_profile_id;
1209
1210                 /* Add to list */
1211                 TAILQ_INSERT_TAIL(ssl, ss, node);
1212                 p->soft.tm.h.n_shared_shapers++;
1213
1214                 return 0;
1215         }
1216
1217         /**
1218          * Update existing shared shaper
1219          */
1220         /* Hierarchy must be frozen (run-time update) */
1221         if (p->soft.tm.hierarchy_frozen == 0)
1222                 return -rte_tm_error_set(error,
1223                         EBUSY,
1224                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1225                         NULL,
1226                         rte_strerror(EBUSY));
1227
1228
1229         /* Propagate change. */
1230         nt = tm_shared_shaper_get_tc(dev, ss);
1231         if (nt == NULL || update_subport_tc_rate(dev, nt, ss, sp))
1232                 return -rte_tm_error_set(error,
1233                         EINVAL,
1234                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1235                         NULL,
1236                         rte_strerror(EINVAL));
1237
1238         return 0;
1239 }
1240
1241 /* Traffic manager shared shaper delete */
1242 static int
1243 pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
1244         uint32_t shared_shaper_id,
1245         struct rte_tm_error *error)
1246 {
1247         struct pmd_internals *p = dev->data->dev_private;
1248         struct tm_shared_shaper *ss;
1249
1250         /* Check existing */
1251         ss = tm_shared_shaper_search(dev, shared_shaper_id);
1252         if (ss == NULL)
1253                 return -rte_tm_error_set(error,
1254                         EINVAL,
1255                         RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1256                         NULL,
1257                         rte_strerror(EINVAL));
1258
1259         /* Check unused */
1260         if (ss->n_users)
1261                 return -rte_tm_error_set(error,
1262                         EBUSY,
1263                         RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1264                         NULL,
1265                         rte_strerror(EBUSY));
1266
1267         /* Remove from list */
1268         TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
1269         p->soft.tm.h.n_shared_shapers--;
1270         free(ss);
1271
1272         return 0;
1273 }
1274
1275 static int
1276 wred_profile_check(struct rte_eth_dev *dev,
1277         uint32_t wred_profile_id,
1278         struct rte_tm_wred_params *profile,
1279         struct rte_tm_error *error)
1280 {
1281         struct tm_wred_profile *wp;
1282         enum rte_color color;
1283
1284         /* WRED profile ID must not be NONE. */
1285         if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
1286                 return -rte_tm_error_set(error,
1287                         EINVAL,
1288                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1289                         NULL,
1290                         rte_strerror(EINVAL));
1291
1292         /* WRED profile must not exist. */
1293         wp = tm_wred_profile_search(dev, wred_profile_id);
1294         if (wp)
1295                 return -rte_tm_error_set(error,
1296                         EEXIST,
1297                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1298                         NULL,
1299                         rte_strerror(EEXIST));
1300
1301         /* Profile must not be NULL. */
1302         if (profile == NULL)
1303                 return -rte_tm_error_set(error,
1304                         EINVAL,
1305                         RTE_TM_ERROR_TYPE_WRED_PROFILE,
1306                         NULL,
1307                         rte_strerror(EINVAL));
1308
1309         /* WRED profile should be in packet mode */
1310         /* WRED profile must be in packet mode */
1311                 return -rte_tm_error_set(error,
1312                         ENOTSUP,
1313                         RTE_TM_ERROR_TYPE_WRED_PROFILE,
1314                         NULL,
1315                         rte_strerror(ENOTSUP));
1316
1317         /* min_th <= max_th, max_th > 0 */
1318         for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
1319                 uint32_t min_th = profile->red_params[color].min_th;
1320                 uint32_t max_th = profile->red_params[color].max_th;
1321
1322                 if (min_th > max_th ||
1323                         max_th == 0 ||
1324                         min_th > UINT16_MAX ||
1325                         max_th > UINT16_MAX)
1326                         return -rte_tm_error_set(error,
1327                                 EINVAL,
1328                                 RTE_TM_ERROR_TYPE_WRED_PROFILE,
1329                                 NULL,
1330                                 rte_strerror(EINVAL));
1331         }
1332
1333         return 0;
1334 }
1335
1336 /* Traffic manager WRED profile add */
1337 static int
1338 pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
1339         uint32_t wred_profile_id,
1340         struct rte_tm_wred_params *profile,
1341         struct rte_tm_error *error)
1342 {
1343         struct pmd_internals *p = dev->data->dev_private;
1344         struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
1345         struct tm_wred_profile *wp;
1346         int status;
1347
1348         /* Check input params */
1349         status = wred_profile_check(dev, wred_profile_id, profile, error);
1350         if (status)
1351                 return status;
1352
1353         /* Memory allocation */
1354         wp = calloc(1, sizeof(struct tm_wred_profile));
1355         if (wp == NULL)
1356                 return -rte_tm_error_set(error,
1357                         ENOMEM,
1358                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1359                         NULL,
1360                         rte_strerror(ENOMEM));
1361
1362         /* Fill in */
1363         wp->wred_profile_id = wred_profile_id;
1364         memcpy(&wp->params, profile, sizeof(wp->params));
1365
1366         /* Add to list */
1367         TAILQ_INSERT_TAIL(wpl, wp, node);
1368         p->soft.tm.h.n_wred_profiles++;
1369
1370         return 0;
1371 }
1372
1373 /* Traffic manager WRED profile delete */
1374 static int
1375 pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
1376         uint32_t wred_profile_id,
1377         struct rte_tm_error *error)
1378 {
1379         struct pmd_internals *p = dev->data->dev_private;
1380         struct tm_wred_profile *wp;
1381
1382         /* Check existing */
1383         wp = tm_wred_profile_search(dev, wred_profile_id);
1384         if (wp == NULL)
1385                 return -rte_tm_error_set(error,
1386                         EINVAL,
1387                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1388                         NULL,
1389                         rte_strerror(EINVAL));
1390
1391         /* Check unused */
1392         if (wp->n_users)
1393                 return -rte_tm_error_set(error,
1394                         EBUSY,
1395                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1396                         NULL,
1397                         rte_strerror(EBUSY));
1398
1399         /* Remove from list */
1400         TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
1401         p->soft.tm.h.n_wred_profiles--;
1402         free(wp);
1403
1404         return 0;
1405 }
1406
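/*
 * Per-level validation helpers used during node add: each one enforces
 * the constraints of its level (non-leaf placement, priority and weight
 * limits, shaper requirements, stats mask) before the node is accepted
 * into the hierarchy.
 */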
1407 static int
1408 node_add_check_port(struct rte_eth_dev *dev,
1409         uint32_t node_id,
1410         uint32_t parent_node_id __rte_unused,
1411         uint32_t priority,
1412         uint32_t weight,
1413         uint32_t level_id __rte_unused,
1414         struct rte_tm_node_params *params,
1415         struct rte_tm_error *error)
1416 {
1417         struct pmd_internals *p = dev->data->dev_private;
1418         struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
1419                 params->shaper_profile_id);
1420
1421         /* node type: non-leaf */
1422         if (node_id < p->params.tm.n_queues)
1423                 return -rte_tm_error_set(error,
1424                         EINVAL,
1425                         RTE_TM_ERROR_TYPE_NODE_ID,
1426                         NULL,
1427                         rte_strerror(EINVAL));
1428
1429         /* Priority must be 0 */
1430         if (priority != 0)
1431                 return -rte_tm_error_set(error,
1432                         EINVAL,
1433                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1434                         NULL,
1435                         rte_strerror(EINVAL));
1436
1437         /* Weight must be 1 */
1438         if (weight != 1)
1439                 return -rte_tm_error_set(error,
1440                         EINVAL,
1441                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1442                         NULL,
1443                         rte_strerror(EINVAL));
1444
1445         /* Shaper must be valid */
1446         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1447                 sp == NULL)
1448                 return -rte_tm_error_set(error,
1449                         EINVAL,
1450                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1451                         NULL,
1452                         rte_strerror(EINVAL));
1453
1454         /* No shared shapers */
1455         if (params->n_shared_shapers != 0)
1456                 return -rte_tm_error_set(error,
1457                         EINVAL,
1458                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1459                         NULL,
1460                         rte_strerror(EINVAL));
1461
1462         /* Number of SP priorities must be 1 */
1463         if (params->nonleaf.n_sp_priorities != 1)
1464                 return -rte_tm_error_set(error,
1465                         EINVAL,
1466                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1467                         NULL,
1468                         rte_strerror(EINVAL));
1469
1470         /* Stats */
1471         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1472                 return -rte_tm_error_set(error,
1473                         EINVAL,
1474                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1475                         NULL,
1476                         rte_strerror(EINVAL));
1477
1478         return 0;
1479 }
1480
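/* Subport level node add checks: same constraints as the port level, i.e.
 * non-leaf node, priority 0, weight 1, a valid private shaper, no shared
 * shapers and a single SP priority.
 */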
1481 static int
1482 node_add_check_subport(struct rte_eth_dev *dev,
1483         uint32_t node_id,
1484         uint32_t parent_node_id __rte_unused,
1485         uint32_t priority,
1486         uint32_t weight,
1487         uint32_t level_id __rte_unused,
1488         struct rte_tm_node_params *params,
1489         struct rte_tm_error *error)
1490 {
1491         struct pmd_internals *p = dev->data->dev_private;
1492
1493         /* node type: non-leaf */
1494         if (node_id < p->params.tm.n_queues)
1495                 return -rte_tm_error_set(error,
1496                         EINVAL,
1497                         RTE_TM_ERROR_TYPE_NODE_ID,
1498                         NULL,
1499                         rte_strerror(EINVAL));
1500
1501         /* Priority must be 0 */
1502         if (priority != 0)
1503                 return -rte_tm_error_set(error,
1504                         EINVAL,
1505                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1506                         NULL,
1507                         rte_strerror(EINVAL));
1508
1509         /* Weight must be 1 */
1510         if (weight != 1)
1511                 return -rte_tm_error_set(error,
1512                         EINVAL,
1513                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1514                         NULL,
1515                         rte_strerror(EINVAL));
1516
1517         /* Shaper must be valid */
1518         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1519                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1520                 return -rte_tm_error_set(error,
1521                         EINVAL,
1522                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1523                         NULL,
1524                         rte_strerror(EINVAL));
1525
1526         /* No shared shapers */
1527         if (params->n_shared_shapers != 0)
1528                 return -rte_tm_error_set(error,
1529                         EINVAL,
1530                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1531                         NULL,
1532                         rte_strerror(EINVAL));
1533
1534         /* Number of SP priorities must be 1 */
1535         if (params->nonleaf.n_sp_priorities != 1)
1536                 return -rte_tm_error_set(error,
1537                         EINVAL,
1538                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1539                         NULL,
1540                         rte_strerror(EINVAL));
1541
1542         /* Stats */
1543         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1544                 return -rte_tm_error_set(error,
1545                         EINVAL,
1546                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1547                         NULL,
1548                         rte_strerror(EINVAL));
1549
1550         return 0;
1551 }
1552
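/* Pipe level node add checks: non-leaf node, priority 0, a valid private
 * shaper, no shared shapers and one SP priority per traffic class.
 */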
1553 static int
1554 node_add_check_pipe(struct rte_eth_dev *dev,
1555         uint32_t node_id,
1556         uint32_t parent_node_id __rte_unused,
1557         uint32_t priority,
1558         uint32_t weight __rte_unused,
1559         uint32_t level_id __rte_unused,
1560         struct rte_tm_node_params *params,
1561         struct rte_tm_error *error)
1562 {
1563         struct pmd_internals *p = dev->data->dev_private;
1564
1565         /* node type: non-leaf */
1566         if (node_id < p->params.tm.n_queues)
1567                 return -rte_tm_error_set(error,
1568                         EINVAL,
1569                         RTE_TM_ERROR_TYPE_NODE_ID,
1570                         NULL,
1571                         rte_strerror(EINVAL));
1572
1573         /* Priority must be 0 */
1574         if (priority != 0)
1575                 return -rte_tm_error_set(error,
1576                         EINVAL,
1577                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1578                         NULL,
1579                         rte_strerror(EINVAL));
1580
1581         /* Shaper must be valid */
1582         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1583                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1584                 return -rte_tm_error_set(error,
1585                         EINVAL,
1586                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1587                         NULL,
1588                         rte_strerror(EINVAL));
1589
1590         /* No shared shapers */
1591         if (params->n_shared_shapers != 0)
1592                 return -rte_tm_error_set(error,
1593                         EINVAL,
1594                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1595                         NULL,
1596                         rte_strerror(EINVAL));
1597
1598         /* Number of SP priorities must match the number of traffic classes (13) */
1599         if (params->nonleaf.n_sp_priorities !=
1600                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1601                 return -rte_tm_error_set(error,
1602                         EINVAL,
1603                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1604                         NULL,
1605                         rte_strerror(EINVAL));
1606
1607         /* WFQ mode must be byte mode */
1608         if (params->nonleaf.wfq_weight_mode != NULL &&
1609                 params->nonleaf.wfq_weight_mode[0] != 0 &&
1610                 params->nonleaf.wfq_weight_mode[1] != 0 &&
1611                 params->nonleaf.wfq_weight_mode[2] != 0 &&
1612                 params->nonleaf.wfq_weight_mode[3] != 0)
1613                 return -rte_tm_error_set(error,
1614                         EINVAL,
1615                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
1616                         NULL,
1617                         rte_strerror(EINVAL));
1618
1619         /* Stats */
1620         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1621                 return -rte_tm_error_set(error,
1622                         EINVAL,
1623                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1624                         NULL,
1625                         rte_strerror(EINVAL));
1626
1627         return 0;
1628 }
1629
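/* Traffic class (TC) level node add checks: non-leaf node, weight 1, a
 * valid private shaper and at most one valid shared shaper.
 */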
1630 static int
1631 node_add_check_tc(struct rte_eth_dev *dev,
1632         uint32_t node_id,
1633         uint32_t parent_node_id __rte_unused,
1634         uint32_t priority __rte_unused,
1635         uint32_t weight,
1636         uint32_t level_id __rte_unused,
1637         struct rte_tm_node_params *params,
1638         struct rte_tm_error *error)
1639 {
1640         struct pmd_internals *p = dev->data->dev_private;
1641
1642         /* node type: non-leaf */
1643         if (node_id < p->params.tm.n_queues)
1644                 return -rte_tm_error_set(error,
1645                         EINVAL,
1646                         RTE_TM_ERROR_TYPE_NODE_ID,
1647                         NULL,
1648                         rte_strerror(EINVAL));
1649
1650         /* Weight must be 1 */
1651         if (weight != 1)
1652                 return -rte_tm_error_set(error,
1653                         EINVAL,
1654                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1655                         NULL,
1656                         rte_strerror(EINVAL));
1657
1658         /* Shaper must be valid */
1659         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1660                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1661                 return -rte_tm_error_set(error,
1662                         EINVAL,
1663                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1664                         NULL,
1665                         rte_strerror(EINVAL));
1666
1667         /* Single valid shared shaper */
1668         if (params->n_shared_shapers > 1)
1669                 return -rte_tm_error_set(error,
1670                         EINVAL,
1671                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1672                         NULL,
1673                         rte_strerror(EINVAL));
1674
1675         if (params->n_shared_shapers == 1 &&
1676                 (params->shared_shaper_id == NULL ||
1677                 (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
1678                 return -rte_tm_error_set(error,
1679                         EINVAL,
1680                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
1681                         NULL,
1682                         rte_strerror(EINVAL));
1683
1684         /* Number of priorities must be 1 */
1685         if (params->nonleaf.n_sp_priorities != 1)
1686                 return -rte_tm_error_set(error,
1687                         EINVAL,
1688                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1689                         NULL,
1690                         rte_strerror(EINVAL));
1691
1692         /* Stats */
1693         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1694                 return -rte_tm_error_set(error,
1695                         EINVAL,
1696                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1697                         NULL,
1698                         rte_strerror(EINVAL));
1699
1700         return 0;
1701 }
1702
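/* Queue (leaf) level node add checks: leaf node, priority 0, no private or
 * shared shapers, congestion management other than head drop, and a valid
 * private WRED profile when WRED is selected.
 */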
1703 static int
1704 node_add_check_queue(struct rte_eth_dev *dev,
1705         uint32_t node_id,
1706         uint32_t parent_node_id __rte_unused,
1707         uint32_t priority,
1708         uint32_t weight __rte_unused,
1709         uint32_t level_id __rte_unused,
1710         struct rte_tm_node_params *params,
1711         struct rte_tm_error *error)
1712 {
1713         struct pmd_internals *p = dev->data->dev_private;
1714
1715         /* node type: leaf */
1716         if (node_id >= p->params.tm.n_queues)
1717                 return -rte_tm_error_set(error,
1718                         EINVAL,
1719                         RTE_TM_ERROR_TYPE_NODE_ID,
1720                         NULL,
1721                         rte_strerror(EINVAL));
1722
1723         /* Priority must be 0 */
1724         if (priority != 0)
1725                 return -rte_tm_error_set(error,
1726                         EINVAL,
1727                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1728                         NULL,
1729                         rte_strerror(EINVAL));
1730
1731         /* No shaper */
1732         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1733                 return -rte_tm_error_set(error,
1734                         EINVAL,
1735                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1736                         NULL,
1737                         rte_strerror(EINVAL));
1738
1739         /* No shared shapers */
1740         if (params->n_shared_shapers != 0)
1741                 return -rte_tm_error_set(error,
1742                         EINVAL,
1743                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1744                         NULL,
1745                         rte_strerror(EINVAL));
1746
1747         /* Congestion management must not be head drop */
1748         if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
1749                 return -rte_tm_error_set(error,
1750                         EINVAL,
1751                         RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
1752                         NULL,
1753                         rte_strerror(EINVAL));
1754
1755         /* Congestion management set to WRED */
1756         if (params->leaf.cman == RTE_TM_CMAN_WRED) {
1757                 uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
1758                 struct tm_wred_profile *wp = tm_wred_profile_search(dev,
1759                         wred_profile_id);
1760
1761                 /* WRED profile (for private WRED context) must be valid */
1762                 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
1763                         wp == NULL)
1764                         return -rte_tm_error_set(error,
1765                                 EINVAL,
1766                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
1767                                 NULL,
1768                                 rte_strerror(EINVAL));
1769
1770                 /* No shared WRED contexts */
1771                 if (params->leaf.wred.n_shared_wred_contexts != 0)
1772                         return -rte_tm_error_set(error,
1773                                 EINVAL,
1774                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
1775                                 NULL,
1776                                 rte_strerror(EINVAL));
1777         }
1778
1779         /* Stats */
1780         if (params->stats_mask & ~STATS_MASK_QUEUE)
1781                 return -rte_tm_error_set(error,
1782                         EINVAL,
1783                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1784                         NULL,
1785                         rte_strerror(EINVAL));
1786
1787         return 0;
1788 }
1789
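/* Generic node add checks (node ID, parent, priority, weight, level),
 * followed by the per-level checks for the level derived from the parent.
 */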
1790 static int
1791 node_add_check(struct rte_eth_dev *dev,
1792         uint32_t node_id,
1793         uint32_t parent_node_id,
1794         uint32_t priority,
1795         uint32_t weight,
1796         uint32_t level_id,
1797         struct rte_tm_node_params *params,
1798         struct rte_tm_error *error)
1799 {
1800         struct tm_node *pn;
1801         uint32_t level;
1802         int status;
1803
1804         /* node_id, parent_node_id:
1805          *    -node_id must not be RTE_TM_NODE_ID_NULL
1806          *    -node_id must not be in use
1807          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1808          *        -root node must not exist
1809          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1810          *        -parent_node_id must be valid
1811          */
1812         if (node_id == RTE_TM_NODE_ID_NULL)
1813                 return -rte_tm_error_set(error,
1814                         EINVAL,
1815                         RTE_TM_ERROR_TYPE_NODE_ID,
1816                         NULL,
1817                         rte_strerror(EINVAL));
1818
1819         if (tm_node_search(dev, node_id))
1820                 return -rte_tm_error_set(error,
1821                         EEXIST,
1822                         RTE_TM_ERROR_TYPE_NODE_ID,
1823                         NULL,
1824                         rte_strerror(EEXIST));
1825
1826         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
1827                 pn = NULL;
1828                 if (tm_root_node_present(dev))
1829                         return -rte_tm_error_set(error,
1830                                 EEXIST,
1831                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1832                                 NULL,
1833                                 rte_strerror(EEXIST));
1834         } else {
1835                 pn = tm_node_search(dev, parent_node_id);
1836                 if (pn == NULL)
1837                         return -rte_tm_error_set(error,
1838                                 EINVAL,
1839                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1840                                 NULL,
1841                                 rte_strerror(EINVAL));
1842         }
1843
1844         /* priority: must be 0 .. 12 */
1845         if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1846                 return -rte_tm_error_set(error,
1847                         EINVAL,
1848                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1849                         NULL,
1850                         rte_strerror(EINVAL));
1851
1852         /* weight: must be non-zero and less than UINT8_MAX */
1853         if (weight == 0 || weight >= UINT8_MAX)
1854                 return -rte_tm_error_set(error,
1855                         EINVAL,
1856                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1857                         NULL,
1858                         rte_strerror(EINVAL));
1859
1860         /* level_id: if valid, then
1861          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1862          *        -level_id must be zero
1863          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1864          *        -level_id must be parent level ID plus one
1865          */
1866         level = (pn == NULL) ? 0 : pn->level + 1;
1867         if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
1868                 return -rte_tm_error_set(error,
1869                         EINVAL,
1870                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1871                         NULL,
1872                         rte_strerror(EINVAL));
1873
1874         /* params: must not be NULL */
1875         if (params == NULL)
1876                 return -rte_tm_error_set(error,
1877                         EINVAL,
1878                         RTE_TM_ERROR_TYPE_NODE_PARAMS,
1879                         NULL,
1880                         rte_strerror(EINVAL));
1881
1882         /* params: per level checks */
1883         switch (level) {
1884         case TM_NODE_LEVEL_PORT:
1885                 status = node_add_check_port(dev, node_id,
1886                         parent_node_id, priority, weight, level_id,
1887                         params, error);
1888                 if (status)
1889                         return status;
1890                 break;
1891
1892         case TM_NODE_LEVEL_SUBPORT:
1893                 status = node_add_check_subport(dev, node_id,
1894                         parent_node_id, priority, weight, level_id,
1895                         params, error);
1896                 if (status)
1897                         return status;
1898                 break;
1899
1900         case TM_NODE_LEVEL_PIPE:
1901                 status = node_add_check_pipe(dev, node_id,
1902                         parent_node_id, priority, weight, level_id,
1903                         params, error);
1904                 if (status)
1905                         return status;
1906                 break;
1907
1908         case TM_NODE_LEVEL_TC:
1909                 status = node_add_check_tc(dev, node_id,
1910                         parent_node_id, priority, weight, level_id,
1911                         params, error);
1912                 if (status)
1913                         return status;
1914                 break;
1915
1916         case TM_NODE_LEVEL_QUEUE:
1917                 status = node_add_check_queue(dev, node_id,
1918                         parent_node_id, priority, weight, level_id,
1919                         params, error);
1920                 if (status)
1921                         return status;
1922                 break;
1923
1924         default:
1925                 return -rte_tm_error_set(error,
1926                         EINVAL,
1927                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1928                         NULL,
1929                         rte_strerror(EINVAL));
1930         }
1931
1932         return 0;
1933 }
1934
1935 /* Traffic manager node add */
1936 static int
1937 pmd_tm_node_add(struct rte_eth_dev *dev,
1938         uint32_t node_id,
1939         uint32_t parent_node_id,
1940         uint32_t priority,
1941         uint32_t weight,
1942         uint32_t level_id,
1943         struct rte_tm_node_params *params,
1944         struct rte_tm_error *error)
1945 {
1946         struct pmd_internals *p = dev->data->dev_private;
1947         struct tm_node_list *nl = &p->soft.tm.h.nodes;
1948         struct tm_node *n;
1949         uint32_t i;
1950         int status;
1951
1952         /* Checks */
1953         if (p->soft.tm.hierarchy_frozen)
1954                 return -rte_tm_error_set(error,
1955                         EBUSY,
1956                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1957                         NULL,
1958                         rte_strerror(EBUSY));
1959
1960         status = node_add_check(dev, node_id, parent_node_id, priority, weight,
1961                 level_id, params, error);
1962         if (status)
1963                 return status;
1964
1965         /* Memory allocation */
1966         n = calloc(1, sizeof(struct tm_node));
1967         if (n == NULL)
1968                 return -rte_tm_error_set(error,
1969                         ENOMEM,
1970                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1971                         NULL,
1972                         rte_strerror(ENOMEM));
1973
1974         /* Fill in */
1975         n->node_id = node_id;
1976         n->parent_node_id = parent_node_id;
1977         n->priority = priority;
1978         n->weight = weight;
1979
1980         if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1981                 n->parent_node = tm_node_search(dev, parent_node_id);
1982                 n->level = n->parent_node->level + 1;
1983         }
1984
1985         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1986                 n->shaper_profile = tm_shaper_profile_search(dev,
1987                         params->shaper_profile_id);
1988
1989         if (n->level == TM_NODE_LEVEL_QUEUE &&
1990                 params->leaf.cman == RTE_TM_CMAN_WRED)
1991                 n->wred_profile = tm_wred_profile_search(dev,
1992                         params->leaf.wred.wred_profile_id);
1993
1994         memcpy(&n->params, params, sizeof(n->params));
1995
1996         /* Add to list */
1997         TAILQ_INSERT_TAIL(nl, n, node);
1998         p->soft.tm.h.n_nodes++;
1999
2000         /* Update dependencies */
2001         if (n->parent_node)
2002                 n->parent_node->n_children++;
2003
2004         if (n->shaper_profile)
2005                 n->shaper_profile->n_users++;
2006
2007         for (i = 0; i < params->n_shared_shapers; i++) {
2008                 struct tm_shared_shaper *ss;
2009
2010                 ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
2011                 ss->n_users++;
2012         }
2013
2014         if (n->wred_profile)
2015                 n->wred_profile->n_users++;
2016
2017         p->soft.tm.h.n_tm_nodes[n->level]++;
2018
2019         return 0;
2020 }
2021
2022 /* Traffic manager node delete */
2023 static int
2024 pmd_tm_node_delete(struct rte_eth_dev *dev,
2025         uint32_t node_id,
2026         struct rte_tm_error *error)
2027 {
2028         struct pmd_internals *p = dev->data->dev_private;
2029         struct tm_node *n;
2030         uint32_t i;
2031
2032         /* Check hierarchy changes are currently allowed */
2033         if (p->soft.tm.hierarchy_frozen)
2034                 return -rte_tm_error_set(error,
2035                         EBUSY,
2036                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2037                         NULL,
2038                         rte_strerror(EBUSY));
2039
2040         /* Check existing */
2041         n = tm_node_search(dev, node_id);
2042         if (n == NULL)
2043                 return -rte_tm_error_set(error,
2044                         EINVAL,
2045                         RTE_TM_ERROR_TYPE_NODE_ID,
2046                         NULL,
2047                         rte_strerror(EINVAL));
2048
2049         /* Check unused */
2050         if (n->n_children)
2051                 return -rte_tm_error_set(error,
2052                         EBUSY,
2053                         RTE_TM_ERROR_TYPE_NODE_ID,
2054                         NULL,
2055                         rte_strerror(EBUSY));
2056
2057         /* Update dependencies */
2058         p->soft.tm.h.n_tm_nodes[n->level]--;
2059
2060         if (n->wred_profile)
2061                 n->wred_profile->n_users--;
2062
2063         for (i = 0; i < n->params.n_shared_shapers; i++) {
2064                 struct tm_shared_shaper *ss;
2065
2066                 ss = tm_shared_shaper_search(dev,
2067                                 n->params.shared_shaper_id[i]);
2068                 ss->n_users--;
2069         }
2070
2071         if (n->shaper_profile)
2072                 n->shaper_profile->n_users--;
2073
2074         if (n->parent_node)
2075                 n->parent_node->n_children--;
2076
2077         /* Remove from list */
2078         TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
2079         p->soft.tm.h.n_nodes--;
2080         free(n);
2081
2082         return 0;
2083 }
2084
2085
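/* Build the rte_sched pipe profile of pipe node np from its own shaper and
 * from the shapers and weights of its TC and queue children.
 */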
2086 static void
2087 pipe_profile_build(struct rte_eth_dev *dev,
2088         struct tm_node *np,
2089         struct rte_sched_pipe_params *pp)
2090 {
2091         struct pmd_internals *p = dev->data->dev_private;
2092         struct tm_hierarchy *h = &p->soft.tm.h;
2093         struct tm_node_list *nl = &h->nodes;
2094         struct tm_node *nt, *nq;
2095
2096         memset(pp, 0, sizeof(*pp));
2097
2098         /* Pipe */
2099         pp->tb_rate = np->shaper_profile->params.peak.rate;
2100         pp->tb_size = np->shaper_profile->params.peak.size;
2101
2102         /* Traffic Class (TC) */
2103         pp->tc_period = PIPE_TC_PERIOD;
2104
2105         pp->tc_ov_weight = np->weight;
2106
2107         TAILQ_FOREACH(nt, nl, node) {
2108                 uint32_t queue_id = 0;
2109
2110                 if (nt->level != TM_NODE_LEVEL_TC ||
2111                         nt->parent_node_id != np->node_id)
2112                         continue;
2113
2114                 pp->tc_rate[nt->priority] =
2115                         nt->shaper_profile->params.peak.rate;
2116
2117                 /* Queue */
2118                 TAILQ_FOREACH(nq, nl, node) {
2119
2120                         if (nq->level != TM_NODE_LEVEL_QUEUE ||
2121                                 nq->parent_node_id != nt->node_id)
2122                                 continue;
2123
2124                         if (nt->priority == RTE_SCHED_TRAFFIC_CLASS_BE)
2125                                 pp->wrr_weights[queue_id] = nq->weight;
2126
2127                         queue_id++;
2128                 }
2129         }
2130 }
2131
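/* Return 1 and the next free pipe profile slot through pipe_profile_id
 * when the profile table is not full, 0 otherwise.
 */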
2132 static int
2133 pipe_profile_free_exists(struct rte_eth_dev *dev,
2134         uint32_t *pipe_profile_id)
2135 {
2136         struct pmd_internals *p = dev->data->dev_private;
2137         struct tm_params *t = &p->soft.tm.params;
2138
2139         if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) {
2140                 *pipe_profile_id = t->n_pipe_profiles;
2141                 return 1;
2142         }
2143
2144         return 0;
2145 }
2146
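/* Return 1 when an identical pipe profile is already installed (its ID is
 * optionally returned through pipe_profile_id), 0 otherwise.
 */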
2147 static int
2148 pipe_profile_exists(struct rte_eth_dev *dev,
2149         struct rte_sched_pipe_params *pp,
2150         uint32_t *pipe_profile_id)
2151 {
2152         struct pmd_internals *p = dev->data->dev_private;
2153         struct tm_params *t = &p->soft.tm.params;
2154         uint32_t i;
2155
2156         for (i = 0; i < t->n_pipe_profiles; i++)
2157                 if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
2158                         if (pipe_profile_id)
2159                                 *pipe_profile_id = i;
2160                         return 1;
2161                 }
2162
2163         return 0;
2164 }
2165
2166 static void
2167 pipe_profile_install(struct rte_eth_dev *dev,
2168         struct rte_sched_pipe_params *pp,
2169         uint32_t pipe_profile_id)
2170 {
2171         struct pmd_internals *p = dev->data->dev_private;
2172         struct tm_params *t = &p->soft.tm.params;
2173
2174         memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
2175         t->n_pipe_profiles++;
2176 }
2177
2178 static void
2179 pipe_profile_mark(struct rte_eth_dev *dev,
2180         uint32_t subport_id,
2181         uint32_t pipe_id,
2182         uint32_t pipe_profile_id)
2183 {
2184         struct pmd_internals *p = dev->data->dev_private;
2185         struct tm_hierarchy *h = &p->soft.tm.h;
2186         struct tm_params *t = &p->soft.tm.params;
2187         uint32_t n_pipes_per_subport, pos;
2188
2189         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2190                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2191         pos = subport_id * n_pipes_per_subport + pipe_id;
2192
2193         t->pipe_to_profile[pos] = pipe_profile_id;
2194 }
2195
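/* Return the pipe profile currently mapped to pipe node np. */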
2196 static struct rte_sched_pipe_params *
2197 pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
2198 {
2199         struct pmd_internals *p = dev->data->dev_private;
2200         struct tm_hierarchy *h = &p->soft.tm.h;
2201         struct tm_params *t = &p->soft.tm.params;
2202         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2203                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2204
2205         uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
2206         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2207
2208         uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
2209         uint32_t pipe_profile_id = t->pipe_to_profile[pos];
2210
2211         return &t->pipe_profiles[pipe_profile_id];
2212 }
2213
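/* Walk all subport and pipe nodes, build the de-duplicated set of pipe
 * profiles and record the pipe-to-profile mapping.
 */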
2214 static int
2215 pipe_profiles_generate(struct rte_eth_dev *dev)
2216 {
2217         struct pmd_internals *p = dev->data->dev_private;
2218         struct tm_hierarchy *h = &p->soft.tm.h;
2219         struct tm_node_list *nl = &h->nodes;
2220         struct tm_node *ns, *np;
2221         uint32_t subport_id;
2222
2223         /* Objective: Fill in the following fields in struct tm_params:
2224          *    - pipe_profiles
2225          *    - n_pipe_profiles
2226          *    - pipe_to_profile
2227          */
2228
2229         subport_id = 0;
2230         TAILQ_FOREACH(ns, nl, node) {
2231                 uint32_t pipe_id;
2232
2233                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2234                         continue;
2235
2236                 pipe_id = 0;
2237                 TAILQ_FOREACH(np, nl, node) {
2238                         struct rte_sched_pipe_params pp;
2239                         uint32_t pos;
2240
2241                         if (np->level != TM_NODE_LEVEL_PIPE ||
2242                                 np->parent_node_id != ns->node_id)
2243                                 continue;
2244
2245                         pipe_profile_build(dev, np, &pp);
2246
2247                         if (!pipe_profile_exists(dev, &pp, &pos)) {
2248                                 if (!pipe_profile_free_exists(dev, &pos))
2249                                         return -1;
2250
2251                                 pipe_profile_install(dev, &pp, pos);
2252                         }
2253
2254                         pipe_profile_mark(dev, subport_id, pipe_id, pos);
2255
2256                         pipe_id++;
2257                 }
2258
2259                 subport_id++;
2260         }
2261
2262         return 0;
2263 }
2264
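/* Return the WRED profile of the first queue whose traffic class priority
 * is tc_id, or NULL when no such queue exists.
 */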
2265 static struct tm_wred_profile *
2266 tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
2267 {
2268         struct pmd_internals *p = dev->data->dev_private;
2269         struct tm_hierarchy *h = &p->soft.tm.h;
2270         struct tm_node_list *nl = &h->nodes;
2271         struct tm_node *nq;
2272
2273         TAILQ_FOREACH(nq, nl, node) {
2274                 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2275                         nq->parent_node->priority != tc_id)
2276                         continue;
2277
2278                 return nq->wred_profile;
2279         }
2280
2281         return NULL;
2282 }
2283
2284 #ifdef RTE_SCHED_RED
2285
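/* Copy the per-TC, per-color RED parameters from the TM WRED profiles into
 * the parameters of subport subport_id.
 */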
2286 static void
2287 wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)
2288 {
2289         struct pmd_internals *p = dev->data->dev_private;
2290         struct rte_sched_subport_params *pp =
2291                 &p->soft.tm.params.subport_params[subport_id];
2292
2293         uint32_t tc_id;
2294         enum rte_color color;
2295
2296         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
2297                 for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
2298                         struct rte_red_params *dst =
2299                                 &pp->red_params[tc_id][color];
2300                         struct tm_wred_profile *src_wp =
2301                                 tm_tc_wred_profile_get(dev, tc_id);
2302                         struct rte_tm_red_params *src =
2303                                 &src_wp->params.red_params[color];
2304
2305                         memcpy(dst, src, sizeof(*dst));
2306                 }
2307 }
2308
2309 #else
2310
2311 #define wred_profiles_set(dev, subport_id)
2312
2313 #endif
2314
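/* Return the single shared shaper of TC node tc_node, or NULL when the
 * node uses no shared shaper.
 */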
2315 static struct tm_shared_shaper *
2316 tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
2317 {
2318         return (tc_node->params.n_shared_shapers) ?
2319                 tm_shared_shaper_search(dev,
2320                         tc_node->params.shared_shaper_id[0]) :
2321                 NULL;
2322 }
2323
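/* Return the shared shaper used by traffic class tc_id within subport
 * subport_node, or NULL when none is used.
 */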
2324 static struct tm_shared_shaper *
2325 tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
2326         struct tm_node *subport_node,
2327         uint32_t tc_id)
2328 {
2329         struct pmd_internals *p = dev->data->dev_private;
2330         struct tm_node_list *nl = &p->soft.tm.h.nodes;
2331         struct tm_node *n;
2332
2333         TAILQ_FOREACH(n, nl, node) {
2334                 if (n->level != TM_NODE_LEVEL_TC ||
2335                         n->parent_node->parent_node_id !=
2336                                 subport_node->node_id ||
2337                         n->priority != tc_id)
2338                         continue;
2339
2340                 return tm_tc_shared_shaper_get(dev, n);
2341         }
2342
2343         return NULL;
2344 }
2345
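/* Validate the complete TM hierarchy before it is translated into
 * rte_sched port, subport and pipe parameters.
 */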
2346 static int
2347 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2348 {
2349         struct pmd_internals *p = dev->data->dev_private;
2350         struct tm_hierarchy *h = &p->soft.tm.h;
2351         struct tm_node_list *nl = &h->nodes;
2352         struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2353         struct tm_wred_profile_list *wpl = &h->wred_profiles;
2354         struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2355         struct tm_shared_shaper *ss;
2356
2357         uint32_t n_pipes_per_subport;
2358
2359         /* Root node exists. */
2360         if (nr == NULL)
2361                 return -rte_tm_error_set(error,
2362                         EINVAL,
2363                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2364                         NULL,
2365                         rte_strerror(EINVAL));
2366
2367         /* There is at least one subport, max is not exceeded. */
2368         if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2369                 return -rte_tm_error_set(error,
2370                         EINVAL,
2371                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2372                         NULL,
2373                         rte_strerror(EINVAL));
2374
2375         /* There is at least one pipe. */
2376         if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2377                 return -rte_tm_error_set(error,
2378                         EINVAL,
2379                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2380                         NULL,
2381                         rte_strerror(EINVAL));
2382
2383         /* Number of pipes is the same for all subports. Maximum number of pipes
2384          * per subport is not exceeded.
2385          */
2386         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2387                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2388
2389         if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2390                 return -rte_tm_error_set(error,
2391                         EINVAL,
2392                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2393                         NULL,
2394                         rte_strerror(EINVAL));
2395
2396         TAILQ_FOREACH(ns, nl, node) {
2397                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2398                         continue;
2399
2400                 if (ns->n_children != n_pipes_per_subport)
2401                         return -rte_tm_error_set(error,
2402                                 EINVAL,
2403                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2404                                 NULL,
2405                                 rte_strerror(EINVAL));
2406         }
2407
2408         /* Each pipe has exactly 13 TCs, with exactly one TC for each priority */
2409         TAILQ_FOREACH(np, nl, node) {
2410                 uint32_t mask = 0, mask_expected =
2411                         RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2412                                 uint32_t);
2413
2414                 if (np->level != TM_NODE_LEVEL_PIPE)
2415                         continue;
2416
2417                 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2418                         return -rte_tm_error_set(error,
2419                                 EINVAL,
2420                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2421                                 NULL,
2422                                 rte_strerror(EINVAL));
2423
2424                 TAILQ_FOREACH(nt, nl, node) {
2425                         if (nt->level != TM_NODE_LEVEL_TC ||
2426                                 nt->parent_node_id != np->node_id)
2427                                 continue;
2428
2429                         mask |= 1 << nt->priority;
2430                 }
2431
2432                 if (mask != mask_expected)
2433                         return -rte_tm_error_set(error,
2434                                 EINVAL,
2435                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2436                                 NULL,
2437                                 rte_strerror(EINVAL));
2438         }
2439
2440         /** Each strict priority TC has exactly 1 packet queue, while the
2441          *      lowest priority TC (Best Effort) has 4 queues.
2442          */
2443         TAILQ_FOREACH(nt, nl, node) {
2444                 if (nt->level != TM_NODE_LEVEL_TC)
2445                         continue;
2446
2447                 if (nt->n_children != 1 && nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE)
2448                         return -rte_tm_error_set(error,
2449                                 EINVAL,
2450                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2451                                 NULL,
2452                                 rte_strerror(EINVAL));
2453         }
2454
2455         /**
2456          * Shared shapers:
2457          *    -For each TC #i, all pipes in the same subport use the same
2458          *     shared shaper (or no shared shaper) for their TC#i.
2459          *    -Each shared shaper needs to have at least one user. All its
2460          *     users have to be TC nodes with the same priority and the same
2461          *     subport.
2462          */
2463         TAILQ_FOREACH(ns, nl, node) {
2464                 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2465                 uint32_t id;
2466
2467                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2468                         continue;
2469
2470                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2471                         s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2472
2473                 TAILQ_FOREACH(nt, nl, node) {
2474                         struct tm_shared_shaper *subport_ss, *tc_ss;
2475
2476                         if (nt->level != TM_NODE_LEVEL_TC ||
2477                                 nt->parent_node->parent_node_id !=
2478                                         ns->node_id)
2479                                 continue;
2480
2481                         subport_ss = s[nt->priority];
2482                         tc_ss = tm_tc_shared_shaper_get(dev, nt);
2483
2484                         if (subport_ss == NULL && tc_ss == NULL)
2485                                 continue;
2486
2487                         if ((subport_ss == NULL && tc_ss != NULL) ||
2488                                 (subport_ss != NULL && tc_ss == NULL) ||
2489                                 subport_ss->shared_shaper_id !=
2490                                         tc_ss->shared_shaper_id)
2491                                 return -rte_tm_error_set(error,
2492                                         EINVAL,
2493                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2494                                         NULL,
2495                                         rte_strerror(EINVAL));
2496                 }
2497         }
2498
2499         TAILQ_FOREACH(ss, ssl, node) {
2500                 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2501                 uint32_t n_users = 0;
2502
2503                 if (nt_any != NULL)
2504                         TAILQ_FOREACH(nt, nl, node) {
2505                                 if (nt->level != TM_NODE_LEVEL_TC ||
2506                                         nt->priority != nt_any->priority ||
2507                                         nt->parent_node->parent_node_id !=
2508                                         nt_any->parent_node->parent_node_id)
2509                                         continue;
2510
2511                                 n_users++;
2512                         }
2513
2514                 if (ss->n_users == 0 || ss->n_users != n_users)
2515                         return -rte_tm_error_set(error,
2516                                 EINVAL,
2517                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2518                                 NULL,
2519                                 rte_strerror(EINVAL));
2520         }
2521
2522         /* Not too many pipe profiles. */
2523         if (pipe_profiles_generate(dev))
2524                 return -rte_tm_error_set(error,
2525                         EINVAL,
2526                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2527                         NULL,
2528                         rte_strerror(EINVAL));
2529
2530         /**
2531          * WRED (when used, i.e. at least one WRED profile defined):
2532          *    -Each WRED profile must have at least one user.
2533          *    -All leaf nodes must have their private WRED context enabled.
2534          *    -For each TC #i, all leaf nodes must use the same WRED profile
2535          *     for their private WRED context.
2536          */
2537         if (h->n_wred_profiles) {
2538                 struct tm_wred_profile *wp;
2539                 struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2540                 uint32_t id;
2541
2542                 TAILQ_FOREACH(wp, wpl, node)
2543                         if (wp->n_users == 0)
2544                                 return -rte_tm_error_set(error,
2545                                         EINVAL,
2546                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2547                                         NULL,
2548                                         rte_strerror(EINVAL));
2549
2550                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
2551                         w[id] = tm_tc_wred_profile_get(dev, id);
2552
2553                         if (w[id] == NULL)
2554                                 return -rte_tm_error_set(error,
2555                                         EINVAL,
2556                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2557                                         NULL,
2558                                         rte_strerror(EINVAL));
2559                 }
2560
2561                 TAILQ_FOREACH(nq, nl, node) {
2562                         uint32_t id;
2563
2564                         if (nq->level != TM_NODE_LEVEL_QUEUE)
2565                                 continue;
2566
2567                         id = nq->parent_node->priority;
2568
2569                         if (nq->wred_profile == NULL ||
2570                                 nq->wred_profile->wred_profile_id !=
2571                                         w[id]->wred_profile_id)
2572                                 return -rte_tm_error_set(error,
2573                                         EINVAL,
2574                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2575                                         NULL,
2576                                         rte_strerror(EINVAL));
2577                 }
2578         }
2579
2580         return 0;
2581 }
2582
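/* Translate the validated TM hierarchy into the rte_sched port and subport
 * parameter blueprints.
 */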
2583 static void
2584 hierarchy_blueprints_create(struct rte_eth_dev *dev)
2585 {
2586         struct pmd_internals *p = dev->data->dev_private;
2587         struct tm_params *t = &p->soft.tm.params;
2588         struct tm_hierarchy *h = &p->soft.tm.h;
2589
2590         struct tm_node_list *nl = &h->nodes;
2591         struct tm_node *root = tm_root_node_present(dev), *n;
2592
2593         uint32_t subport_id;
2594
2595         t->port_params = (struct rte_sched_port_params) {
2596                 .name = dev->data->name,
2597                 .socket = dev->data->numa_node,
2598                 .rate = root->shaper_profile->params.peak.rate,
2599                 .mtu = dev->data->mtu,
2600                 .frame_overhead =
2601                         root->shaper_profile->params.pkt_length_adjust,
2602                 .n_subports_per_port = root->n_children,
2603                 .n_pipes_per_subport = TM_MAX_PIPES_PER_SUBPORT,
2604         };
2605
2606         subport_id = 0;
2607         TAILQ_FOREACH(n, nl, node) {
2608                 uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2609                 uint32_t i;
2610
2611                 if (n->level != TM_NODE_LEVEL_SUBPORT)
2612                         continue;
2613
2614                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
2615                         struct tm_shared_shaper *ss;
2616                         struct tm_shaper_profile *sp;
2617
2618                         ss = tm_subport_tc_shared_shaper_get(dev, n, i);
2619                         sp = (ss) ? tm_shaper_profile_search(dev,
2620                                 ss->shaper_profile_id) :
2621                                 n->shaper_profile;
2622                         tc_rate[i] = sp->params.peak.rate;
2623                 }
2624
2625                 t->subport_params[subport_id] =
2626                         (struct rte_sched_subport_params) {
2627                                 .tb_rate = n->shaper_profile->params.peak.rate,
2628                                 .tb_size = n->shaper_profile->params.peak.size,
2629
2630                                 .tc_rate = {tc_rate[0],
2631                                         tc_rate[1],
2632                                         tc_rate[2],
2633                                         tc_rate[3],
2634                                         tc_rate[4],
2635                                         tc_rate[5],
2636                                         tc_rate[6],
2637                                         tc_rate[7],
2638                                         tc_rate[8],
2639                                         tc_rate[9],
2640                                         tc_rate[10],
2641                                         tc_rate[11],
2642                                         tc_rate[12],
2643                                 },
2644                                 .tc_period = SUBPORT_TC_PERIOD,
2645                                 .n_pipes_per_subport_enabled =
2646                                         h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2647                                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
2648                                 .qsize = {p->params.tm.qsize[0],
2649                                         p->params.tm.qsize[1],
2650                                         p->params.tm.qsize[2],
2651                                         p->params.tm.qsize[3],
2652                                         p->params.tm.qsize[4],
2653                                         p->params.tm.qsize[5],
2654                                         p->params.tm.qsize[6],
2655                                         p->params.tm.qsize[7],
2656                                         p->params.tm.qsize[8],
2657                                         p->params.tm.qsize[9],
2658                                         p->params.tm.qsize[10],
2659                                         p->params.tm.qsize[11],
2660                                         p->params.tm.qsize[12],
2661                                 },
2662                                 .pipe_profiles = t->pipe_profiles,
2663                                 .n_pipe_profiles = t->n_pipe_profiles,
2664                                 .n_max_pipe_profiles = TM_MAX_PIPE_PROFILE,
2665                 };
2666                 wred_profiles_set(dev, subport_id);
2667                 subport_id++;
2668         }
2669 }
2670
2671 /* Traffic manager hierarchy commit */
2672 static int
2673 pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
2674         int clear_on_fail,
2675         struct rte_tm_error *error)
2676 {
2677         struct pmd_internals *p = dev->data->dev_private;
2678         int status;
2679
2680         /* Checks */
2681         if (p->soft.tm.hierarchy_frozen)
2682                 return -rte_tm_error_set(error,
2683                         EBUSY,
2684                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2685                         NULL,
2686                         rte_strerror(EBUSY));
2687
2688         status = hierarchy_commit_check(dev, error);
2689         if (status) {
2690                 if (clear_on_fail)
2691                         tm_hierarchy_free(p);
2692
2693                 return status;
2694         }
2695
2696         /* Create blueprints */
2697         hierarchy_blueprints_create(dev);
2698
2699         /* Freeze hierarchy */
2700         p->soft.tm.hierarchy_frozen = 1;
2701
2702         return 0;
2703 }
2704
2705 #ifdef RTE_SCHED_SUBPORT_TC_OV
2706
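/* Update the TC oversubscription weight of pipe node np by moving the pipe
 * to an existing pipe profile that carries the new weight.
 */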
2707 static int
2708 update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
2709 {
2710         struct pmd_internals *p = dev->data->dev_private;
2711         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2712
2713         struct tm_node *ns = np->parent_node;
2714         uint32_t subport_id = tm_node_subport_id(dev, ns);
2715
2716         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2717         struct rte_sched_pipe_params profile1;
2718         uint32_t pipe_profile_id;
2719
2720         /* Derive new pipe profile. */
2721         memcpy(&profile1, profile0, sizeof(profile1));
2722         profile1.tc_ov_weight = (uint8_t)weight;
2723
2724         /* Since the implementation does not allow adding more pipe profiles after
2725          * port configuration, the pipe configuration can be successfully
2726          * updated only if the new profile is also part of the existing set of
2727          * pipe profiles.
2728          */
2729         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2730                 return -1;
2731
2732         /* Update the pipe profile used by the current pipe. */
2733         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2734                 (int32_t)pipe_profile_id))
2735                 return -1;
2736
2737         /* Commit changes. */
2738         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2739         np->weight = weight;
2740
2741         return 0;
2742 }
2743
2744 #endif
2745
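/* Update the WRR weight of best-effort queue node nq by moving its pipe to
 * an existing pipe profile that carries the new weight.
 */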
2746 static int
2747 update_queue_weight(struct rte_eth_dev *dev,
2748         struct tm_node *nq, uint32_t weight)
2749 {
2750         struct pmd_internals *p = dev->data->dev_private;
2751         uint32_t queue_id = tm_node_queue_id(dev, nq);
2752
2753         struct tm_node *nt = nq->parent_node;
2754
2755         struct tm_node *np = nt->parent_node;
2756         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2757
2758         struct tm_node *ns = np->parent_node;
2759         uint32_t subport_id = tm_node_subport_id(dev, ns);
2760
2761         uint32_t pipe_be_queue_id =
2762                 queue_id - RTE_SCHED_TRAFFIC_CLASS_BE;
2763
2764         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2765         struct rte_sched_pipe_params profile1;
2766         uint32_t pipe_profile_id;
2767
2768         /* Derive new pipe profile. */
2769         memcpy(&profile1, profile0, sizeof(profile1));
2770         profile1.wrr_weights[pipe_be_queue_id] = (uint8_t)weight;
2771
2772         /* Since the implementation does not allow adding more pipe profiles after
2773          * port configuration, the pipe configuration can be successfully
2774          * updated only if the new profile is also part of the existing set
2775          * of pipe profiles.
2776          */
2777         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2778                 return -1;
2779
2780         /* Update the pipe profile used by the current pipe. */
2781         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2782                 (int32_t)pipe_profile_id))
2783                 return -1;
2784
2785         /* Commit changes. */
2786         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2787         nq->weight = weight;
2788
2789         return 0;
2790 }
2791
2792 /* Traffic manager node parent update */
2793 static int
2794 pmd_tm_node_parent_update(struct rte_eth_dev *dev,
2795         uint32_t node_id,
2796         uint32_t parent_node_id,
2797         uint32_t priority,
2798         uint32_t weight,
2799         struct rte_tm_error *error)
2800 {
2801         struct tm_node *n;
2802
2803         /* Port must be started and TM used. */
2804         if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
2805                 return -rte_tm_error_set(error,
2806                         EBUSY,
2807                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2808                         NULL,
2809                         rte_strerror(EBUSY));
2810
2811         /* Node must be valid */
2812         n = tm_node_search(dev, node_id);
2813         if (n == NULL)
2814                 return -rte_tm_error_set(error,
2815                         EINVAL,
2816                         RTE_TM_ERROR_TYPE_NODE_ID,
2817                         NULL,
2818                         rte_strerror(EINVAL));
2819
2820         /* Parent node must be the same */
2821         if (n->parent_node_id != parent_node_id)
2822                 return -rte_tm_error_set(error,
2823                         EINVAL,
2824                         RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
2825                         NULL,
2826                         rte_strerror(EINVAL));
2827
2828         /* Priority must be the same */
2829         if (n->priority != priority)
2830                 return -rte_tm_error_set(error,
2831                         EINVAL,
2832                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
2833                         NULL,
2834                         rte_strerror(EINVAL));
2835
2836         /* weight: must be non-zero and less than UINT8_MAX */
2837         if (weight == 0 || weight >= UINT8_MAX)
2838                 return -rte_tm_error_set(error,
2839                         EINVAL,
2840                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2841                         NULL,
2842                         rte_strerror(EINVAL));
2843
2844         switch (n->level) {
2845         case TM_NODE_LEVEL_PORT:
2846                 return -rte_tm_error_set(error,
2847                         EINVAL,
2848                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2849                         NULL,
2850                         rte_strerror(EINVAL));
2851                 /* fall-through */
2852         case TM_NODE_LEVEL_SUBPORT:
2853                 return -rte_tm_error_set(error,
2854                         EINVAL,
2855                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2856                         NULL,
2857                         rte_strerror(EINVAL));
2858                 /* fall-through */
2859         case TM_NODE_LEVEL_PIPE:
2860 #ifdef RTE_SCHED_SUBPORT_TC_OV
2861                 if (update_pipe_weight(dev, n, weight))
2862                         return -rte_tm_error_set(error,
2863                                 EINVAL,
2864                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2865                                 NULL,
2866                                 rte_strerror(EINVAL));
2867                 return 0;
2868 #else
2869                 return -rte_tm_error_set(error,
2870                         EINVAL,
2871                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2872                         NULL,
2873                         rte_strerror(EINVAL));
2874 #endif
2875                 /* fall-through */
2876         case TM_NODE_LEVEL_TC:
2877                 return -rte_tm_error_set(error,
2878                         EINVAL,
2879                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2880                         NULL,
2881                         rte_strerror(EINVAL));
2882                 /* fall-through */
2883         case TM_NODE_LEVEL_QUEUE:
2884                 /* fall-through */
2885         default:
2886                 if (update_queue_weight(dev, n, weight))
2887                         return -rte_tm_error_set(error,
2888                                 EINVAL,
2889                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2890                                 NULL,
2891                                 rte_strerror(EINVAL));
2892                 return 0;
2893         }
2894 }
2895
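     /* Apply a new shaper profile to a subport node: reconfigure the subport
      * token bucket (rate, size) in the scheduler, then switch the node to
      * the new profile and refresh the cached subport parameters.
      */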
2896 static int
2897 update_subport_rate(struct rte_eth_dev *dev,
2898         struct tm_node *ns,
2899         struct tm_shaper_profile *sp)
2900 {
2901         struct pmd_internals *p = dev->data->dev_private;
2902         uint32_t subport_id = tm_node_subport_id(dev, ns);
2903
2904         struct rte_sched_subport_params subport_params;
2905
2906         /* Derive new subport configuration. */
2907         memcpy(&subport_params,
2908                 &p->soft.tm.params.subport_params[subport_id],
2909                 sizeof(subport_params));
2910         subport_params.tb_rate = sp->params.peak.rate;
2911         subport_params.tb_size = sp->params.peak.size;
2912
2913         /* Update the subport configuration. */
2914         if (rte_sched_subport_config(SCHED(p), subport_id,
2915                 &subport_params, 0))
2916                 return -1;
2917
2918         /* Commit changes. */
2919         ns->shaper_profile->n_users--;
2920
2921         ns->shaper_profile = sp;
2922         ns->params.shaper_profile_id = sp->shaper_profile_id;
2923         sp->n_users++;
2924
2925         memcpy(&p->soft.tm.params.subport_params[subport_id],
2926                 &subport_params,
2927                 sizeof(subport_params));
2928
2929         return 0;
2930 }
2931
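     /* Apply a new shaper profile to a pipe node. New pipe profiles cannot be
      * added after port configuration, so the update succeeds only if a
      * matching profile already exists; the pipe is then moved to it.
      */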
2932 static int
2933 update_pipe_rate(struct rte_eth_dev *dev,
2934         struct tm_node *np,
2935         struct tm_shaper_profile *sp)
2936 {
2937         struct pmd_internals *p = dev->data->dev_private;
2938         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2939
2940         struct tm_node *ns = np->parent_node;
2941         uint32_t subport_id = tm_node_subport_id(dev, ns);
2942
2943         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2944         struct rte_sched_pipe_params profile1;
2945         uint32_t pipe_profile_id;
2946
2947         /* Derive new pipe profile. */
2948         memcpy(&profile1, profile0, sizeof(profile1));
2949         profile1.tb_rate = sp->params.peak.rate;
2950         profile1.tb_size = sp->params.peak.size;
2951
2952         /* Since the implementation does not allow adding more pipe profiles
2953          * after the port is configured, the pipe configuration can only be
2954          * updated successfully if the new profile already belongs to the
2955          * existing set of pipe profiles.
2956          */
2957         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2958                 return -1;
2959
2960         /* Update the pipe profile used by the current pipe. */
2961         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2962                 (int32_t)pipe_profile_id))
2963                 return -1;
2964
2965         /* Commit changes. */
2966         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2967         np->shaper_profile->n_users--;
2968         np->shaper_profile = sp;
2969         np->params.shaper_profile_id = sp->shaper_profile_id;
2970         sp->n_users++;
2971
2972         return 0;
2973 }
2974
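     /* Apply a new shaper profile to a traffic class node by moving its pipe
      * to an existing pipe profile that carries the new TC rate.
      */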
2975 static int
2976 update_tc_rate(struct rte_eth_dev *dev,
2977         struct tm_node *nt,
2978         struct tm_shaper_profile *sp)
2979 {
2980         struct pmd_internals *p = dev->data->dev_private;
2981         uint32_t tc_id = tm_node_tc_id(dev, nt);
2982
2983         struct tm_node *np = nt->parent_node;
2984         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2985
2986         struct tm_node *ns = np->parent_node;
2987         uint32_t subport_id = tm_node_subport_id(dev, ns);
2988
2989         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2990         struct rte_sched_pipe_params profile1;
2991         uint32_t pipe_profile_id;
2992
2993         /* Derive new pipe profile. */
2994         memcpy(&profile1, profile0, sizeof(profile1));
2995         profile1.tc_rate[tc_id] = sp->params.peak.rate;
2996
2997         /* Since the implementation does not allow adding more pipe profiles
2998          * after the port is configured, the pipe configuration can only be
2999          * updated successfully if the new profile already belongs to the
3000          * existing set of pipe profiles.
3001          */
3002         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
3003                 return -1;
3004
3005         /* Update the pipe profile used by the current pipe. */
3006         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
3007                 (int32_t)pipe_profile_id))
3008                 return -1;
3009
3010         /* Commit changes. */
3011         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
3012         nt->shaper_profile->n_users--;
3013         nt->shaper_profile = sp;
3014         nt->params.shaper_profile_id = sp->shaper_profile_id;
3015         sp->n_users++;
3016
3017         return 0;
3018 }
3019
3020 /* Traffic manager node shaper update */
3021 static int
3022 pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
3023         uint32_t node_id,
3024         uint32_t shaper_profile_id,
3025         struct rte_tm_error *error)
3026 {
3027         struct tm_node *n;
3028         struct tm_shaper_profile *sp;
3029
3030         /* Port must be started and TM used. */
3031         if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
3032                 return -rte_tm_error_set(error,
3033                         EBUSY,
3034                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3035                         NULL,
3036                         rte_strerror(EBUSY));
3037
3038         /* Node must be valid */
3039         n = tm_node_search(dev, node_id);
3040         if (n == NULL)
3041                 return -rte_tm_error_set(error,
3042                         EINVAL,
3043                         RTE_TM_ERROR_TYPE_NODE_ID,
3044                         NULL,
3045                         rte_strerror(EINVAL));
3046
3047         /* Shaper profile must be valid. */
3048         sp = tm_shaper_profile_search(dev, shaper_profile_id);
3049         if (sp == NULL)
3050                 return -rte_tm_error_set(error,
3051                         EINVAL,
3052                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
3053                         NULL,
3054                         rte_strerror(EINVAL));
3055
3056         switch (n->level) {
3057         case TM_NODE_LEVEL_PORT:
3058                 return -rte_tm_error_set(error,
3059                         EINVAL,
3060                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3061                         NULL,
3062                         rte_strerror(EINVAL));
3063                 /* fall-through */
3064         case TM_NODE_LEVEL_SUBPORT:
3065                 if (update_subport_rate(dev, n, sp))
3066                         return -rte_tm_error_set(error,
3067                                 EINVAL,
3068                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3069                                 NULL,
3070                                 rte_strerror(EINVAL));
3071                 return 0;
3072                 /* fall-through */
3073         case TM_NODE_LEVEL_PIPE:
3074                 if (update_pipe_rate(dev, n, sp))
3075                         return -rte_tm_error_set(error,
3076                                 EINVAL,
3077                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3078                                 NULL,
3079                                 rte_strerror(EINVAL));
3080                 return 0;
3081                 /* fall-through */
3082         case TM_NODE_LEVEL_TC:
3083                 if (update_tc_rate(dev, n, sp))
3084                         return -rte_tm_error_set(error,
3085                                 EINVAL,
3086                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3087                                 NULL,
3088                                 rte_strerror(EINVAL));
3089                 return 0;
3090                 /* fall-through */
3091         case TM_NODE_LEVEL_QUEUE:
3092                 /* fall-through */
3093         default:
3094                 return -rte_tm_error_set(error,
3095                         EINVAL,
3096                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3097                         NULL,
3098                         rte_strerror(EINVAL));
3099         }
3100 }
3101
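     /* Map a (subport, pipe, traffic class, queue) tuple to the flat
      * port-level queue index used by librte_sched. Non-BE traffic classes
      * have one queue each, so pipe_tc_id + tc_queue_id selects the queue
      * within the pipe.
      */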
3102 static inline uint32_t
3103 tm_port_queue_id(struct rte_eth_dev *dev,
3104         uint32_t port_subport_id,
3105         uint32_t subport_pipe_id,
3106         uint32_t pipe_tc_id,
3107         uint32_t tc_queue_id)
3108 {
3109         struct pmd_internals *p = dev->data->dev_private;
3110         struct tm_hierarchy *h = &p->soft.tm.h;
3111         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
3112                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3113
3114         uint32_t port_pipe_id =
3115                 port_subport_id * n_pipes_per_subport + subport_pipe_id;
3116
3117         uint32_t port_queue_id =
3118                 port_pipe_id * RTE_SCHED_QUEUES_PER_PIPE + pipe_tc_id + tc_queue_id;
3119
3120         return port_queue_id;
3121 }
3122
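     /* Port level stats: aggregate the per traffic class counters of every
      * subport into the port node, then optionally copy and/or clear them.
      */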
3123 static int
3124 read_port_stats(struct rte_eth_dev *dev,
3125         struct tm_node *nr,
3126         struct rte_tm_node_stats *stats,
3127         uint64_t *stats_mask,
3128         int clear)
3129 {
3130         struct pmd_internals *p = dev->data->dev_private;
3131         struct tm_hierarchy *h = &p->soft.tm.h;
3132         uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3133         uint32_t subport_id;
3134
3135         for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
3136                 struct rte_sched_subport_stats s;
3137                 uint32_t tc_ov, id;
3138
3139                 /* Stats read */
3140                 int status = rte_sched_subport_read_stats(SCHED(p),
3141                         subport_id,
3142                         &s,
3143                         &tc_ov);
3144                 if (status)
3145                         return status;
3146
3147                 /* Stats accumulate */
3148                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
3149                         nr->stats.n_pkts +=
3150                                 s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
3151                         nr->stats.n_bytes +=
3152                                 s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
3153                         nr->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
3154                                 s.n_pkts_tc_dropped[id];
3155                         nr->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3156                                 s.n_bytes_tc_dropped[id];
3157                 }
3158         }
3159
3160         /* Stats copy */
3161         if (stats)
3162                 memcpy(stats, &nr->stats, sizeof(*stats));
3163
3164         if (stats_mask)
3165                 *stats_mask = STATS_MASK_DEFAULT;
3166
3167         /* Stats clear */
3168         if (clear)
3169                 memset(&nr->stats, 0, sizeof(nr->stats));
3170
3171         return 0;
3172 }
3173
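     /* Subport level stats: accumulate the per traffic class counters of the
      * given subport, then optionally copy and/or clear them.
      */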
3174 static int
3175 read_subport_stats(struct rte_eth_dev *dev,
3176         struct tm_node *ns,
3177         struct rte_tm_node_stats *stats,
3178         uint64_t *stats_mask,
3179         int clear)
3180 {
3181         struct pmd_internals *p = dev->data->dev_private;
3182         uint32_t subport_id = tm_node_subport_id(dev, ns);
3183         struct rte_sched_subport_stats s;
3184         uint32_t tc_ov, tc_id;
3185
3186         /* Stats read */
3187         int status = rte_sched_subport_read_stats(SCHED(p),
3188                 subport_id,
3189                 &s,
3190                 &tc_ov);
3191         if (status)
3192                 return status;
3193
3194         /* Stats accumulate */
3195         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
3196                 ns->stats.n_pkts +=
3197                         s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
3198                 ns->stats.n_bytes +=
3199                         s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
3200                 ns->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
3201                         s.n_pkts_tc_dropped[tc_id];
3202                 ns->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3203                         s.n_bytes_tc_dropped[tc_id];
3204         }
3205
3206         /* Stats copy */
3207         if (stats)
3208                 memcpy(stats, &ns->stats, sizeof(*stats));
3209
3210         if (stats_mask)
3211                 *stats_mask = STATS_MASK_DEFAULT;
3212
3213         /* Stats clear */
3214         if (clear)
3215                 memset(&ns->stats, 0, sizeof(ns->stats));
3216
3217         return 0;
3218 }
3219
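     /* Pipe level stats: accumulate the counters of all queues of the pipe
      * (one queue per non-BE traffic class, plus the best effort queues).
      */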
3220 static int
3221 read_pipe_stats(struct rte_eth_dev *dev,
3222         struct tm_node *np,
3223         struct rte_tm_node_stats *stats,
3224         uint64_t *stats_mask,
3225         int clear)
3226 {
3227         struct pmd_internals *p = dev->data->dev_private;
3228
3229         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3230
3231         struct tm_node *ns = np->parent_node;
3232         uint32_t subport_id = tm_node_subport_id(dev, ns);
3233         uint32_t tc_id, queue_id;
3234         uint32_t i;
3235
3236         /* Stats read */
3237         for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
3238                 struct rte_sched_queue_stats s;
3239                 uint16_t qlen;
3240
3241                 if (i < RTE_SCHED_TRAFFIC_CLASS_BE) {
3242                         tc_id = i;
3243                         queue_id = i;
3244                 } else {
3245                         tc_id = RTE_SCHED_TRAFFIC_CLASS_BE;
3246                         queue_id = i - tc_id;
3247                 }
3248
3249                 uint32_t qid = tm_port_queue_id(dev,
3250                         subport_id,
3251                         pipe_id,
3252                         tc_id,
3253                         queue_id);
3254
3255                 int status = rte_sched_queue_read_stats(SCHED(p),
3256                         qid,
3257                         &s,
3258                         &qlen);
3259                 if (status)
3260                         return status;
3261
3262                 /* Stats accumulate */
3263                 np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3264                 np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3265                 np->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3266                 np->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3267                         s.n_bytes_dropped;
3268                 np->stats.leaf.n_pkts_queued = qlen;
3269         }
3270
3271         /* Stats copy */
3272         if (stats)
3273                 memcpy(stats, &np->stats, sizeof(*stats));
3274
3275         if (stats_mask)
3276                 *stats_mask = STATS_MASK_DEFAULT;
3277
3278         /* Stats clear */
3279         if (clear)
3280                 memset(&np->stats, 0, sizeof(np->stats));
3281
3282         return 0;
3283 }
3284
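     /* Traffic class level stats: a non-BE class reads its single queue,
      * while the best effort class accumulates all of its queues.
      */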
3285 static int
3286 read_tc_stats(struct rte_eth_dev *dev,
3287         struct tm_node *nt,
3288         struct rte_tm_node_stats *stats,
3289         uint64_t *stats_mask,
3290         int clear)
3291 {
3292         struct pmd_internals *p = dev->data->dev_private;
3293
3294         uint32_t tc_id = tm_node_tc_id(dev, nt);
3295
3296         struct tm_node *np = nt->parent_node;
3297         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3298
3299         struct tm_node *ns = np->parent_node;
3300         uint32_t subport_id = tm_node_subport_id(dev, ns);
3301         struct rte_sched_queue_stats s;
3302         uint32_t qid, i;
3303         uint16_t qlen;
3304         int status;
3305
3306         /* Stats read */
3307         if (tc_id < RTE_SCHED_TRAFFIC_CLASS_BE) {
3308                 qid = tm_port_queue_id(dev,
3309                         subport_id,
3310                         pipe_id,
3311                         tc_id,
3312                         0);
3313
3314                 status = rte_sched_queue_read_stats(SCHED(p),
3315                         qid,
3316                         &s,
3317                         &qlen);
3318                 if (status)
3319                         return status;
3320
3321                 /* Stats accumulate */
3322                 nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3323                 nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3324                 nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3325                 nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3326                         s.n_bytes_dropped;
3327                 nt->stats.leaf.n_pkts_queued = qlen;
3328         } else {
3329                 for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
3330                         qid = tm_port_queue_id(dev,
3331                                 subport_id,
3332                                 pipe_id,
3333                                 tc_id,
3334                                 i);
3335
3336                         status = rte_sched_queue_read_stats(SCHED(p),
3337                                 qid,
3338                                 &s,
3339                                 &qlen);
3340                         if (status)
3341                                 return status;
3342
3343                         /* Stats accumulate */
3344                         nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3345                         nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3346                         nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
3347                                 s.n_pkts_dropped;
3348                         nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3349                                 s.n_bytes_dropped;
3350                         nt->stats.leaf.n_pkts_queued = qlen;
3351                 }
3352         }
3353
3354         /* Stats copy */
3355         if (stats)
3356                 memcpy(stats, &nt->stats, sizeof(*stats));
3357
3358         if (stats_mask)
3359                 *stats_mask = STATS_MASK_DEFAULT;
3360
3361         /* Stats clear */
3362         if (clear)
3363                 memset(&nt->stats, 0, sizeof(nt->stats));
3364
3365         return 0;
3366 }
3367
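     /* Queue level stats: counters and current occupancy of a single
      * scheduler queue.
      */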
3368 static int
3369 read_queue_stats(struct rte_eth_dev *dev,
3370         struct tm_node *nq,
3371         struct rte_tm_node_stats *stats,
3372         uint64_t *stats_mask,
3373         int clear)
3374 {
3375         struct pmd_internals *p = dev->data->dev_private;
3376         struct rte_sched_queue_stats s;
3377         uint16_t qlen;
3378
3379         uint32_t queue_id = tm_node_queue_id(dev, nq);
3380
3381         struct tm_node *nt = nq->parent_node;
3382         uint32_t tc_id = tm_node_tc_id(dev, nt);
3383
3384         struct tm_node *np = nt->parent_node;
3385         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3386
3387         struct tm_node *ns = np->parent_node;
3388         uint32_t subport_id = tm_node_subport_id(dev, ns);
3389
3390         /* Stats read */
3391         uint32_t qid = tm_port_queue_id(dev,
3392                 subport_id,
3393                 pipe_id,
3394                 tc_id,
3395                 queue_id);
3396
3397         int status = rte_sched_queue_read_stats(SCHED(p),
3398                 qid,
3399                 &s,
3400                 &qlen);
3401         if (status)
3402                 return status;
3403
3404         /* Stats accumulate */
3405         nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3406         nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3407         nq->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3408         nq->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3409                 s.n_bytes_dropped;
3410         nq->stats.leaf.n_pkts_queued = qlen;
3411
3412         /* Stats copy */
3413         if (stats)
3414                 memcpy(stats, &nq->stats, sizeof(*stats));
3415
3416         if (stats_mask)
3417                 *stats_mask = STATS_MASK_QUEUE;
3418
3419         /* Stats clear */
3420         if (clear)
3421                 memset(&nq->stats, 0, sizeof(nq->stats));
3422
3423         return 0;
3424 }
3425
3426 /* Traffic manager read stats counters for specific node */
3427 static int
3428 pmd_tm_node_stats_read(struct rte_eth_dev *dev,
3429         uint32_t node_id,
3430         struct rte_tm_node_stats *stats,
3431         uint64_t *stats_mask,
3432         int clear,
3433         struct rte_tm_error *error)
3434 {
3435         struct tm_node *n;
3436
3437         /* Port must be started and TM used. */
3438         if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
3439                 return -rte_tm_error_set(error,
3440                         EBUSY,
3441                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3442                         NULL,
3443                         rte_strerror(EBUSY));
3444
3445         /* Node must be valid */
3446         n = tm_node_search(dev, node_id);
3447         if (n == NULL)
3448                 return -rte_tm_error_set(error,
3449                         EINVAL,
3450                         RTE_TM_ERROR_TYPE_NODE_ID,
3451                         NULL,
3452                         rte_strerror(EINVAL));
3453
3454         switch (n->level) {
3455         case TM_NODE_LEVEL_PORT:
3456                 if (read_port_stats(dev, n, stats, stats_mask, clear))
3457                         return -rte_tm_error_set(error,
3458                                 EINVAL,
3459                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3460                                 NULL,
3461                                 rte_strerror(EINVAL));
3462                 return 0;
3463
3464         case TM_NODE_LEVEL_SUBPORT:
3465                 if (read_subport_stats(dev, n, stats, stats_mask, clear))
3466                         return -rte_tm_error_set(error,
3467                                 EINVAL,
3468                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3469                                 NULL,
3470                                 rte_strerror(EINVAL));
3471                 return 0;
3472
3473         case TM_NODE_LEVEL_PIPE:
3474                 if (read_pipe_stats(dev, n, stats, stats_mask, clear))
3475                         return -rte_tm_error_set(error,
3476                                 EINVAL,
3477                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3478                                 NULL,
3479                                 rte_strerror(EINVAL));
3480                 return 0;
3481
3482         case TM_NODE_LEVEL_TC:
3483                 if (read_tc_stats(dev, n, stats, stats_mask, clear))
3484                         return -rte_tm_error_set(error,
3485                                 EINVAL,
3486                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3487                                 NULL,
3488                                 rte_strerror(EINVAL));
3489                 return 0;
3490
3491         case TM_NODE_LEVEL_QUEUE:
3492         default:
3493                 if (read_queue_stats(dev, n, stats, stats_mask, clear))
3494                         return -rte_tm_error_set(error,
3495                                 EINVAL,
3496                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3497                                 NULL,
3498                                 rte_strerror(EINVAL));
3499                 return 0;
3500         }
3501 }
3502
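     /* Traffic manager operations exposed by the Soft NIC PMD (NULL entries
      * are not supported).
      */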
3503 const struct rte_tm_ops pmd_tm_ops = {
3504         .node_type_get = pmd_tm_node_type_get,
3505         .capabilities_get = pmd_tm_capabilities_get,
3506         .level_capabilities_get = pmd_tm_level_capabilities_get,
3507         .node_capabilities_get = pmd_tm_node_capabilities_get,
3508
3509         .wred_profile_add = pmd_tm_wred_profile_add,
3510         .wred_profile_delete = pmd_tm_wred_profile_delete,
3511         .shared_wred_context_add_update = NULL,
3512         .shared_wred_context_delete = NULL,
3513
3514         .shaper_profile_add = pmd_tm_shaper_profile_add,
3515         .shaper_profile_delete = pmd_tm_shaper_profile_delete,
3516         .shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
3517         .shared_shaper_delete = pmd_tm_shared_shaper_delete,
3518
3519         .node_add = pmd_tm_node_add,
3520         .node_delete = pmd_tm_node_delete,
3521         .node_suspend = NULL,
3522         .node_resume = NULL,
3523         .hierarchy_commit = pmd_tm_hierarchy_commit,
3524
3525         .node_parent_update = pmd_tm_node_parent_update,
3526         .node_shaper_update = pmd_tm_node_shaper_update,
3527         .node_shared_shaper_update = NULL,
3528         .node_stats_update = NULL,
3529         .node_wfq_weight_mode_update = NULL,
3530         .node_cman_update = NULL,
3531         .node_wred_context_update = NULL,
3532         .node_shared_wred_context_update = NULL,
3533
3534         .node_stats_read = pmd_tm_node_stats_read,
3535 };