/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_malloc.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"

#define BYTES_IN_MBPS		(1000 * 1000 / 8)
#define SUBPORT_TC_PERIOD	10
#define PIPE_TC_PERIOD		40

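/*
 * Rate conversion example: the ethdev "hard" rate is given in Mbps, while
 * rte_sched and the soft TM work in bytes per second. For a 10 Gbps port:
 *
 *	hard_rate = 10000 (Mbps)
 *	hard_rate_bytes_per_sec = 10000 * BYTES_IN_MBPS = 1,250,000,000 bytes/s
 */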
int
tm_params_check(struct pmd_params *params, uint32_t hard_rate)
{
	uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
	uint32_t i;

	/* rate */
	if (params->soft.tm.rate) {
		if (params->soft.tm.rate > hard_rate_bytes_per_sec)
			return -EINVAL;
	} else {
		params->soft.tm.rate =
			(hard_rate_bytes_per_sec > UINT32_MAX) ?
				UINT32_MAX : hard_rate_bytes_per_sec;
	}

	/* nb_queues */
	if (params->soft.tm.nb_queues == 0)
		return -EINVAL;

	if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
		params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;

	params->soft.tm.nb_queues =
		rte_align32pow2(params->soft.tm.nb_queues);

	/* qsize */
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		if (params->soft.tm.qsize[i] == 0)
			return -EINVAL;

		params->soft.tm.qsize[i] =
			rte_align32pow2(params->soft.tm.qsize[i]);
	}

	/* enq_bsz, deq_bsz */
	if (params->soft.tm.enq_bsz == 0 ||
		params->soft.tm.deq_bsz == 0 ||
		params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
		return -EINVAL;

	return 0;
}
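/*
 * Illustrative example (not taken from a real deployment): a soft TM
 * parameter set that passes tm_params_check() for a 10 Gbps port, assuming
 * the default rte_sched build-time configuration:
 *
 *	params->soft.tm.rate = 0;		(0 lets the check derive it from hard_rate)
 *	params->soft.tm.nb_queues = 65536;	(already a power of two)
 *	params->soft.tm.qsize[0..3] = 64;	(per traffic class, each a power of two)
 *	params->soft.tm.enq_bsz = 32;
 *	params->soft.tm.deq_bsz = 24;		(must be smaller than enq_bsz)
 */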

static void
tm_hierarchy_init(struct pmd_internals *p)
{
	memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));

	/* Initialize shaper profile list */
	TAILQ_INIT(&p->soft.tm.h.shaper_profiles);

	/* Initialize shared shaper list */
	TAILQ_INIT(&p->soft.tm.h.shared_shapers);

	/* Initialize wred profile list */
	TAILQ_INIT(&p->soft.tm.h.wred_profiles);

	/* Initialize TM node list */
	TAILQ_INIT(&p->soft.tm.h.nodes);
}

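/*
 * Tear the hierarchy down in reverse dependency order: nodes reference the
 * shaper/WRED profiles and shared shapers, so the node list is emptied
 * first, then the WRED profiles, shared shapers and shaper profiles.
 */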
static void
tm_hierarchy_uninit(struct pmd_internals *p)
{
	/* Remove all nodes */
	for ( ; ; ) {
		struct tm_node *tm_node;

		tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
		if (tm_node == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
		free(tm_node);
	}

	/* Remove all WRED profiles */
	for ( ; ; ) {
		struct tm_wred_profile *wred_profile;

		wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
		if (wred_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
		free(wred_profile);
	}

	/* Remove all shared shapers */
	for ( ; ; ) {
		struct tm_shared_shaper *shared_shaper;

		shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
		if (shared_shaper == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
		free(shared_shaper);
	}

	/* Remove all shaper profiles */
	for ( ; ; ) {
		struct tm_shaper_profile *shaper_profile;

		shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
		if (shaper_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
			shaper_profile, node);
		free(shaper_profile);
	}

	memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
}

int
tm_init(struct pmd_internals *p,
	struct pmd_params *params,
	int numa_node)
{
	uint32_t enq_bsz = params->soft.tm.enq_bsz;
	uint32_t deq_bsz = params->soft.tm.deq_bsz;

	p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
		2 * enq_bsz * sizeof(struct rte_mbuf *),
		0,
		numa_node);

	if (p->soft.tm.pkts_enq == NULL)
		return -ENOMEM;

	p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
		deq_bsz * sizeof(struct rte_mbuf *),
		0,
		numa_node);

	if (p->soft.tm.pkts_deq == NULL) {
		rte_free(p->soft.tm.pkts_enq);
		return -ENOMEM;
	}

	tm_hierarchy_init(p);

	return 0;
}

void
tm_free(struct pmd_internals *p)
{
	tm_hierarchy_uninit(p);
	rte_free(p->soft.tm.pkts_enq);
	rte_free(p->soft.tm.pkts_deq);
}

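/*
 * tm_start() can only run once the TM hierarchy has been committed (frozen).
 * It instantiates the rte_sched port and then configures every subport and,
 * within each subport, every pipe that has a valid profile assigned.
 */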
int
tm_start(struct pmd_internals *p)
{
	struct tm_params *t = &p->soft.tm.params;
	uint32_t n_subports, subport_id;
	int status;

	/* Is hierarchy frozen? */
	if (p->soft.tm.hierarchy_frozen == 0)
		return -1;

	/* Port */
	p->soft.tm.sched = rte_sched_port_config(&t->port_params);
	if (p->soft.tm.sched == NULL)
		return -1;

	/* Subport */
	n_subports = t->port_params.n_subports_per_port;
	for (subport_id = 0; subport_id < n_subports; subport_id++) {
		uint32_t n_pipes_per_subport =
			t->port_params.n_pipes_per_subport;
		uint32_t pipe_id;

		status = rte_sched_subport_config(p->soft.tm.sched,
			subport_id,
			&t->subport_params[subport_id]);
		if (status) {
			rte_sched_port_free(p->soft.tm.sched);
			return -1;
		}

		/* Pipe */
		n_pipes_per_subport = t->port_params.n_pipes_per_subport;
		for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
			int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
				pipe_id;
			int profile_id = t->pipe_to_profile[pos];

			if (profile_id < 0)
				continue;

			status = rte_sched_pipe_config(p->soft.tm.sched,
				subport_id,
				pipe_id,
				profile_id);
			if (status) {
				rte_sched_port_free(p->soft.tm.sched);
				return -1;
			}
		}
	}

	return 0;
}

void
tm_stop(struct pmd_internals *p)
{
	if (p->soft.tm.sched)
		rte_sched_port_free(p->soft.tm.sched);

	/* Unfreeze hierarchy */
	p->soft.tm.hierarchy_frozen = 0;
}

static struct tm_shaper_profile *
tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;

	TAILQ_FOREACH(sp, spl, node)
		if (shaper_profile_id == sp->shaper_profile_id)
			return sp;

	return NULL;
}

static struct tm_shared_shaper *
tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
	struct tm_shared_shaper *ss;

	TAILQ_FOREACH(ss, ssl, node)
		if (shared_shaper_id == ss->shared_shaper_id)
			return ss;

	return NULL;
}

static struct tm_wred_profile *
tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;

	TAILQ_FOREACH(wp, wpl, node)
		if (wred_profile_id == wp->wred_profile_id)
			return wp;

	return NULL;
}

static struct tm_node *
tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->node_id == node_id)
			return n;

	return NULL;
}

static struct tm_node *
tm_root_node_present(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
			return n;

	return NULL;
}

static uint32_t
tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *ns;
	uint32_t subport_id;

	subport_id = 0;
	TAILQ_FOREACH(ns, nl, node) {
		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		if (ns->node_id == subport_node->node_id)
			return subport_id;

		subport_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *np;
	uint32_t pipe_id;

	pipe_id = 0;
	TAILQ_FOREACH(np, nl, node) {
		if (np->level != TM_NODE_LEVEL_PIPE ||
			np->parent_node_id != pipe_node->parent_node_id)
			continue;

		if (np->node_id == pipe_node->node_id)
			return pipe_id;

		pipe_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
{
	return tc_node->priority;
}

static uint32_t
tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *nq;
	uint32_t queue_id;

	queue_id = 0;
	TAILQ_FOREACH(nq, nl, node) {
		if (nq->level != TM_NODE_LEVEL_QUEUE ||
			nq->parent_node_id != queue_node->parent_node_id)
			continue;

		if (nq->node_id == queue_node->node_id)
			return queue_id;

		queue_id++;
	}

	return UINT32_MAX;
}

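/*
 * Worked example (assuming the default rte_sched build, i.e. 4 traffic
 * classes per pipe and 4 queues per traffic class): with nb_queues = 65536,
 * the per-level limits derived below are 16384 TCs, 4096 pipes, 4096
 * subports and a single root (port) node.
 */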
static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t n_queues_max = p->params.soft.tm.nb_queues;
	uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
	uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
	uint32_t n_subports_max = n_pipes_max;
	uint32_t n_root_max = 1;

	switch (level) {
	case TM_NODE_LEVEL_PORT:
		return n_root_max;
	case TM_NODE_LEVEL_SUBPORT:
		return n_subports_max;
	case TM_NODE_LEVEL_PIPE:
		return n_pipes_max;
	case TM_NODE_LEVEL_TC:
		return n_tc_max;
	case TM_NODE_LEVEL_QUEUE:
	default:
		return n_queues_max;
	}
}

/* Traffic manager node type get */
static int
pmd_tm_node_type_get(struct rte_eth_dev *dev,
	uint32_t node_id,
	int *is_leaf,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (is_leaf == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	if (node_id == RTE_TM_NODE_ID_NULL ||
		(tm_node_search(dev, node_id) == NULL))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	*is_leaf = node_id < p->params.soft.tm.nb_queues;

	return 0;
}

#ifdef RTE_SCHED_RED
#define WRED_SUPPORTED						1
#else
#define WRED_SUPPORTED						0
#endif

#define STATS_MASK_DEFAULT					\
	(RTE_TM_STATS_N_PKTS |					\
	RTE_TM_STATS_N_BYTES |					\
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED |			\
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE					\
	(STATS_MASK_DEFAULT |					\
	RTE_TM_STATS_N_PKTS_QUEUED)

static const struct rte_tm_capabilities tm_cap = {
	.n_nodes_max = UINT32_MAX,
	.n_levels_max = TM_NODE_LEVEL_MAX,

	.non_leaf_nodes_identical = 0,
	.leaf_nodes_identical = 1,

	.shaper_n_max = UINT32_MAX,
	.shaper_private_n_max = UINT32_MAX,
	.shaper_private_dual_rate_n_max = 0,
	.shaper_private_rate_min = 1,
	.shaper_private_rate_max = UINT32_MAX,

	.shaper_shared_n_max = UINT32_MAX,
	.shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
	.shaper_shared_n_shapers_per_node_max = 1,
	.shaper_shared_dual_rate_n_max = 0,
	.shaper_shared_rate_min = 1,
	.shaper_shared_rate_max = UINT32_MAX,

	.shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
	.shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,

	.sched_n_children_max = UINT32_MAX,
	.sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
	.sched_wfq_n_children_per_group_max = UINT32_MAX,
	.sched_wfq_n_groups_max = 1,
	.sched_wfq_weight_max = UINT32_MAX,

	.cman_wred_packet_mode_supported = WRED_SUPPORTED,
	.cman_wred_byte_mode_supported = 0,
	.cman_head_drop_supported = 0,
	.cman_wred_context_n_max = 0,
	.cman_wred_context_private_n_max = 0,
	.cman_wred_context_shared_n_max = 0,
	.cman_wred_context_shared_n_nodes_per_context_max = 0,
	.cman_wred_context_shared_n_contexts_per_node_max = 0,

	.mark_vlan_dei_supported = {0, 0, 0},
	.mark_ip_ecn_tcp_supported = {0, 0, 0},
	.mark_ip_ecn_sctp_supported = {0, 0, 0},
	.mark_ip_dscp_supported = {0, 0, 0},

	.dynamic_update_mask = 0,

	.stats_mask = STATS_MASK_QUEUE,
};

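/*
 * Illustrative application-side sketch (not part of this driver): the
 * capabilities below are reported through the generic rte_tm API, e.g.:
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error error;
 *
 *	if (rte_tm_capabilities_get(port_id, &cap, &error) == 0)
 *		printf("max TM nodes: %u\n", cap.n_nodes_max);
 */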
/* Traffic manager capabilities get */
static int
pmd_tm_capabilities_get(struct rte_eth_dev *dev,
	struct rte_tm_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_cap, sizeof(*cap));

	cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->shaper_private_n_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);

	cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);

	cap->shaper_n_max = cap->shaper_private_n_max +
		cap->shaper_shared_n_max;

	cap->shaper_shared_n_nodes_per_shaper_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);

	cap->sched_n_children_max = RTE_MAX(
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
		(uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);

	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;

	if (WRED_SUPPORTED)
		cap->cman_wred_context_private_n_max =
			tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
		cap->cman_wred_context_shared_n_max;

	return 0;
}

static const struct rte_tm_level_capabilities tm_level_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.n_nodes_max = 1,
		.n_nodes_nonleaf_max = 1,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
#ifdef RTE_SCHED_SUBPORT_TC_OV
			.sched_wfq_weight_max = UINT32_MAX,
#else
			.sched_wfq_weight_max = 1,
#endif
			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_PIPE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_TC] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 1,

			.sched_n_children_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = 0,
		.n_nodes_leaf_max = UINT32_MAX,
		.non_leaf_nodes_identical = 0,
		.leaf_nodes_identical = 1,

		{.leaf = {
			.shaper_private_supported = 0,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 0,
			.shaper_private_rate_max = 0,
			.shaper_shared_n_max = 0,

			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,

			.stats_mask = STATS_MASK_QUEUE,
		} },
	},
};

/* Traffic manager level capabilities get */
static int
pmd_tm_level_capabilities_get(struct rte_eth_dev *dev,
	uint32_t level_id,
	struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	if (level_id >= TM_NODE_LEVEL_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));

	switch (level_id) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_SUBPORT);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_PIPE);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_TC:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_TC);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_QUEUE:
	default:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_QUEUE);
		cap->n_nodes_leaf_max = cap->n_nodes_max;
		break;
	}

	return 0;
}

static const struct rte_tm_node_capabilities tm_node_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		{.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		{.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_PIPE] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		{.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_TC] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 1,

		{.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.shaper_private_supported = 0,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 0,
		.shaper_private_rate_max = 0,
		.shaper_shared_n_max = 0,

		{.leaf = {
			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,
		} },

		.stats_mask = STATS_MASK_QUEUE,
	},
};

/* Traffic manager node capabilities get */
static int
pmd_tm_node_capabilities_get(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
{
	struct tm_node *tm_node;

	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	tm_node = tm_node_search(dev, node_id);
	if (tm_node == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));

	switch (tm_node->level) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
	case TM_NODE_LEVEL_TC:
	case TM_NODE_LEVEL_QUEUE:
	default:
		break;
	}

	return 0;
}

static int
shaper_profile_check(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct tm_shaper_profile *sp;

	/* Shaper profile ID must not be NONE. */
	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must not exist. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak rate: non-zero, 32-bit */
	if (profile->peak.rate == 0 ||
		profile->peak.rate >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak size: non-zero, 32-bit */
	if (profile->peak.size == 0 ||
		profile->peak.size >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
			NULL,
			rte_strerror(EINVAL));

	/* Dual-rate profiles are not supported. */
	if (profile->committed.rate != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Packet length adjust: 24 bytes */
	if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

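/*
 * Illustrative application-side sketch (hypothetical IDs and rates): a
 * profile that satisfies shaper_profile_check() and is installed through
 * the generic rte_tm API:
 *
 *	struct rte_tm_shaper_params sp = {
 *		.peak = { .rate = 1250000000, .size = 1000000 },
 *		.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
 *	};
 *	struct rte_tm_error error;
 *
 *	rte_tm_shaper_profile_add(port_id, 0, &sp, &error);
 */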
/* Traffic manager shaper profile add */
static int
pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;
	int status;

	/* Check input params */
	status = shaper_profile_check(dev, shaper_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	sp = calloc(1, sizeof(struct tm_shaper_profile));
	if (sp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	sp->shaper_profile_id = shaper_profile_id;
	memcpy(&sp->params, profile, sizeof(sp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(spl, sp, node);
	p->soft.tm.h.n_shaper_profiles++;

	return 0;
}

/* Traffic manager shaper profile delete */
static int
pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp;

	/* Check existing */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (sp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
	p->soft.tm.h.n_shaper_profiles--;
	free(sp);

	return 0;
}

static struct tm_node *
tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
	struct tm_shared_shaper *ss)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	/* Subport: each TC uses a shared shaper */
	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_TC ||
			n->params.n_shared_shapers == 0 ||
			n->params.shared_shaper_id[0] != ss->shared_shaper_id)
			continue;

		return n;
	}

	return NULL;
}

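/*
 * Run-time helper: rebuild the rte_sched subport parameters with the new
 * traffic class rate taken from the new shaper profile, push them to the
 * running scheduler, and only then update the bookkeeping (profile user
 * counts and the cached subport parameters).
 */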
static int
update_subport_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shared_shaper *ss,
	struct tm_shaper_profile *sp_new)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_params subport_params;

	struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
		ss->shaper_profile_id);

	/* Derive new subport configuration. */
	memcpy(&subport_params,
		&p->soft.tm.params.subport_params[subport_id],
		sizeof(subport_params));
	subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(p->soft.tm.sched,
		subport_id, &subport_params))
		return -1;

	/* Commit changes. */
	sp_old->n_users--;

	ss->shaper_profile_id = sp_new->shaper_profile_id;
	sp_new->n_users++;

	memcpy(&p->soft.tm.params.subport_params[subport_id],
		&subport_params,
		sizeof(subport_params));

	return 0;
}

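/*
 * Before the hierarchy is frozen this call only records the shared shaper
 * in the driver's lists; after the freeze it is a run-time update that
 * propagates the new shaper profile rate to the corresponding subport
 * traffic class via update_subport_tc_rate().
 */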
/* Traffic manager shared shaper add/update */
static int
pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;
	struct tm_shaper_profile *sp;
	struct tm_node *nt;

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/**
	 * Add new shared shaper
	 */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL) {
		struct tm_shared_shaper_list *ssl =
			&p->soft.tm.h.shared_shapers;

		/* Hierarchy must not be frozen */
		if (p->soft.tm.hierarchy_frozen)
			return -rte_tm_error_set(error,
				EBUSY,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EBUSY));

		/* Memory allocation */
		ss = calloc(1, sizeof(struct tm_shared_shaper));
		if (ss == NULL)
			return -rte_tm_error_set(error,
				ENOMEM,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(ENOMEM));

		/* Fill in */
		ss->shared_shaper_id = shared_shaper_id;
		ss->shaper_profile_id = shaper_profile_id;

		/* Add to list */
		TAILQ_INSERT_TAIL(ssl, ss, node);
		p->soft.tm.h.n_shared_shapers++;

		return 0;
	}

	/**
	 * Update existing shared shaper
	 */
	/* Hierarchy must be frozen (run-time update) */
	if (p->soft.tm.hierarchy_frozen == 0)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Propagate change. */
	nt = tm_shared_shaper_get_tc(dev, ss);
	if (update_subport_tc_rate(dev, nt, ss, sp))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

/* Traffic manager shared shaper delete */
static int
pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;

	/* Check existing */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (ss->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
	p->soft.tm.h.n_shared_shapers--;
	free(ss);

	return 0;
}

static int
wred_profile_check(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct tm_wred_profile *wp;
	enum rte_tm_color color;

	/* WRED profile ID must not be NONE. */
	if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile must not exist. */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile should be in packet mode */
	if (profile->packet_mode == 0)
		return -rte_tm_error_set(error,
			ENOTSUP,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(ENOTSUP));

	/* min_th <= max_th, max_th > 0 */
	for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
		uint32_t min_th = profile->red_params[color].min_th;
		uint32_t max_th = profile->red_params[color].max_th;

		if (min_th > max_th ||
			max_th == 0 ||
			min_th > UINT16_MAX ||
			max_th > UINT16_MAX)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_WRED_PROFILE,
				NULL,
				rte_strerror(EINVAL));
	}

	return 0;
}

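/*
 * Illustrative application-side sketch (hypothetical thresholds): the soft
 * TM only accepts packet-mode WRED profiles, with per-color thresholds that
 * fit in 16 bits:
 *
 *	struct rte_tm_wred_params wp = {
 *		.red_params = {
 *			[RTE_TM_GREEN]  = { .min_th = 48, .max_th = 64,
 *					    .maxp_inv = 10, .wq_log2 = 9 },
 *			[RTE_TM_YELLOW] = { .min_th = 40, .max_th = 64,
 *					    .maxp_inv = 10, .wq_log2 = 9 },
 *			[RTE_TM_RED]    = { .min_th = 32, .max_th = 64,
 *					    .maxp_inv = 10, .wq_log2 = 9 },
 *		},
 *		.packet_mode = 1,
 *	};
 *	struct rte_tm_error error;
 *
 *	rte_tm_wred_profile_add(port_id, 0, &wp, &error);
 */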
/* Traffic manager WRED profile add */
static int
pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;
	int status;

	/* Check input params */
	status = wred_profile_check(dev, wred_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	wp = calloc(1, sizeof(struct tm_wred_profile));
	if (wp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	wp->wred_profile_id = wred_profile_id;
	memcpy(&wp->params, profile, sizeof(wp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(wpl, wp, node);
	p->soft.tm.h.n_wred_profiles++;

	return 0;
}

/* Traffic manager WRED profile delete */
static int
pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile *wp;

	/* Check existing */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (wp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
	p->soft.tm.h.n_wred_profiles--;
	free(wp);

	return 0;
}

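/*
 * Node addition checks, one helper per hierarchy level. The node ID space
 * follows the rte_tm convention used throughout this driver: IDs below
 * nb_queues are leaf (queue) nodes, all higher IDs are non-leaf nodes.
 */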
1350 static int
1351 node_add_check_port(struct rte_eth_dev *dev,
1352         uint32_t node_id,
1353         uint32_t parent_node_id __rte_unused,
1354         uint32_t priority,
1355         uint32_t weight,
1356         uint32_t level_id __rte_unused,
1357         struct rte_tm_node_params *params,
1358         struct rte_tm_error *error)
1359 {
1360         struct pmd_internals *p = dev->data->dev_private;
1361         struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
1362                 params->shaper_profile_id);
1363
1364         /* node type: non-leaf */
1365         if (node_id < p->params.soft.tm.nb_queues)
1366                 return -rte_tm_error_set(error,
1367                         EINVAL,
1368                         RTE_TM_ERROR_TYPE_NODE_ID,
1369                         NULL,
1370                         rte_strerror(EINVAL));
1371
1372         /* Priority must be 0 */
1373         if (priority != 0)
1374                 return -rte_tm_error_set(error,
1375                         EINVAL,
1376                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1377                         NULL,
1378                         rte_strerror(EINVAL));
1379
1380         /* Weight must be 1 */
1381         if (weight != 1)
1382                 return -rte_tm_error_set(error,
1383                         EINVAL,
1384                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1385                         NULL,
1386                         rte_strerror(EINVAL));
1387
1388         /* Shaper must be valid.
1389          * Shaper profile peak rate must fit the configured port rate.
1390          */
1391         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1392                 sp == NULL ||
1393                 sp->params.peak.rate > p->params.soft.tm.rate)
1394                 return -rte_tm_error_set(error,
1395                         EINVAL,
1396                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1397                         NULL,
1398                         rte_strerror(EINVAL));
1399
1400         /* No shared shapers */
1401         if (params->n_shared_shapers != 0)
1402                 return -rte_tm_error_set(error,
1403                         EINVAL,
1404                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1405                         NULL,
1406                         rte_strerror(EINVAL));
1407
1408         /* Number of SP priorities must be 1 */
1409         if (params->nonleaf.n_sp_priorities != 1)
1410                 return -rte_tm_error_set(error,
1411                         EINVAL,
1412                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1413                         NULL,
1414                         rte_strerror(EINVAL));
1415
1416         /* Stats */
1417         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1418                 return -rte_tm_error_set(error,
1419                         EINVAL,
1420                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1421                         NULL,
1422                         rte_strerror(EINVAL));
1423
1424         return 0;
1425 }
1426
1427 static int
1428 node_add_check_subport(struct rte_eth_dev *dev,
1429         uint32_t node_id,
1430         uint32_t parent_node_id __rte_unused,
1431         uint32_t priority,
1432         uint32_t weight,
1433         uint32_t level_id __rte_unused,
1434         struct rte_tm_node_params *params,
1435         struct rte_tm_error *error)
1436 {
1437         struct pmd_internals *p = dev->data->dev_private;
1438
1439         /* node type: non-leaf */
1440         if (node_id < p->params.soft.tm.nb_queues)
1441                 return -rte_tm_error_set(error,
1442                         EINVAL,
1443                         RTE_TM_ERROR_TYPE_NODE_ID,
1444                         NULL,
1445                         rte_strerror(EINVAL));
1446
1447         /* Priority must be 0 */
1448         if (priority != 0)
1449                 return -rte_tm_error_set(error,
1450                         EINVAL,
1451                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1452                         NULL,
1453                         rte_strerror(EINVAL));
1454
1455         /* Weight must be 1 */
1456         if (weight != 1)
1457                 return -rte_tm_error_set(error,
1458                         EINVAL,
1459                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1460                         NULL,
1461                         rte_strerror(EINVAL));
1462
1463         /* Shaper must be valid */
1464         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1465                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1466                 return -rte_tm_error_set(error,
1467                         EINVAL,
1468                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1469                         NULL,
1470                         rte_strerror(EINVAL));
1471
1472         /* No shared shapers */
1473         if (params->n_shared_shapers != 0)
1474                 return -rte_tm_error_set(error,
1475                         EINVAL,
1476                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1477                         NULL,
1478                         rte_strerror(EINVAL));
1479
1480         /* Number of SP priorities must be 1 */
1481         if (params->nonleaf.n_sp_priorities != 1)
1482                 return -rte_tm_error_set(error,
1483                         EINVAL,
1484                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1485                         NULL,
1486                         rte_strerror(EINVAL));
1487
1488         /* Stats */
1489         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1490                 return -rte_tm_error_set(error,
1491                         EINVAL,
1492                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1493                         NULL,
1494                         rte_strerror(EINVAL));
1495
1496         return 0;
1497 }
1498
1499 static int
1500 node_add_check_pipe(struct rte_eth_dev *dev,
1501         uint32_t node_id,
1502         uint32_t parent_node_id __rte_unused,
1503         uint32_t priority,
1504         uint32_t weight __rte_unused,
1505         uint32_t level_id __rte_unused,
1506         struct rte_tm_node_params *params,
1507         struct rte_tm_error *error)
1508 {
1509         struct pmd_internals *p = dev->data->dev_private;
1510
1511         /* node type: non-leaf */
1512         if (node_id < p->params.soft.tm.nb_queues)
1513                 return -rte_tm_error_set(error,
1514                         EINVAL,
1515                         RTE_TM_ERROR_TYPE_NODE_ID,
1516                         NULL,
1517                         rte_strerror(EINVAL));
1518
1519         /* Priority must be 0 */
1520         if (priority != 0)
1521                 return -rte_tm_error_set(error,
1522                         EINVAL,
1523                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1524                         NULL,
1525                         rte_strerror(EINVAL));
1526
1527         /* Shaper must be valid */
1528         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1529                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1530                 return -rte_tm_error_set(error,
1531                         EINVAL,
1532                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1533                         NULL,
1534                         rte_strerror(EINVAL));
1535
1536         /* No shared shapers */
1537         if (params->n_shared_shapers != 0)
1538                 return -rte_tm_error_set(error,
1539                         EINVAL,
1540                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1541                         NULL,
1542                         rte_strerror(EINVAL));
1543
1544         /* Number of SP priorities must be 4 */
1545         if (params->nonleaf.n_sp_priorities !=
1546                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1547                 return -rte_tm_error_set(error,
1548                         EINVAL,
1549                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1550                         NULL,
1551                         rte_strerror(EINVAL));
1552
1553         /* WFQ mode must be byte mode for all SP priorities */
1554         if (params->nonleaf.wfq_weight_mode != NULL &&
1555                 (params->nonleaf.wfq_weight_mode[0] == 0 ||
1556                 params->nonleaf.wfq_weight_mode[1] == 0 ||
1557                 params->nonleaf.wfq_weight_mode[2] == 0 ||
1558                 params->nonleaf.wfq_weight_mode[3] == 0))
1559                 return -rte_tm_error_set(error,
1560                         EINVAL,
1561                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
1562                         NULL,
1563                         rte_strerror(EINVAL));
1564
1565         /* Stats */
1566         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1567                 return -rte_tm_error_set(error,
1568                         EINVAL,
1569                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1570                         NULL,
1571                         rte_strerror(EINVAL));
1572
1573         return 0;
1574 }
1575
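/* Traffic class (TC) level node add checks: non-leaf node ID, weight 1,
 * valid private shaper profile, at most one valid shared shaper, a single
 * SP priority and the default stats mask only.
 */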
1576 static int
1577 node_add_check_tc(struct rte_eth_dev *dev,
1578         uint32_t node_id,
1579         uint32_t parent_node_id __rte_unused,
1580         uint32_t priority __rte_unused,
1581         uint32_t weight,
1582         uint32_t level_id __rte_unused,
1583         struct rte_tm_node_params *params,
1584         struct rte_tm_error *error)
1585 {
1586         struct pmd_internals *p = dev->data->dev_private;
1587
1588         /* node type: non-leaf */
1589         if (node_id < p->params.soft.tm.nb_queues)
1590                 return -rte_tm_error_set(error,
1591                         EINVAL,
1592                         RTE_TM_ERROR_TYPE_NODE_ID,
1593                         NULL,
1594                         rte_strerror(EINVAL));
1595
1596         /* Weight must be 1 */
1597         if (weight != 1)
1598                 return -rte_tm_error_set(error,
1599                         EINVAL,
1600                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1601                         NULL,
1602                         rte_strerror(EINVAL));
1603
1604         /* Shaper must be valid */
1605         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1606                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1607                 return -rte_tm_error_set(error,
1608                         EINVAL,
1609                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1610                         NULL,
1611                         rte_strerror(EINVAL));
1612
1613         /* Single valid shared shaper */
1614         if (params->n_shared_shapers > 1)
1615                 return -rte_tm_error_set(error,
1616                         EINVAL,
1617                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1618                         NULL,
1619                         rte_strerror(EINVAL));
1620
1621         if (params->n_shared_shapers == 1 &&
1622                 (params->shared_shaper_id == NULL ||
1623                 (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
1624                 return -rte_tm_error_set(error,
1625                         EINVAL,
1626                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
1627                         NULL,
1628                         rte_strerror(EINVAL));
1629
1630         /* Number of SP priorities must be 1 */
1631         if (params->nonleaf.n_sp_priorities != 1)
1632                 return -rte_tm_error_set(error,
1633                         EINVAL,
1634                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1635                         NULL,
1636                         rte_strerror(EINVAL));
1637
1638         /* Stats */
1639         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1640                 return -rte_tm_error_set(error,
1641                         EINVAL,
1642                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1643                         NULL,
1644                         rte_strerror(EINVAL));
1645
1646         return 0;
1647 }
1648
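/* Queue (leaf) level node add checks: leaf node ID, priority 0, no private
 * or shared shapers, congestion management either tail drop or WRED with a
 * valid private WRED context (no shared contexts), and the queue stats mask
 * only.
 */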
1649 static int
1650 node_add_check_queue(struct rte_eth_dev *dev,
1651         uint32_t node_id,
1652         uint32_t parent_node_id __rte_unused,
1653         uint32_t priority,
1654         uint32_t weight __rte_unused,
1655         uint32_t level_id __rte_unused,
1656         struct rte_tm_node_params *params,
1657         struct rte_tm_error *error)
1658 {
1659         struct pmd_internals *p = dev->data->dev_private;
1660
1661         /* node type: leaf */
1662         if (node_id >= p->params.soft.tm.nb_queues)
1663                 return -rte_tm_error_set(error,
1664                         EINVAL,
1665                         RTE_TM_ERROR_TYPE_NODE_ID,
1666                         NULL,
1667                         rte_strerror(EINVAL));
1668
1669         /* Priority must be 0 */
1670         if (priority != 0)
1671                 return -rte_tm_error_set(error,
1672                         EINVAL,
1673                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1674                         NULL,
1675                         rte_strerror(EINVAL));
1676
1677         /* No shaper */
1678         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1679                 return -rte_tm_error_set(error,
1680                         EINVAL,
1681                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1682                         NULL,
1683                         rte_strerror(EINVAL));
1684
1685         /* No shared shapers */
1686         if (params->n_shared_shapers != 0)
1687                 return -rte_tm_error_set(error,
1688                         EINVAL,
1689                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1690                         NULL,
1691                         rte_strerror(EINVAL));
1692
1693         /* Congestion management must not be head drop */
1694         if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
1695                 return -rte_tm_error_set(error,
1696                         EINVAL,
1697                         RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
1698                         NULL,
1699                         rte_strerror(EINVAL));
1700
1701         /* Congestion management set to WRED */
1702         if (params->leaf.cman == RTE_TM_CMAN_WRED) {
1703                 uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
1704                 struct tm_wred_profile *wp = tm_wred_profile_search(dev,
1705                         wred_profile_id);
1706
1707                 /* WRED profile (for private WRED context) must be valid */
1708                 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
1709                         wp == NULL)
1710                         return -rte_tm_error_set(error,
1711                                 EINVAL,
1712                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
1713                                 NULL,
1714                                 rte_strerror(EINVAL));
1715
1716                 /* No shared WRED contexts */
1717                 if (params->leaf.wred.n_shared_wred_contexts != 0)
1718                         return -rte_tm_error_set(error,
1719                                 EINVAL,
1720                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
1721                                 NULL,
1722                                 rte_strerror(EINVAL));
1723         }
1724
1725         /* Stats */
1726         if (params->stats_mask & ~STATS_MASK_QUEUE)
1727                 return -rte_tm_error_set(error,
1728                         EINVAL,
1729                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1730                         NULL,
1731                         rte_strerror(EINVAL));
1732
1733         return 0;
1734 }
1735
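/* Node add checks common to all levels (node ID, parent, priority, weight,
 * level ID), followed by the level specific checks above.
 */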
1736 static int
1737 node_add_check(struct rte_eth_dev *dev,
1738         uint32_t node_id,
1739         uint32_t parent_node_id,
1740         uint32_t priority,
1741         uint32_t weight,
1742         uint32_t level_id,
1743         struct rte_tm_node_params *params,
1744         struct rte_tm_error *error)
1745 {
1746         struct tm_node *pn;
1747         uint32_t level;
1748         int status;
1749
1750         /* node_id, parent_node_id:
1751          *    -node_id must not be RTE_TM_NODE_ID_NULL
1752          *    -node_id must not be in use
1753          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1754          *        -root node must not exist
1755          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1756          *        -parent_node_id must be valid
1757          */
1758         if (node_id == RTE_TM_NODE_ID_NULL)
1759                 return -rte_tm_error_set(error,
1760                         EINVAL,
1761                         RTE_TM_ERROR_TYPE_NODE_ID,
1762                         NULL,
1763                         rte_strerror(EINVAL));
1764
1765         if (tm_node_search(dev, node_id))
1766                 return -rte_tm_error_set(error,
1767                         EEXIST,
1768                         RTE_TM_ERROR_TYPE_NODE_ID,
1769                         NULL,
1770                         rte_strerror(EEXIST));
1771
1772         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
1773                 pn = NULL;
1774                 if (tm_root_node_present(dev))
1775                         return -rte_tm_error_set(error,
1776                                 EEXIST,
1777                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1778                                 NULL,
1779                                 rte_strerror(EEXIST));
1780         } else {
1781                 pn = tm_node_search(dev, parent_node_id);
1782                 if (pn == NULL)
1783                         return -rte_tm_error_set(error,
1784                                 EINVAL,
1785                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1786                                 NULL,
1787                                 rte_strerror(EINVAL));
1788         }
1789
1790         /* priority: must be 0 .. 3 */
1791         if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1792                 return -rte_tm_error_set(error,
1793                         EINVAL,
1794                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1795                         NULL,
1796                         rte_strerror(EINVAL));
1797
1798         /* weight: must be 1 .. 255 */
1799         if (weight == 0 || weight > UINT8_MAX)
1800                 return -rte_tm_error_set(error,
1801                         EINVAL,
1802                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1803                         NULL,
1804                         rte_strerror(EINVAL));
1805
1806         /* level_id: if valid, then
1807          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1808          *        -level_id must be zero
1809          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1810          *        -level_id must be parent level ID plus one
1811          */
1812         level = (pn == NULL) ? 0 : pn->level + 1;
1813         if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
1814                 return -rte_tm_error_set(error,
1815                         EINVAL,
1816                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1817                         NULL,
1818                         rte_strerror(EINVAL));
1819
1820         /* params: must not be NULL */
1821         if (params == NULL)
1822                 return -rte_tm_error_set(error,
1823                         EINVAL,
1824                         RTE_TM_ERROR_TYPE_NODE_PARAMS,
1825                         NULL,
1826                         rte_strerror(EINVAL));
1827
1828         /* params: per level checks */
1829         switch (level) {
1830         case TM_NODE_LEVEL_PORT:
1831                 status = node_add_check_port(dev, node_id,
1832                         parent_node_id, priority, weight, level_id,
1833                         params, error);
1834                 if (status)
1835                         return status;
1836                 break;
1837
1838         case TM_NODE_LEVEL_SUBPORT:
1839                 status = node_add_check_subport(dev, node_id,
1840                         parent_node_id, priority, weight, level_id,
1841                         params, error);
1842                 if (status)
1843                         return status;
1844                 break;
1845
1846         case TM_NODE_LEVEL_PIPE:
1847                 status = node_add_check_pipe(dev, node_id,
1848                         parent_node_id, priority, weight, level_id,
1849                         params, error);
1850                 if (status)
1851                         return status;
1852                 break;
1853
1854         case TM_NODE_LEVEL_TC:
1855                 status = node_add_check_tc(dev, node_id,
1856                         parent_node_id, priority, weight, level_id,
1857                         params, error);
1858                 if (status)
1859                         return status;
1860                 break;
1861
1862         case TM_NODE_LEVEL_QUEUE:
1863                 status = node_add_check_queue(dev, node_id,
1864                         parent_node_id, priority, weight, level_id,
1865                         params, error);
1866                 if (status)
1867                         return status;
1868                 break;
1869
1870         default:
1871                 return -rte_tm_error_set(error,
1872                         EINVAL,
1873                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1874                         NULL,
1875                         rte_strerror(EINVAL));
1876         }
1877
1878         return 0;
1879 }
1880
1881 /* Traffic manager node add */
1882 static int
1883 pmd_tm_node_add(struct rte_eth_dev *dev,
1884         uint32_t node_id,
1885         uint32_t parent_node_id,
1886         uint32_t priority,
1887         uint32_t weight,
1888         uint32_t level_id,
1889         struct rte_tm_node_params *params,
1890         struct rte_tm_error *error)
1891 {
1892         struct pmd_internals *p = dev->data->dev_private;
1893         struct tm_node_list *nl = &p->soft.tm.h.nodes;
1894         struct tm_node *n;
1895         uint32_t i;
1896         int status;
1897
1898         /* Checks */
1899         if (p->soft.tm.hierarchy_frozen)
1900                 return -rte_tm_error_set(error,
1901                         EBUSY,
1902                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1903                         NULL,
1904                         rte_strerror(EBUSY));
1905
1906         status = node_add_check(dev, node_id, parent_node_id, priority, weight,
1907                 level_id, params, error);
1908         if (status)
1909                 return status;
1910
1911         /* Memory allocation */
1912         n = calloc(1, sizeof(struct tm_node));
1913         if (n == NULL)
1914                 return -rte_tm_error_set(error,
1915                         ENOMEM,
1916                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1917                         NULL,
1918                         rte_strerror(ENOMEM));
1919
1920         /* Fill in */
1921         n->node_id = node_id;
1922         n->parent_node_id = parent_node_id;
1923         n->priority = priority;
1924         n->weight = weight;
1925
1926         if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1927                 n->parent_node = tm_node_search(dev, parent_node_id);
1928                 n->level = n->parent_node->level + 1;
1929         }
1930
1931         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1932                 n->shaper_profile = tm_shaper_profile_search(dev,
1933                         params->shaper_profile_id);
1934
1935         if (n->level == TM_NODE_LEVEL_QUEUE &&
1936                 params->leaf.cman == RTE_TM_CMAN_WRED)
1937                 n->wred_profile = tm_wred_profile_search(dev,
1938                         params->leaf.wred.wred_profile_id);
1939
1940         memcpy(&n->params, params, sizeof(n->params));
1941
1942         /* Add to list */
1943         TAILQ_INSERT_TAIL(nl, n, node);
1944         p->soft.tm.h.n_nodes++;
1945
1946         /* Update dependencies */
1947         if (n->parent_node)
1948                 n->parent_node->n_children++;
1949
1950         if (n->shaper_profile)
1951                 n->shaper_profile->n_users++;
1952
1953         for (i = 0; i < params->n_shared_shapers; i++) {
1954                 struct tm_shared_shaper *ss;
1955
1956                 ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
1957                 ss->n_users++;
1958         }
1959
1960         if (n->wred_profile)
1961                 n->wred_profile->n_users++;
1962
1963         p->soft.tm.h.n_tm_nodes[n->level]++;
1964
1965         return 0;
1966 }
1967
1968 /* Traffic manager node delete */
1969 static int
1970 pmd_tm_node_delete(struct rte_eth_dev *dev,
1971         uint32_t node_id,
1972         struct rte_tm_error *error)
1973 {
1974         struct pmd_internals *p = dev->data->dev_private;
1975         struct tm_node *n;
1976         uint32_t i;
1977
1978         /* Check hierarchy changes are currently allowed */
1979         if (p->soft.tm.hierarchy_frozen)
1980                 return -rte_tm_error_set(error,
1981                         EBUSY,
1982                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1983                         NULL,
1984                         rte_strerror(EBUSY));
1985
1986         /* Check existing */
1987         n = tm_node_search(dev, node_id);
1988         if (n == NULL)
1989                 return -rte_tm_error_set(error,
1990                         EINVAL,
1991                         RTE_TM_ERROR_TYPE_NODE_ID,
1992                         NULL,
1993                         rte_strerror(EINVAL));
1994
1995         /* Check unused */
1996         if (n->n_children)
1997                 return -rte_tm_error_set(error,
1998                         EBUSY,
1999                         RTE_TM_ERROR_TYPE_NODE_ID,
2000                         NULL,
2001                         rte_strerror(EBUSY));
2002
2003         /* Update dependencies */
2004         p->soft.tm.h.n_tm_nodes[n->level]--;
2005
2006         if (n->wred_profile)
2007                 n->wred_profile->n_users--;
2008
2009         for (i = 0; i < n->params.n_shared_shapers; i++) {
2010                 struct tm_shared_shaper *ss;
2011
2012                 ss = tm_shared_shaper_search(dev,
2013                                 n->params.shared_shaper_id[i]);
2014                 ss->n_users--;
2015         }
2016
2017         if (n->shaper_profile)
2018                 n->shaper_profile->n_users--;
2019
2020         if (n->parent_node)
2021                 n->parent_node->n_children--;
2022
2023         /* Remove from list */
2024         TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
2025         p->soft.tm.h.n_nodes--;
2026         free(n);
2027
2028         return 0;
2029 }
2030
2031
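/* Build the rte_sched pipe profile of a given pipe node: token bucket from
 * the pipe shaper, per-TC rates from the child TC shapers, WRR weights from
 * the child queue weights.
 */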
2032 static void
2033 pipe_profile_build(struct rte_eth_dev *dev,
2034         struct tm_node *np,
2035         struct rte_sched_pipe_params *pp)
2036 {
2037         struct pmd_internals *p = dev->data->dev_private;
2038         struct tm_hierarchy *h = &p->soft.tm.h;
2039         struct tm_node_list *nl = &h->nodes;
2040         struct tm_node *nt, *nq;
2041
2042         memset(pp, 0, sizeof(*pp));
2043
2044         /* Pipe */
2045         pp->tb_rate = np->shaper_profile->params.peak.rate;
2046         pp->tb_size = np->shaper_profile->params.peak.size;
2047
2048         /* Traffic Class (TC) */
2049         pp->tc_period = PIPE_TC_PERIOD;
2050
2051 #ifdef RTE_SCHED_SUBPORT_TC_OV
2052         pp->tc_ov_weight = np->weight;
2053 #endif
2054
2055         TAILQ_FOREACH(nt, nl, node) {
2056                 uint32_t queue_id = 0;
2057
2058                 if (nt->level != TM_NODE_LEVEL_TC ||
2059                         nt->parent_node_id != np->node_id)
2060                         continue;
2061
2062                 pp->tc_rate[nt->priority] =
2063                         nt->shaper_profile->params.peak.rate;
2064
2065                 /* Queue */
2066                 TAILQ_FOREACH(nq, nl, node) {
2067                         uint32_t pipe_queue_id;
2068
2069                         if (nq->level != TM_NODE_LEVEL_QUEUE ||
2070                                 nq->parent_node_id != nt->node_id)
2071                                 continue;
2072
2073                         pipe_queue_id = nt->priority *
2074                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
2075                         pp->wrr_weights[pipe_queue_id] = nq->weight;
2076
2077                         queue_id++;
2078                 }
2079         }
2080 }
2081
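/* Return 1 and the first free slot when a new pipe profile can still be
 * added to the table, 0 when the table is full.
 */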
2082 static int
2083 pipe_profile_free_exists(struct rte_eth_dev *dev,
2084         uint32_t *pipe_profile_id)
2085 {
2086         struct pmd_internals *p = dev->data->dev_private;
2087         struct tm_params *t = &p->soft.tm.params;
2088
2089         if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
2090                 *pipe_profile_id = t->n_pipe_profiles;
2091                 return 1;
2092         }
2093
2094         return 0;
2095 }
2096
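/* Return 1 (and optionally the matching index) when an identical pipe
 * profile is already in the table, 0 otherwise.
 */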
2097 static int
2098 pipe_profile_exists(struct rte_eth_dev *dev,
2099         struct rte_sched_pipe_params *pp,
2100         uint32_t *pipe_profile_id)
2101 {
2102         struct pmd_internals *p = dev->data->dev_private;
2103         struct tm_params *t = &p->soft.tm.params;
2104         uint32_t i;
2105
2106         for (i = 0; i < t->n_pipe_profiles; i++)
2107                 if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
2108                         if (pipe_profile_id)
2109                                 *pipe_profile_id = i;
2110                         return 1;
2111                 }
2112
2113         return 0;
2114 }
2115
2116 static void
2117 pipe_profile_install(struct rte_eth_dev *dev,
2118         struct rte_sched_pipe_params *pp,
2119         uint32_t pipe_profile_id)
2120 {
2121         struct pmd_internals *p = dev->data->dev_private;
2122         struct tm_params *t = &p->soft.tm.params;
2123
2124         memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
2125         t->n_pipe_profiles++;
2126 }
2127
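/* Record the pipe profile used by the pipe at the given (subport, pipe)
 * position.
 */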
2128 static void
2129 pipe_profile_mark(struct rte_eth_dev *dev,
2130         uint32_t subport_id,
2131         uint32_t pipe_id,
2132         uint32_t pipe_profile_id)
2133 {
2134         struct pmd_internals *p = dev->data->dev_private;
2135         struct tm_hierarchy *h = &p->soft.tm.h;
2136         struct tm_params *t = &p->soft.tm.params;
2137         uint32_t n_pipes_per_subport, pos;
2138
2139         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2140                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2141         pos = subport_id * n_pipes_per_subport + pipe_id;
2142
2143         t->pipe_to_profile[pos] = pipe_profile_id;
2144 }
2145
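/* Return the pipe profile currently associated with a pipe node. */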
2146 static struct rte_sched_pipe_params *
2147 pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
2148 {
2149         struct pmd_internals *p = dev->data->dev_private;
2150         struct tm_hierarchy *h = &p->soft.tm.h;
2151         struct tm_params *t = &p->soft.tm.params;
2152         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2153                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2154
2155         uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
2156         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2157
2158         uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
2159         uint32_t pipe_profile_id = t->pipe_to_profile[pos];
2160
2161         return &t->pipe_profiles[pipe_profile_id];
2162 }
2163
2164 static int
2165 pipe_profiles_generate(struct rte_eth_dev *dev)
2166 {
2167         struct pmd_internals *p = dev->data->dev_private;
2168         struct tm_hierarchy *h = &p->soft.tm.h;
2169         struct tm_node_list *nl = &h->nodes;
2170         struct tm_node *ns, *np;
2171         uint32_t subport_id;
2172
2173         /* Objective: Fill in the following fields in struct tm_params:
2174          *    - pipe_profiles
2175          *    - n_pipe_profiles
2176          *    - pipe_to_profile
2177          */
2178
2179         subport_id = 0;
2180         TAILQ_FOREACH(ns, nl, node) {
2181                 uint32_t pipe_id;
2182
2183                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2184                         continue;
2185
2186                 pipe_id = 0;
2187                 TAILQ_FOREACH(np, nl, node) {
2188                         struct rte_sched_pipe_params pp;
2189                         uint32_t pos;
2190
2191                         if (np->level != TM_NODE_LEVEL_PIPE ||
2192                                 np->parent_node_id != ns->node_id)
2193                                 continue;
2194
2195                         pipe_profile_build(dev, np, &pp);
2196
2197                         if (!pipe_profile_exists(dev, &pp, &pos)) {
2198                                 if (!pipe_profile_free_exists(dev, &pos))
2199                                         return -1;
2200
2201                                 pipe_profile_install(dev, &pp, pos);
2202                         }
2203
2204                         pipe_profile_mark(dev, subport_id, pipe_id, pos);
2205
2206                         pipe_id++;
2207                 }
2208
2209                 subport_id++;
2210         }
2211
2212         return 0;
2213 }
2214
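/* Return the WRED profile of any queue with the given TC priority; the
 * hierarchy commit check enforces that all such queues share it.
 */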
2215 static struct tm_wred_profile *
2216 tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
2217 {
2218         struct pmd_internals *p = dev->data->dev_private;
2219         struct tm_hierarchy *h = &p->soft.tm.h;
2220         struct tm_node_list *nl = &h->nodes;
2221         struct tm_node *nq;
2222
2223         TAILQ_FOREACH(nq, nl, node) {
2224                 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2225                         nq->parent_node->priority != tc_id)
2226                         continue;
2227
2228                 return nq->wred_profile;
2229         }
2230
2231         return NULL;
2232 }
2233
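/* Copy the per-TC, per-color RED parameters from the TM WRED profiles into
 * the rte_sched port parameters (no-op when RTE_SCHED_RED is disabled).
 */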
2234 #ifdef RTE_SCHED_RED
2235
2236 static void
2237 wred_profiles_set(struct rte_eth_dev *dev)
2238 {
2239         struct pmd_internals *p = dev->data->dev_private;
2240         struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
2241         uint32_t tc_id;
2242         enum rte_tm_color color;
2243
2244         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
2245                 for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
2246                         struct rte_red_params *dst =
2247                                 &pp->red_params[tc_id][color];
2248                         struct tm_wred_profile *src_wp =
2249                                 tm_tc_wred_profile_get(dev, tc_id);
2250                         struct rte_tm_red_params *src =
2251                                 &src_wp->params.red_params[color];
2252
2253                         memcpy(dst, src, sizeof(*dst));
2254                 }
2255 }
2256
2257 #else
2258
2259 #define wred_profiles_set(dev)
2260
2261 #endif
2262
2263 static struct tm_shared_shaper *
2264 tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
2265 {
2266         return (tc_node->params.n_shared_shapers) ?
2267                 tm_shared_shaper_search(dev,
2268                         tc_node->params.shared_shaper_id[0]) :
2269                 NULL;
2270 }
2271
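/* Return the shared shaper used for traffic class tc_id under the given
 * subport, or NULL when none is used.
 */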
2272 static struct tm_shared_shaper *
2273 tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
2274         struct tm_node *subport_node,
2275         uint32_t tc_id)
2276 {
2277         struct pmd_internals *p = dev->data->dev_private;
2278         struct tm_node_list *nl = &p->soft.tm.h.nodes;
2279         struct tm_node *n;
2280
2281         TAILQ_FOREACH(n, nl, node) {
2282                 if (n->level != TM_NODE_LEVEL_TC ||
2283                         n->parent_node->parent_node_id !=
2284                                 subport_node->node_id ||
2285                         n->priority != tc_id)
2286                         continue;
2287
2288                 return tm_tc_shared_shaper_get(dev, n);
2289         }
2290
2291         return NULL;
2292 }
2293
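/* Check that the frozen hierarchy can be mapped onto librte_sched: a single
 * root (port), up to TM_MAX_SUBPORTS subports with the same number of pipes
 * each, one TC per priority under every pipe and a full set of queues under
 * every TC, plus the shared shaper, pipe profile and WRED consistency rules
 * below.
 */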
2294 static int
2295 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2296 {
2297         struct pmd_internals *p = dev->data->dev_private;
2298         struct tm_hierarchy *h = &p->soft.tm.h;
2299         struct tm_node_list *nl = &h->nodes;
2300         struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2301         struct tm_wred_profile_list *wpl = &h->wred_profiles;
2302         struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2303         struct tm_shared_shaper *ss;
2304
2305         uint32_t n_pipes_per_subport;
2306
2307         /* Root node exists. */
2308         if (nr == NULL)
2309                 return -rte_tm_error_set(error,
2310                         EINVAL,
2311                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2312                         NULL,
2313                         rte_strerror(EINVAL));
2314
2315         /* There is at least one subport, max is not exceeded. */
2316         if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2317                 return -rte_tm_error_set(error,
2318                         EINVAL,
2319                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2320                         NULL,
2321                         rte_strerror(EINVAL));
2322
2323         /* There is at least one pipe. */
2324         if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2325                 return -rte_tm_error_set(error,
2326                         EINVAL,
2327                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2328                         NULL,
2329                         rte_strerror(EINVAL));
2330
2331         /* Number of pipes is the same for all subports. Maximum number of pipes
2332          * per subport is not exceeded.
2333          */
2334         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2335                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2336
2337         if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2338                 return -rte_tm_error_set(error,
2339                         EINVAL,
2340                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2341                         NULL,
2342                         rte_strerror(EINVAL));
2343
2344         TAILQ_FOREACH(ns, nl, node) {
2345                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2346                         continue;
2347
2348                 if (ns->n_children != n_pipes_per_subport)
2349                         return -rte_tm_error_set(error,
2350                                 EINVAL,
2351                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2352                                 NULL,
2353                                 rte_strerror(EINVAL));
2354         }
2355
2356         /* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
2357         TAILQ_FOREACH(np, nl, node) {
2358                 uint32_t mask = 0, mask_expected =
2359                         RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2360                                 uint32_t);
2361
2362                 if (np->level != TM_NODE_LEVEL_PIPE)
2363                         continue;
2364
2365                 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2366                         return -rte_tm_error_set(error,
2367                                 EINVAL,
2368                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2369                                 NULL,
2370                                 rte_strerror(EINVAL));
2371
2372                 TAILQ_FOREACH(nt, nl, node) {
2373                         if (nt->level != TM_NODE_LEVEL_TC ||
2374                                 nt->parent_node_id != np->node_id)
2375                                 continue;
2376
2377                         mask |= 1 << nt->priority;
2378                 }
2379
2380                 if (mask != mask_expected)
2381                         return -rte_tm_error_set(error,
2382                                 EINVAL,
2383                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2384                                 NULL,
2385                                 rte_strerror(EINVAL));
2386         }
2387
2388         /* Each TC has exactly 4 packet queues. */
2389         TAILQ_FOREACH(nt, nl, node) {
2390                 if (nt->level != TM_NODE_LEVEL_TC)
2391                         continue;
2392
2393                 if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
2394                         return -rte_tm_error_set(error,
2395                                 EINVAL,
2396                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2397                                 NULL,
2398                                 rte_strerror(EINVAL));
2399         }
2400
2401         /**
2402          * Shared shapers:
2403          *    -For each TC #i, all pipes in the same subport use the same
2404          *     shared shaper (or no shared shaper) for their TC#i.
2405          *    -Each shared shaper needs to have at least one user. All its
2406          *     users have to be TC nodes with the same priority and the same
2407          *     subport.
2408          */
2409         TAILQ_FOREACH(ns, nl, node) {
2410                 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2411                 uint32_t id;
2412
2413                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2414                         continue;
2415
2416                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2417                         s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2418
2419                 TAILQ_FOREACH(nt, nl, node) {
2420                         struct tm_shared_shaper *subport_ss, *tc_ss;
2421
2422                         if (nt->level != TM_NODE_LEVEL_TC ||
2423                                 nt->parent_node->parent_node_id !=
2424                                         ns->node_id)
2425                                 continue;
2426
2427                         subport_ss = s[nt->priority];
2428                         tc_ss = tm_tc_shared_shaper_get(dev, nt);
2429
2430                         if (subport_ss == NULL && tc_ss == NULL)
2431                                 continue;
2432
2433                         if ((subport_ss == NULL && tc_ss != NULL) ||
2434                                 (subport_ss != NULL && tc_ss == NULL) ||
2435                                 subport_ss->shared_shaper_id !=
2436                                         tc_ss->shared_shaper_id)
2437                                 return -rte_tm_error_set(error,
2438                                         EINVAL,
2439                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2440                                         NULL,
2441                                         rte_strerror(EINVAL));
2442                 }
2443         }
2444
2445         TAILQ_FOREACH(ss, ssl, node) {
2446                 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2447                 uint32_t n_users = 0;
2448
2449                 if (nt_any != NULL)
2450                         TAILQ_FOREACH(nt, nl, node) {
2451                                 if (nt->level != TM_NODE_LEVEL_TC ||
2452                                         nt->priority != nt_any->priority ||
2453                                         nt->parent_node->parent_node_id !=
2454                                         nt_any->parent_node->parent_node_id)
2455                                         continue;
2456
2457                                 n_users++;
2458                         }
2459
2460                 if (ss->n_users == 0 || ss->n_users != n_users)
2461                         return -rte_tm_error_set(error,
2462                                 EINVAL,
2463                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2464                                 NULL,
2465                                 rte_strerror(EINVAL));
2466         }
2467
2468         /* Not too many pipe profiles. */
2469         if (pipe_profiles_generate(dev))
2470                 return -rte_tm_error_set(error,
2471                         EINVAL,
2472                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2473                         NULL,
2474                         rte_strerror(EINVAL));
2475
2476         /**
2477          * WRED (when used, i.e. at least one WRED profile defined):
2478          *    -Each WRED profile must have at least one user.
2479          *    -All leaf nodes must have their private WRED context enabled.
2480          *    -For each TC #i, all leaf nodes must use the same WRED profile
2481          *     for their private WRED context.
2482          */
2483         if (h->n_wred_profiles) {
2484                 struct tm_wred_profile *wp;
2485                 struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2486                 uint32_t id;
2487
2488                 TAILQ_FOREACH(wp, wpl, node)
2489                         if (wp->n_users == 0)
2490                                 return -rte_tm_error_set(error,
2491                                         EINVAL,
2492                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2493                                         NULL,
2494                                         rte_strerror(EINVAL));
2495
2496                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
2497                         w[id] = tm_tc_wred_profile_get(dev, id);
2498
2499                         if (w[id] == NULL)
2500                                 return -rte_tm_error_set(error,
2501                                         EINVAL,
2502                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2503                                         NULL,
2504                                         rte_strerror(EINVAL));
2505                 }
2506
2507                 TAILQ_FOREACH(nq, nl, node) {
2508                         uint32_t id;
2509
2510                         if (nq->level != TM_NODE_LEVEL_QUEUE)
2511                                 continue;
2512
2513                         id = nq->parent_node->priority;
2514
2515                         if (nq->wred_profile == NULL ||
2516                                 nq->wred_profile->wred_profile_id !=
2517                                         w[id]->wred_profile_id)
2518                                 return -rte_tm_error_set(error,
2519                                         EINVAL,
2520                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2521                                         NULL,
2522                                         rte_strerror(EINVAL));
2523                 }
2524         }
2525
2526         return 0;
2527 }
2528
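/* Translate the committed hierarchy into the rte_sched port and subport
 * parameter blueprints used later to create the scheduler.
 */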
2529 static void
2530 hierarchy_blueprints_create(struct rte_eth_dev *dev)
2531 {
2532         struct pmd_internals *p = dev->data->dev_private;
2533         struct tm_params *t = &p->soft.tm.params;
2534         struct tm_hierarchy *h = &p->soft.tm.h;
2535
2536         struct tm_node_list *nl = &h->nodes;
2537         struct tm_node *root = tm_root_node_present(dev), *n;
2538
2539         uint32_t subport_id;
2540
2541         t->port_params = (struct rte_sched_port_params) {
2542                 .name = dev->data->name,
2543                 .socket = dev->data->numa_node,
2544                 .rate = root->shaper_profile->params.peak.rate,
2545                 .mtu = dev->data->mtu,
2546                 .frame_overhead =
2547                         root->shaper_profile->params.pkt_length_adjust,
2548                 .n_subports_per_port = root->n_children,
2549                 .n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2550                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
2551                 .qsize = {p->params.soft.tm.qsize[0],
2552                         p->params.soft.tm.qsize[1],
2553                         p->params.soft.tm.qsize[2],
2554                         p->params.soft.tm.qsize[3],
2555                 },
2556                 .pipe_profiles = t->pipe_profiles,
2557                 .n_pipe_profiles = t->n_pipe_profiles,
2558         };
2559
2560         wred_profiles_set(dev);
2561
2562         subport_id = 0;
2563         TAILQ_FOREACH(n, nl, node) {
2564                 uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2565                 uint32_t i;
2566
2567                 if (n->level != TM_NODE_LEVEL_SUBPORT)
2568                         continue;
2569
2570                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
2571                         struct tm_shared_shaper *ss;
2572                         struct tm_shaper_profile *sp;
2573
2574                         ss = tm_subport_tc_shared_shaper_get(dev, n, i);
2575                         sp = (ss) ? tm_shaper_profile_search(dev,
2576                                 ss->shaper_profile_id) :
2577                                 n->shaper_profile;
2578                         tc_rate[i] = sp->params.peak.rate;
2579                 }
2580
2581                 t->subport_params[subport_id] =
2582                         (struct rte_sched_subport_params) {
2583                                 .tb_rate = n->shaper_profile->params.peak.rate,
2584                                 .tb_size = n->shaper_profile->params.peak.size,
2585
2586                                 .tc_rate = {tc_rate[0],
2587                                         tc_rate[1],
2588                                         tc_rate[2],
2589                                         tc_rate[3],
2590                                 },
2591                                 .tc_period = SUBPORT_TC_PERIOD,
2592                 };
2593
2594                 subport_id++;
2595         }
2596 }
2597
2598 /* Traffic manager hierarchy commit */
2599 static int
2600 pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
2601         int clear_on_fail,
2602         struct rte_tm_error *error)
2603 {
2604         struct pmd_internals *p = dev->data->dev_private;
2605         int status;
2606
2607         /* Checks */
2608         if (p->soft.tm.hierarchy_frozen)
2609                 return -rte_tm_error_set(error,
2610                         EBUSY,
2611                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2612                         NULL,
2613                         rte_strerror(EBUSY));
2614
2615         status = hierarchy_commit_check(dev, error);
2616         if (status) {
2617                 if (clear_on_fail) {
2618                         tm_hierarchy_uninit(p);
2619                         tm_hierarchy_init(p);
2620                 }
2621
2622                 return status;
2623         }
2624
2625         /* Create blueprints */
2626         hierarchy_blueprints_create(dev);
2627
2628         /* Freeze hierarchy */
2629         p->soft.tm.hierarchy_frozen = 1;
2630
2631         return 0;
2632 }
2633
2634 #ifdef RTE_SCHED_SUBPORT_TC_OV
2635
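/* Runtime update of the pipe TC oversubscription weight: switch the pipe to
 * an already existing profile that differs only in tc_ov_weight.
 */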
2636 static int
2637 update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
2638 {
2639         struct pmd_internals *p = dev->data->dev_private;
2640         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2641
2642         struct tm_node *ns = np->parent_node;
2643         uint32_t subport_id = tm_node_subport_id(dev, ns);
2644
2645         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2646         struct rte_sched_pipe_params profile1;
2647         uint32_t pipe_profile_id;
2648
2649         /* Derive new pipe profile. */
2650         memcpy(&profile1, profile0, sizeof(profile1));
2651         profile1.tc_ov_weight = (uint8_t)weight;
2652
2653         /* Since the implementation does not allow adding more pipe profiles after
2654          * port configuration, the pipe configuration can be successfully
2655          * updated only if the new profile is also part of the existing set of
2656          * pipe profiles.
2657          */
2658         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2659                 return -1;
2660
2661         /* Update the pipe profile used by the current pipe. */
2662         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2663                 (int32_t)pipe_profile_id))
2664                 return -1;
2665
2666         /* Commit changes. */
2667         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2668         np->weight = weight;
2669
2670         return 0;
2671 }
2672
2673 #endif
2674
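/* Runtime update of a queue WRR weight: switch the parent pipe to an already
 * existing profile that differs only in this queue weight.
 */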
2675 static int
2676 update_queue_weight(struct rte_eth_dev *dev,
2677         struct tm_node *nq, uint32_t weight)
2678 {
2679         struct pmd_internals *p = dev->data->dev_private;
2680         uint32_t queue_id = tm_node_queue_id(dev, nq);
2681
2682         struct tm_node *nt = nq->parent_node;
2683         uint32_t tc_id = tm_node_tc_id(dev, nt);
2684
2685         struct tm_node *np = nt->parent_node;
2686         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2687
2688         struct tm_node *ns = np->parent_node;
2689         uint32_t subport_id = tm_node_subport_id(dev, ns);
2690
2691         uint32_t pipe_queue_id =
2692                 tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
2693
2694         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2695         struct rte_sched_pipe_params profile1;
2696         uint32_t pipe_profile_id;
2697
2698         /* Derive new pipe profile. */
2699         memcpy(&profile1, profile0, sizeof(profile1));
2700         profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;
2701
2702         /* Since the implementation does not allow adding more pipe profiles after
2703          * port configuration, the pipe configuration can be successfully
2704          * updated only if the new profile is also part of the existing set
2705          * of pipe profiles.
2706          */
2707         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2708                 return -1;
2709
2710         /* Update the pipe profile used by the current pipe. */
2711         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2712                 (int32_t)pipe_profile_id))
2713                 return -1;
2714
2715         /* Commit changes. */
2716         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2717         nq->weight = weight;
2718
2719         return 0;
2720 }
2721
2722 /* Traffic manager node parent update */
2723 static int
2724 pmd_tm_node_parent_update(struct rte_eth_dev *dev,
2725         uint32_t node_id,
2726         uint32_t parent_node_id,
2727         uint32_t priority,
2728         uint32_t weight,
2729         struct rte_tm_error *error)
2730 {
2731         struct tm_node *n;
2732
2733         /* Port must be started and TM used. */
2734         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
2735                 return -rte_tm_error_set(error,
2736                         EBUSY,
2737                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2738                         NULL,
2739                         rte_strerror(EBUSY));
2740
2741         /* Node must be valid */
2742         n = tm_node_search(dev, node_id);
2743         if (n == NULL)
2744                 return -rte_tm_error_set(error,
2745                         EINVAL,
2746                         RTE_TM_ERROR_TYPE_NODE_ID,
2747                         NULL,
2748                         rte_strerror(EINVAL));
2749
2750         /* Parent node must be the same */
2751         if (n->parent_node_id != parent_node_id)
2752                 return -rte_tm_error_set(error,
2753                         EINVAL,
2754                         RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
2755                         NULL,
2756                         rte_strerror(EINVAL));
2757
2758         /* Priority must be the same */
2759         if (n->priority != priority)
2760                 return -rte_tm_error_set(error,
2761                         EINVAL,
2762                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
2763                         NULL,
2764                         rte_strerror(EINVAL));
2765
2766         /* weight: must be 1 .. 255 */
2767         if (weight == 0 || weight > UINT8_MAX)
2768                 return -rte_tm_error_set(error,
2769                         EINVAL,
2770                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2771                         NULL,
2772                         rte_strerror(EINVAL));
2773
2774         switch (n->level) {
2775         case TM_NODE_LEVEL_PORT:
2776                 return -rte_tm_error_set(error,
2777                         EINVAL,
2778                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2779                         NULL,
2780                         rte_strerror(EINVAL));
2782         case TM_NODE_LEVEL_SUBPORT:
2783                 return -rte_tm_error_set(error,
2784                         EINVAL,
2785                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2786                         NULL,
2787                         rte_strerror(EINVAL));
2789         case TM_NODE_LEVEL_PIPE:
2790 #ifdef RTE_SCHED_SUBPORT_TC_OV
2791                 if (update_pipe_weight(dev, n, weight))
2792                         return -rte_tm_error_set(error,
2793                                 EINVAL,
2794                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2795                                 NULL,
2796                                 rte_strerror(EINVAL));
2797                 return 0;
2798 #else
2799                 return -rte_tm_error_set(error,
2800                         EINVAL,
2801                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2802                         NULL,
2803                         rte_strerror(EINVAL));
2804 #endif
2806         case TM_NODE_LEVEL_TC:
2807                 return -rte_tm_error_set(error,
2808                         EINVAL,
2809                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2810                         NULL,
2811                         rte_strerror(EINVAL));
2813         case TM_NODE_LEVEL_QUEUE:
2814                 /* fall-through */
2815         default:
2816                 if (update_queue_weight(dev, n, weight))
2817                         return -rte_tm_error_set(error,
2818                                 EINVAL,
2819                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2820                                 NULL,
2821                                 rte_strerror(EINVAL));
2822                 return 0;
2823         }
2824 }
2825
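/* Runtime update of the subport token bucket rate and size from a new shaper
 * profile.
 */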
2826 static int
2827 update_subport_rate(struct rte_eth_dev *dev,
2828         struct tm_node *ns,
2829         struct tm_shaper_profile *sp)
2830 {
2831         struct pmd_internals *p = dev->data->dev_private;
2832         uint32_t subport_id = tm_node_subport_id(dev, ns);
2833
2834         struct rte_sched_subport_params subport_params;
2835
2836         /* Derive new subport configuration. */
2837         memcpy(&subport_params,
2838                 &p->soft.tm.params.subport_params[subport_id],
2839                 sizeof(subport_params));
2840         subport_params.tb_rate = sp->params.peak.rate;
2841         subport_params.tb_size = sp->params.peak.size;
2842
2843         /* Update the subport configuration. */
2844         if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
2845                 &subport_params))
2846                 return -1;
2847
2848         /* Commit changes. */
2849         ns->shaper_profile->n_users--;
2850
2851         ns->shaper_profile = sp;
2852         ns->params.shaper_profile_id = sp->shaper_profile_id;
2853         sp->n_users++;
2854
2855         memcpy(&p->soft.tm.params.subport_params[subport_id],
2856                 &subport_params,
2857                 sizeof(subport_params));
2858
2859         return 0;
2860 }
2861
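/* Runtime update of the pipe token bucket rate and size: switch the pipe to
 * an already existing profile that differs only in the token bucket fields.
 */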
2862 static int
2863 update_pipe_rate(struct rte_eth_dev *dev,
2864         struct tm_node *np,
2865         struct tm_shaper_profile *sp)
2866 {
2867         struct pmd_internals *p = dev->data->dev_private;
2868         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2869
2870         struct tm_node *ns = np->parent_node;
2871         uint32_t subport_id = tm_node_subport_id(dev, ns);
2872
2873         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2874         struct rte_sched_pipe_params profile1;
2875         uint32_t pipe_profile_id;
2876
2877         /* Derive new pipe profile. */
2878         memcpy(&profile1, profile0, sizeof(profile1));
2879         profile1.tb_rate = sp->params.peak.rate;
2880         profile1.tb_size = sp->params.peak.size;
2881
2882         /* Since the implementation does not allow adding more pipe profiles after
2883          * port configuration, the pipe configuration can be successfully
2884          * updated only if the new profile is also part of the existing set of
2885          * pipe profiles.
2886          */
2887         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2888                 return -1;
2889
2890         /* Update the pipe profile used by the current pipe. */
2891         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2892                 (int32_t)pipe_profile_id))
2893                 return -1;
2894
2895         /* Commit changes. */
2896         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2897         np->shaper_profile->n_users--;
2898         np->shaper_profile = sp;
2899         np->params.shaper_profile_id = sp->shaper_profile_id;
2900         sp->n_users++;
2901
2902         return 0;
2903 }
2904
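/* Runtime update of a TC rate within the pipe: switch the pipe to an already
 * existing profile that differs only in this TC rate.
 */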
2905 static int
2906 update_tc_rate(struct rte_eth_dev *dev,
2907         struct tm_node *nt,
2908         struct tm_shaper_profile *sp)
2909 {
2910         struct pmd_internals *p = dev->data->dev_private;
2911         uint32_t tc_id = tm_node_tc_id(dev, nt);
2912
2913         struct tm_node *np = nt->parent_node;
2914         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2915
2916         struct tm_node *ns = np->parent_node;
2917         uint32_t subport_id = tm_node_subport_id(dev, ns);
2918
2919         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2920         struct rte_sched_pipe_params profile1;
2921         uint32_t pipe_profile_id;
2922
2923         /* Derive new pipe profile. */
2924         memcpy(&profile1, profile0, sizeof(profile1));
2925         profile1.tc_rate[tc_id] = sp->params.peak.rate;
2926
2927         /* Since the implementation does not allow adding more pipe profiles after
2928          * port configuration, the pipe configuration can be successfully
2929          * updated only if the new profile is also part of the existing set of
2930          * pipe profiles.
2931          */
2932         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2933                 return -1;
2934
2935         /* Update the pipe profile used by the current pipe. */
2936         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2937                 (int32_t)pipe_profile_id))
2938                 return -1;
2939
2940         /* Commit changes. */
2941         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2942         nt->shaper_profile->n_users--;
2943         nt->shaper_profile = sp;
2944         nt->params.shaper_profile_id = sp->shaper_profile_id;
2945         sp->n_users++;
2946
2947         return 0;
2948 }
2949
2950 /* Traffic manager node shaper update */
2951 static int
2952 pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
2953         uint32_t node_id,
2954         uint32_t shaper_profile_id,
2955         struct rte_tm_error *error)
2956 {
2957         struct tm_node *n;
2958         struct tm_shaper_profile *sp;
2959
2960         /* Port must be started and TM used. */
2961         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
2962                 return -rte_tm_error_set(error,
2963                         EBUSY,
2964                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2965                         NULL,
2966                         rte_strerror(EBUSY));
2967
2968         /* Node must be valid */
2969         n = tm_node_search(dev, node_id);
2970         if (n == NULL)
2971                 return -rte_tm_error_set(error,
2972                         EINVAL,
2973                         RTE_TM_ERROR_TYPE_NODE_ID,
2974                         NULL,
2975                         rte_strerror(EINVAL));
2976
2977         /* Shaper profile must be valid. */
2978         sp = tm_shaper_profile_search(dev, shaper_profile_id);
2979         if (sp == NULL)
2980                 return -rte_tm_error_set(error,
2981                         EINVAL,
2982                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
2983                         NULL,
2984                         rte_strerror(EINVAL));
2985
2986         switch (n->level) {
2987         case TM_NODE_LEVEL_PORT:
2988                 return -rte_tm_error_set(error,
2989                         EINVAL,
2990                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2991                         NULL,
2992                         rte_strerror(EINVAL));
2994         case TM_NODE_LEVEL_SUBPORT:
2995                 if (update_subport_rate(dev, n, sp))
2996                         return -rte_tm_error_set(error,
2997                                 EINVAL,
2998                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2999                                 NULL,
3000                                 rte_strerror(EINVAL));
3001                 return 0;
3003         case TM_NODE_LEVEL_PIPE:
3004                 if (update_pipe_rate(dev, n, sp))
3005                         return -rte_tm_error_set(error,
3006                                 EINVAL,
3007                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3008                                 NULL,
3009                                 rte_strerror(EINVAL));
3010                 return 0;
3012         case TM_NODE_LEVEL_TC:
3013                 if (update_tc_rate(dev, n, sp))
3014                         return -rte_tm_error_set(error,
3015                                 EINVAL,
3016                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3017                                 NULL,
3018                                 rte_strerror(EINVAL));
3019                 return 0;
3021         case TM_NODE_LEVEL_QUEUE:
3022                 /* fall-through */
3023         default:
3024                 return -rte_tm_error_set(error,
3025                         EINVAL,
3026                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3027                         NULL,
3028                         rte_strerror(EINVAL));
3029         }
3030 }
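/*
 * Illustrative note: this op is reached through the generic rte_tm API. A
 * minimal, hypothetical application-side call (port_id, node_id and
 * profile_id are placeholders) could look like:
 *
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_node_shaper_update(port_id, node_id, profile_id, &err))
 *		printf("shaper update failed: %s\n",
 *			err.message ? err.message : "unknown");
 *
 * Depending on the level of node_id, the request is served by
 * update_subport_rate(), update_pipe_rate() or update_tc_rate() above;
 * port-level and queue-level nodes are rejected with EINVAL.
 */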
3031
3032 static inline uint32_t
3033 tm_port_queue_id(struct rte_eth_dev *dev,
3034         uint32_t port_subport_id,
3035         uint32_t subport_pipe_id,
3036         uint32_t pipe_tc_id,
3037         uint32_t tc_queue_id)
3038 {
3039         struct pmd_internals *p = dev->data->dev_private;
3040         struct tm_hierarchy *h = &p->soft.tm.h;
3041         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
3042                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3043
3044         uint32_t port_pipe_id =
3045                 port_subport_id * n_pipes_per_subport + subport_pipe_id;
3046         uint32_t port_tc_id =
3047                 port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
3048         uint32_t port_queue_id =
3049                 port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;
3050
3051         return port_queue_id;
3052 }
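/*
 * Worked example for tm_port_queue_id(): the (subport, pipe, tc, queue)
 * coordinates are flattened into the linear queue index used by
 * librte_sched. Assuming 4 traffic classes per pipe and 4 queues per traffic
 * class (the scheduler constants used by this code base) and, for
 * illustration, 8 pipes per subport:
 *
 *	subport 1, pipe 3, tc 2, queue 1
 *	port_pipe_id  = 1 * 8  + 3 = 11
 *	port_tc_id    = 11 * 4 + 2 = 46
 *	port_queue_id = 46 * 4 + 1 = 185
 */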
3053
3054 static int
3055 read_port_stats(struct rte_eth_dev *dev,
3056         struct tm_node *nr,
3057         struct rte_tm_node_stats *stats,
3058         uint64_t *stats_mask,
3059         int clear)
3060 {
3061         struct pmd_internals *p = dev->data->dev_private;
3062         struct tm_hierarchy *h = &p->soft.tm.h;
3063         uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3064         uint32_t subport_id;
3065
3066         for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
3067                 struct rte_sched_subport_stats s;
3068                 uint32_t tc_ov, id;
3069
3070                 /* Stats read */
3071                 int status = rte_sched_subport_read_stats(
3072                         p->soft.tm.sched,
3073                         subport_id,
3074                         &s,
3075                         &tc_ov);
3076                 if (status)
3077                         return status;
3078
3079                 /* Stats accumulate */
3080                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
3081                         nr->stats.n_pkts +=
3082                                 s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
3083                         nr->stats.n_bytes +=
3084                                 s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
3085                         nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
3086                                 s.n_pkts_tc_dropped[id];
3087                         nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3088                                 s.n_bytes_tc_dropped[id];
3089                 }
3090         }
3091
3092         /* Stats copy */
3093         if (stats)
3094                 memcpy(stats, &nr->stats, sizeof(*stats));
3095
3096         if (stats_mask)
3097                 *stats_mask = STATS_MASK_DEFAULT;
3098
3099         /* Stats clear */
3100         if (clear)
3101                 memset(&nr->stats, 0, sizeof(nr->stats));
3102
3103         return 0;
3104 }
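/*
 * Illustrative note: librte_sched exposes read-out counters at subport and
 * queue granularity only, so the port (root) node statistics above are
 * synthesized by summing the per-traffic-class subport counters; all drops
 * are attributed to RTE_TM_GREEN because the counters used here are not
 * split per color.
 */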
3105
3106 static int
3107 read_subport_stats(struct rte_eth_dev *dev,
3108         struct tm_node *ns,
3109         struct rte_tm_node_stats *stats,
3110         uint64_t *stats_mask,
3111         int clear)
3112 {
3113         struct pmd_internals *p = dev->data->dev_private;
3114         uint32_t subport_id = tm_node_subport_id(dev, ns);
3115         struct rte_sched_subport_stats s;
3116         uint32_t tc_ov, tc_id;
3117
3118         /* Stats read */
3119         int status = rte_sched_subport_read_stats(
3120                 p->soft.tm.sched,
3121                 subport_id,
3122                 &s,
3123                 &tc_ov);
3124         if (status)
3125                 return status;
3126
3127         /* Stats accumulate */
3128         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
3129                 ns->stats.n_pkts +=
3130                         s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
3131                 ns->stats.n_bytes +=
3132                         s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
3133                 ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
3134                         s.n_pkts_tc_dropped[tc_id];
3135                 ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3136                         s.n_bytes_tc_dropped[tc_id];
3137         }
3138
3139         /* Stats copy */
3140         if (stats)
3141                 memcpy(stats, &ns->stats, sizeof(*stats));
3142
3143         if (stats_mask)
3144                 *stats_mask = STATS_MASK_DEFAULT;
3145
3146         /* Stats clear */
3147         if (clear)
3148                 memset(&ns->stats, 0, sizeof(ns->stats));
3149
3150         return 0;
3151 }
3152
3153 static int
3154 read_pipe_stats(struct rte_eth_dev *dev,
3155         struct tm_node *np,
3156         struct rte_tm_node_stats *stats,
3157         uint64_t *stats_mask,
3158         int clear)
3159 {
3160         struct pmd_internals *p = dev->data->dev_private;
3161
3162         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3163
3164         struct tm_node *ns = np->parent_node;
3165         uint32_t subport_id = tm_node_subport_id(dev, ns);
3166
3167         uint32_t i;
3168
3169         /* Stats read */
3170         for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
3171                 struct rte_sched_queue_stats s;
3172                 uint16_t qlen;
3173
3174                 uint32_t qid = tm_port_queue_id(dev,
3175                         subport_id,
3176                         pipe_id,
3177                         i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
3178                         i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
3179
3180                 int status = rte_sched_queue_read_stats(
3181                         p->soft.tm.sched,
3182                         qid,
3183                         &s,
3184                         &qlen);
3185                 if (status)
3186                         return status;
3187
3188                 /* Stats accumulate */
3189                 np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3190                 np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3191                 np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3192                 np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3193                         s.n_bytes_dropped;
3194                 np->stats.leaf.n_pkts_queued = qlen;
3195         }
3196
3197         /* Stats copy */
3198         if (stats)
3199                 memcpy(stats, &np->stats, sizeof(*stats));
3200
3201         if (stats_mask)
3202                 *stats_mask = STATS_MASK_DEFAULT;
3203
3204         /* Stats clear */
3205         if (clear)
3206                 memset(&np->stats, 0, sizeof(np->stats));
3207
3208         return 0;
3209 }
3210
3211 static int
3212 read_tc_stats(struct rte_eth_dev *dev,
3213         struct tm_node *nt,
3214         struct rte_tm_node_stats *stats,
3215         uint64_t *stats_mask,
3216         int clear)
3217 {
3218         struct pmd_internals *p = dev->data->dev_private;
3219
3220         uint32_t tc_id = tm_node_tc_id(dev, nt);
3221
3222         struct tm_node *np = nt->parent_node;
3223         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3224
3225         struct tm_node *ns = np->parent_node;
3226         uint32_t subport_id = tm_node_subport_id(dev, ns);
3227
3228         uint32_t i;
3229
3230         /* Stats read */
3231         for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
3232                 struct rte_sched_queue_stats s;
3233                 uint16_t qlen;
3234
3235                 uint32_t qid = tm_port_queue_id(dev,
3236                         subport_id,
3237                         pipe_id,
3238                         tc_id,
3239                         i);
3240
3241                 int status = rte_sched_queue_read_stats(
3242                         p->soft.tm.sched,
3243                         qid,
3244                         &s,
3245                         &qlen);
3246                 if (status)
3247                         return status;
3248
3249                 /* Stats accumulate */
3250                 nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3251                 nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3252                 nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3253                 nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3254                         s.n_bytes_dropped;
3255                 nt->stats.leaf.n_pkts_queued = qlen;
3256         }
3257
3258         /* Stats copy */
3259         if (stats)
3260                 memcpy(stats, &nt->stats, sizeof(*stats));
3261
3262         if (stats_mask)
3263                 *stats_mask = STATS_MASK_DEFAULT;
3264
3265         /* Stats clear */
3266         if (clear)
3267                 memset(&nt->stats, 0, sizeof(nt->stats));
3268
3269         return 0;
3270 }
3271
3272 static int
3273 read_queue_stats(struct rte_eth_dev *dev,
3274         struct tm_node *nq,
3275         struct rte_tm_node_stats *stats,
3276         uint64_t *stats_mask,
3277         int clear)
3278 {
3279         struct pmd_internals *p = dev->data->dev_private;
3280         struct rte_sched_queue_stats s;
3281         uint16_t qlen;
3282
3283         uint32_t queue_id = tm_node_queue_id(dev, nq);
3284
3285         struct tm_node *nt = nq->parent_node;
3286         uint32_t tc_id = tm_node_tc_id(dev, nt);
3287
3288         struct tm_node *np = nt->parent_node;
3289         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3290
3291         struct tm_node *ns = np->parent_node;
3292         uint32_t subport_id = tm_node_subport_id(dev, ns);
3293
3294         /* Stats read */
3295         uint32_t qid = tm_port_queue_id(dev,
3296                 subport_id,
3297                 pipe_id,
3298                 tc_id,
3299                 queue_id);
3300
3301         int status = rte_sched_queue_read_stats(
3302                 p->soft.tm.sched,
3303                 qid,
3304                 &s,
3305                 &qlen);
3306         if (status)
3307                 return status;
3308
3309         /* Stats accumulate */
3310         nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3311         nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3312         nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3313         nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3314                 s.n_bytes_dropped;
3315         nq->stats.leaf.n_pkts_queued = qlen;
3316
3317         /* Stats copy */
3318         if (stats)
3319                 memcpy(stats, &nq->stats, sizeof(*stats));
3320
3321         if (stats_mask)
3322                 *stats_mask = STATS_MASK_QUEUE;
3323
3324         /* Stats clear */
3325         if (clear)
3326                 memset(&nq->stats, 0, sizeof(nq->stats));
3327
3328         return 0;
3329 }
3330
3331 /* Traffic manager read stats counters for a specific node */
3332 static int
3333 pmd_tm_node_stats_read(struct rte_eth_dev *dev,
3334         uint32_t node_id,
3335         struct rte_tm_node_stats *stats,
3336         uint64_t *stats_mask,
3337         int clear,
3338         struct rte_tm_error *error)
3339 {
3340         struct tm_node *n;
3341
3342         /* Port must be started and TM used. */
3343         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
3344                 return -rte_tm_error_set(error,
3345                         EBUSY,
3346                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3347                         NULL,
3348                         rte_strerror(EBUSY));
3349
3350         /* Node must be valid */
3351         n = tm_node_search(dev, node_id);
3352         if (n == NULL)
3353                 return -rte_tm_error_set(error,
3354                         EINVAL,
3355                         RTE_TM_ERROR_TYPE_NODE_ID,
3356                         NULL,
3357                         rte_strerror(EINVAL));
3358
3359         switch (n->level) {
3360         case TM_NODE_LEVEL_PORT:
3361                 if (read_port_stats(dev, n, stats, stats_mask, clear))
3362                         return -rte_tm_error_set(error,
3363                                 EINVAL,
3364                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3365                                 NULL,
3366                                 rte_strerror(EINVAL));
3367                 return 0;
3368
3369         case TM_NODE_LEVEL_SUBPORT:
3370                 if (read_subport_stats(dev, n, stats, stats_mask, clear))
3371                         return -rte_tm_error_set(error,
3372                                 EINVAL,
3373                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3374                                 NULL,
3375                                 rte_strerror(EINVAL));
3376                 return 0;
3377
3378         case TM_NODE_LEVEL_PIPE:
3379                 if (read_pipe_stats(dev, n, stats, stats_mask, clear))
3380                         return -rte_tm_error_set(error,
3381                                 EINVAL,
3382                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3383                                 NULL,
3384                                 rte_strerror(EINVAL));
3385                 return 0;
3386
3387         case TM_NODE_LEVEL_TC:
3388                 if (read_tc_stats(dev, n, stats, stats_mask, clear))
3389                         return -rte_tm_error_set(error,
3390                                 EINVAL,
3391                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3392                                 NULL,
3393                                 rte_strerror(EINVAL));
3394                 return 0;
3395
3396         case TM_NODE_LEVEL_QUEUE:
3397         default:
3398                 if (read_queue_stats(dev, n, stats, stats_mask, clear))
3399                         return -rte_tm_error_set(error,
3400                                 EINVAL,
3401                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3402                                 NULL,
3403                                 rte_strerror(EINVAL));
3404                 return 0;
3405         }
3406 }
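/*
 * Illustrative note: a hypothetical application-side poll of these counters
 * through the public API (port_id and node_id are placeholders):
 *
 *	struct rte_tm_node_stats stats;
 *	uint64_t mask;
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_node_stats_read(port_id, node_id, &stats, &mask, 1, &err) == 0)
 *		printf("pkts %" PRIu64 " bytes %" PRIu64 "\n",
 *			stats.n_pkts, stats.n_bytes);
 *
 * Passing clear = 1 resets the node counters after they are copied out,
 * matching the memset() calls in the read_*_stats() helpers above.
 */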
3407
3408 const struct rte_tm_ops pmd_tm_ops = {
3409         .node_type_get = pmd_tm_node_type_get,
3410         .capabilities_get = pmd_tm_capabilities_get,
3411         .level_capabilities_get = pmd_tm_level_capabilities_get,
3412         .node_capabilities_get = pmd_tm_node_capabilities_get,
3413
3414         .wred_profile_add = pmd_tm_wred_profile_add,
3415         .wred_profile_delete = pmd_tm_wred_profile_delete,
3416         .shared_wred_context_add_update = NULL,
3417         .shared_wred_context_delete = NULL,
3418
3419         .shaper_profile_add = pmd_tm_shaper_profile_add,
3420         .shaper_profile_delete = pmd_tm_shaper_profile_delete,
3421         .shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
3422         .shared_shaper_delete = pmd_tm_shared_shaper_delete,
3423
3424         .node_add = pmd_tm_node_add,
3425         .node_delete = pmd_tm_node_delete,
3426         .node_suspend = NULL,
3427         .node_resume = NULL,
3428         .hierarchy_commit = pmd_tm_hierarchy_commit,
3429
3430         .node_parent_update = pmd_tm_node_parent_update,
3431         .node_shaper_update = pmd_tm_node_shaper_update,
3432         .node_shared_shaper_update = NULL,
3433         .node_stats_update = NULL,
3434         .node_wfq_weight_mode_update = NULL,
3435         .node_cman_update = NULL,
3436         .node_wred_context_update = NULL,
3437         .node_shared_wred_context_update = NULL,
3438
3439         .node_stats_read = pmd_tm_node_stats_read,
3440 };
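/*
 * Illustrative note: this table is handed to the ethdev layer through the
 * PMD's tm_ops_get callback. Entries left NULL (node_suspend, node_resume,
 * node_stats_update, the WRED/shared context updates, etc.) are expected to
 * make the corresponding rte_tm_*() calls fail as unsupported for this
 * device, e.g. rte_tm_node_suspend() would return an error rather than
 * suspend the node.
 */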