/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_malloc.h>
#include <rte_string_fns.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"

#define SUBPORT_TC_PERIOD	10
#define PIPE_TC_PERIOD		40

int
softnic_tmgr_init(struct pmd_internals *p)
{
	TAILQ_INIT(&p->tmgr_port_list);

	return 0;
}

void
softnic_tmgr_free(struct pmd_internals *p)
{
	for ( ; ; ) {
		struct softnic_tmgr_port *tmgr_port;

		tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
		if (tmgr_port == NULL)
			break;

		TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
		rte_sched_port_free(tmgr_port->s);
		free(tmgr_port);
	}
}

struct softnic_tmgr_port *
softnic_tmgr_port_find(struct pmd_internals *p,
	const char *name)
{
	struct softnic_tmgr_port *tmgr_port;

	if (name == NULL)
		return NULL;

	TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
		if (strcmp(tmgr_port->name, name) == 0)
			return tmgr_port;

	return NULL;
}

struct softnic_tmgr_port *
softnic_tmgr_port_create(struct pmd_internals *p,
	const char *name)
{
	struct softnic_tmgr_port *tmgr_port;
	struct tm_params *t = &p->soft.tm.params;
	struct rte_sched_port *sched;
	uint32_t n_subports, subport_id;

	/* Check input params */
	if (name == NULL ||
		softnic_tmgr_port_find(p, name))
		return NULL;

	/*
	 * Resource
	 */

	/* Is hierarchy frozen? */
	if (p->soft.tm.hierarchy_frozen == 0)
		return NULL;

	/* Port */
	sched = rte_sched_port_config(&t->port_params);
	if (sched == NULL)
		return NULL;

	/* Subport */
	n_subports = t->port_params.n_subports_per_port;
	for (subport_id = 0; subport_id < n_subports; subport_id++) {
		uint32_t n_pipes_per_subport =
			t->subport_params[subport_id].n_pipes_per_subport_enabled;
		uint32_t pipe_id;
		int status;

		status = rte_sched_subport_config(sched,
			subport_id,
			&t->subport_params[subport_id]);
		if (status) {
			rte_sched_port_free(sched);
			return NULL;
		}

		/* Pipe */
		for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
			int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
			int profile_id = t->pipe_to_profile[pos];

			if (profile_id < 0)
				continue;

			status = rte_sched_pipe_config(sched,
				subport_id,
				pipe_id,
				profile_id);
			if (status) {
				rte_sched_port_free(sched);
				return NULL;
			}
		}
	}

	/* Node allocation */
	tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
	if (tmgr_port == NULL) {
		rte_sched_port_free(sched);
		return NULL;
	}

	/* Node fill in */
	strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
	tmgr_port->s = sched;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);

	return tmgr_port;
}

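/*
 * Helper: fetch the underlying rte_sched port instance for this device.
 * Only the fixed name "TMGR" is searched for; the tmgr port is assumed
 * to have been created under that name elsewhere (e.g. by the softnic
 * firmware script) -- only the lookup is visible in this file.
 */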
static struct rte_sched_port *
SCHED(struct pmd_internals *p)
{
	struct softnic_tmgr_port *tmgr_port;

	tmgr_port = softnic_tmgr_port_find(p, "TMGR");
	if (tmgr_port == NULL)
		return NULL;

	return tmgr_port->s;
}

void
tm_hierarchy_init(struct pmd_internals *p)
{
	memset(&p->soft.tm, 0, sizeof(p->soft.tm));

	/* Initialize shaper profile list */
	TAILQ_INIT(&p->soft.tm.h.shaper_profiles);

	/* Initialize shared shaper list */
	TAILQ_INIT(&p->soft.tm.h.shared_shapers);

	/* Initialize wred profile list */
	TAILQ_INIT(&p->soft.tm.h.wred_profiles);

	/* Initialize TM node list */
	TAILQ_INIT(&p->soft.tm.h.nodes);
}

void
tm_hierarchy_free(struct pmd_internals *p)
{
	/* Remove all nodes */
	for ( ; ; ) {
		struct tm_node *tm_node;

		tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
		if (tm_node == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
		free(tm_node);
	}

	/* Remove all WRED profiles */
	for ( ; ; ) {
		struct tm_wred_profile *wred_profile;

		wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
		if (wred_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
		free(wred_profile);
	}

	/* Remove all shared shapers */
	for ( ; ; ) {
		struct tm_shared_shaper *shared_shaper;

		shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
		if (shared_shaper == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
		free(shared_shaper);
	}

	/* Remove all shaper profiles */
	for ( ; ; ) {
		struct tm_shaper_profile *shaper_profile;

		shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
		if (shaper_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
			shaper_profile, node);
		free(shaper_profile);
	}

	tm_hierarchy_init(p);
}

static struct tm_shaper_profile *
tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;

	TAILQ_FOREACH(sp, spl, node)
		if (shaper_profile_id == sp->shaper_profile_id)
			return sp;

	return NULL;
}

static struct tm_shared_shaper *
tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
	struct tm_shared_shaper *ss;

	TAILQ_FOREACH(ss, ssl, node)
		if (shared_shaper_id == ss->shared_shaper_id)
			return ss;

	return NULL;
}

static struct tm_wred_profile *
tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;

	TAILQ_FOREACH(wp, wpl, node)
		if (wred_profile_id == wp->wred_profile_id)
			return wp;

	return NULL;
}

static struct tm_node *
tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->node_id == node_id)
			return n;

	return NULL;
}

static struct tm_node *
tm_root_node_present(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
			return n;

	return NULL;
}

static uint32_t
tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *ns;
	uint32_t subport_id;

	subport_id = 0;
	TAILQ_FOREACH(ns, nl, node) {
		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		if (ns->node_id == subport_node->node_id)
			return subport_id;

		subport_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *np;
	uint32_t pipe_id;

	pipe_id = 0;
	TAILQ_FOREACH(np, nl, node) {
		if (np->level != TM_NODE_LEVEL_PIPE ||
			np->parent_node_id != pipe_node->parent_node_id)
			continue;

		if (np->node_id == pipe_node->node_id)
			return pipe_id;

		pipe_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
{
	return tc_node->priority;
}

static uint32_t
tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *nq;
	uint32_t queue_id;

	queue_id = 0;
	TAILQ_FOREACH(nq, nl, node) {
		if (nq->level != TM_NODE_LEVEL_QUEUE ||
			nq->parent_node_id != queue_node->parent_node_id)
			continue;

		if (nq->node_id == queue_node->node_id)
			return queue_id;

		queue_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t n_queues_max = p->params.tm.n_queues;
	uint32_t n_tc_max =
		(n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		/ RTE_SCHED_QUEUES_PER_PIPE;
	uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
	uint32_t n_subports_max = n_pipes_max;
	uint32_t n_root_max = 1;

	switch (level) {
	case TM_NODE_LEVEL_PORT:
		return n_root_max;
	case TM_NODE_LEVEL_SUBPORT:
		return n_subports_max;
	case TM_NODE_LEVEL_PIPE:
		return n_pipes_max;
	case TM_NODE_LEVEL_TC:
		return n_tc_max;
	case TM_NODE_LEVEL_QUEUE:
	default:
		return n_queues_max;
	}
}
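
/*
 * Worked example for the maxima above (illustrative; assumes the usual
 * scheduler build constants RTE_SCHED_QUEUES_PER_PIPE = 16 and
 * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE = 13): with n_queues = 64,
 * n_tc_max = 64 * 13 / 16 = 52, n_pipes_max = 52 / 13 = 4,
 * n_subports_max = n_pipes_max = 4, and a single root (port) node.
 */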

/* Traffic manager node type get */
static int
pmd_tm_node_type_get(struct rte_eth_dev *dev,
	uint32_t node_id,
	int *is_leaf,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (is_leaf == NULL)
		return -rte_tm_error_set(error,
		   EINVAL,
		   RTE_TM_ERROR_TYPE_UNSPECIFIED,
		   NULL,
		   rte_strerror(EINVAL));

	if (node_id == RTE_TM_NODE_ID_NULL ||
		(tm_node_search(dev, node_id) == NULL))
		return -rte_tm_error_set(error,
		   EINVAL,
		   RTE_TM_ERROR_TYPE_NODE_ID,
		   NULL,
		   rte_strerror(EINVAL));

	*is_leaf = node_id < p->params.tm.n_queues;

	return 0;
}
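
/*
 * Usage sketch (illustrative, not part of this driver): applications
 * reach the callback above through the generic ethdev traffic manager
 * API, e.g.:
 *
 *	int is_leaf;
 *	struct rte_tm_error error;
 *
 *	if (rte_tm_node_type_get(port_id, node_id, &is_leaf, &error) == 0)
 *		printf("node %u: %s\n", node_id,
 *			is_leaf ? "leaf" : "non-leaf");
 *
 * Per the check above, the leaf nodes are exactly the node IDs below
 * p->params.tm.n_queues.
 */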

#ifdef RTE_SCHED_RED
#define WRED_SUPPORTED						1
#else
#define WRED_SUPPORTED						0
#endif

#define STATS_MASK_DEFAULT					\
	(RTE_TM_STATS_N_PKTS |					\
	RTE_TM_STATS_N_BYTES |					\
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED |			\
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE					\
	(STATS_MASK_DEFAULT |					\
	RTE_TM_STATS_N_PKTS_QUEUED)

static const struct rte_tm_capabilities tm_cap = {
	.n_nodes_max = UINT32_MAX,
	.n_levels_max = TM_NODE_LEVEL_MAX,

	.non_leaf_nodes_identical = 0,
	.leaf_nodes_identical = 1,

	.shaper_n_max = UINT32_MAX,
	.shaper_private_n_max = UINT32_MAX,
	.shaper_private_dual_rate_n_max = 0,
	.shaper_private_rate_min = 1,
	.shaper_private_rate_max = UINT32_MAX,

	.shaper_shared_n_max = UINT32_MAX,
	.shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
	.shaper_shared_n_shapers_per_node_max = 1,
	.shaper_shared_dual_rate_n_max = 0,
	.shaper_shared_rate_min = 1,
	.shaper_shared_rate_max = UINT32_MAX,

	.shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
	.shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,

	.sched_n_children_max = UINT32_MAX,
	.sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
	.sched_wfq_n_children_per_group_max = UINT32_MAX,
	.sched_wfq_n_groups_max = 1,
	.sched_wfq_weight_max = UINT32_MAX,

	.cman_wred_packet_mode_supported = WRED_SUPPORTED,
	.cman_wred_byte_mode_supported = 0,
	.cman_head_drop_supported = 0,
	.cman_wred_context_n_max = 0,
	.cman_wred_context_private_n_max = 0,
	.cman_wred_context_shared_n_max = 0,
	.cman_wred_context_shared_n_nodes_per_context_max = 0,
	.cman_wred_context_shared_n_contexts_per_node_max = 0,

	.mark_vlan_dei_supported = {0, 0, 0},
	.mark_ip_ecn_tcp_supported = {0, 0, 0},
	.mark_ip_ecn_sctp_supported = {0, 0, 0},
	.mark_ip_dscp_supported = {0, 0, 0},

	.dynamic_update_mask = 0,

	.stats_mask = STATS_MASK_QUEUE,
};

/* Traffic manager capabilities get */
static int
pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	struct rte_tm_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
		   EINVAL,
		   RTE_TM_ERROR_TYPE_CAPABILITIES,
		   NULL,
		   rte_strerror(EINVAL));

	memcpy(cap, &tm_cap, sizeof(*cap));

	cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->shaper_private_n_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);

	cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);

	cap->shaper_n_max = cap->shaper_private_n_max +
		cap->shaper_shared_n_max;

	cap->shaper_shared_n_nodes_per_shaper_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);

	cap->sched_n_children_max = RTE_MAX(
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
		(uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);

	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;

	if (WRED_SUPPORTED)
		cap->cman_wred_context_private_n_max =
			tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
		cap->cman_wred_context_shared_n_max;

	return 0;
}
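
/*
 * Usage sketch (illustrative): the callback above is reached through
 * rte_tm_capabilities_get(), e.g.:
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error error;
 *
 *	if (rte_tm_capabilities_get(port_id, &cap, &error) == 0)
 *		printf("max nodes: %u\n", cap.n_nodes_max);
 */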

static const struct rte_tm_level_capabilities tm_level_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.n_nodes_max = 1,
		.n_nodes_nonleaf_max = 1,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
#ifdef RTE_SCHED_SUBPORT_TC_OV
			.sched_wfq_weight_max = UINT32_MAX,
#else
			.sched_wfq_weight_max = 1,
#endif
			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_PIPE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_TC] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 1,

			.sched_n_children_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = 0,
		.n_nodes_leaf_max = UINT32_MAX,
		.non_leaf_nodes_identical = 0,
		.leaf_nodes_identical = 1,

		{.leaf = {
			.shaper_private_supported = 0,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 0,
			.shaper_private_rate_max = 0,
			.shaper_shared_n_max = 0,

			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,

			.stats_mask = STATS_MASK_QUEUE,
		} },
	},
};

/* Traffic manager level capabilities get */
static int
pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	uint32_t level_id,
	struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
		   EINVAL,
		   RTE_TM_ERROR_TYPE_CAPABILITIES,
		   NULL,
		   rte_strerror(EINVAL));

	if (level_id >= TM_NODE_LEVEL_MAX)
		return -rte_tm_error_set(error,
		   EINVAL,
		   RTE_TM_ERROR_TYPE_LEVEL_ID,
		   NULL,
		   rte_strerror(EINVAL));

	memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));

	switch (level_id) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_SUBPORT);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_PIPE);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_TC:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_TC);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_QUEUE:
	default:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_QUEUE);
		cap->n_nodes_leaf_max = cap->n_nodes_max;
		break;
	}

	return 0;
}

static const struct rte_tm_node_capabilities tm_node_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		{.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		{.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_PIPE] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		{.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_TC] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 1,

		{.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.shaper_private_supported = 0,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 0,
		.shaper_private_rate_max = 0,
		.shaper_shared_n_max = 0,

		{.leaf = {
			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,
		} },

		.stats_mask = STATS_MASK_QUEUE,
	},
};

/* Traffic manager node capabilities get */
static int
pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	uint32_t node_id,
	struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
{
	struct tm_node *tm_node;

	if (cap == NULL)
		return -rte_tm_error_set(error,
		   EINVAL,
		   RTE_TM_ERROR_TYPE_CAPABILITIES,
		   NULL,
		   rte_strerror(EINVAL));

	tm_node = tm_node_search(dev, node_id);
	if (tm_node == NULL)
		return -rte_tm_error_set(error,
		   EINVAL,
		   RTE_TM_ERROR_TYPE_NODE_ID,
		   NULL,
		   rte_strerror(EINVAL));

	memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));

	switch (tm_node->level) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
	case TM_NODE_LEVEL_TC:
	case TM_NODE_LEVEL_QUEUE:
	default:
		break;
	}

	return 0;
}

static int
shaper_profile_check(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct tm_shaper_profile *sp;

	/* Shaper profile ID must not be NONE. */
	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must not exist. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak rate: non-zero, 32-bit */
	if (profile->peak.rate == 0 ||
		profile->peak.rate >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak size: non-zero, 32-bit */
	if (profile->peak.size == 0 ||
		profile->peak.size >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
			NULL,
			rte_strerror(EINVAL));

	/* Dual-rate profiles are not supported. */
	if (profile->committed.rate != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Packet length adjust: 24 bytes */
	if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

/* Traffic manager shaper profile add */
static int
pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;
	int status;

	/* Check input params */
	status = shaper_profile_check(dev, shaper_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	sp = calloc(1, sizeof(struct tm_shaper_profile));
	if (sp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	sp->shaper_profile_id = shaper_profile_id;
	memcpy(&sp->params, profile, sizeof(sp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(spl, sp, node);
	p->soft.tm.h.n_shaper_profiles++;

	return 0;
}
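
/*
 * Usage sketch (illustrative): a profile accepted by shaper_profile_check()
 * is single rate (zero committed rate), with non-zero 32-bit peak rate and
 * bucket size and the fixed 24-byte framing adjustment, e.g.:
 *
 *	struct rte_tm_shaper_params sp = {
 *		.peak = { .rate = 1250000, .size = 1000000 },
 *		.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
 *	};
 *	struct rte_tm_error error;
 *
 *	rte_tm_shaper_profile_add(port_id, 0, &sp, &error);
 */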

/* Traffic manager shaper profile delete */
static int
pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp;

	/* Check existing */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (sp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
	p->soft.tm.h.n_shaper_profiles--;
	free(sp);

	return 0;
}

static struct tm_node *
tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
	struct tm_shared_shaper *ss)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	/* Subport: each TC uses a shared shaper */
	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_TC ||
			n->params.n_shared_shapers == 0 ||
			n->params.shared_shaper_id[0] != ss->shared_shaper_id)
			continue;

		return n;
	}

	return NULL;
}

static int
update_subport_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shared_shaper *ss,
	struct tm_shaper_profile *sp_new)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_params subport_params;

	struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
		ss->shaper_profile_id);

	/* Derive new subport configuration. */
	memcpy(&subport_params,
		&p->soft.tm.params.subport_params[subport_id],
		sizeof(subport_params));
	subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(SCHED(p),
		subport_id, &subport_params))
		return -1;

	/* Commit changes. */
	sp_old->n_users--;

	ss->shaper_profile_id = sp_new->shaper_profile_id;
	sp_new->n_users++;

	memcpy(&p->soft.tm.params.subport_params[subport_id],
		&subport_params,
		sizeof(subport_params));

	return 0;
}
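
/*
 * Note: update_subport_tc_rate() applies the change in two steps: the new
 * subport configuration is pushed into the scheduler first, and only on
 * success are the shaper profile user counts and the cached subport
 * parameters committed.
 */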

/* Traffic manager shared shaper add/update */
static int
pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;
	struct tm_shaper_profile *sp;
	struct tm_node *nt;

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/**
	 * Add new shared shaper
	 */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL) {
		struct tm_shared_shaper_list *ssl =
			&p->soft.tm.h.shared_shapers;

		/* Hierarchy must not be frozen */
		if (p->soft.tm.hierarchy_frozen)
			return -rte_tm_error_set(error,
				EBUSY,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EBUSY));

		/* Memory allocation */
		ss = calloc(1, sizeof(struct tm_shared_shaper));
		if (ss == NULL)
			return -rte_tm_error_set(error,
				ENOMEM,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(ENOMEM));

		/* Fill in */
		ss->shared_shaper_id = shared_shaper_id;
		ss->shaper_profile_id = shaper_profile_id;

		/* Add to list */
		TAILQ_INSERT_TAIL(ssl, ss, node);
		p->soft.tm.h.n_shared_shapers++;

		return 0;
	}

	/**
	 * Update existing shared shaper
	 */
	/* Hierarchy must be frozen (run-time update) */
	if (p->soft.tm.hierarchy_frozen == 0)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Propagate change; fail if no TC node references this shaper. */
	nt = tm_shared_shaper_get_tc(dev, ss);
	if (nt == NULL || update_subport_tc_rate(dev, nt, ss, sp))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
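
/*
 * Usage sketch (illustrative): before the hierarchy is frozen, the
 * callback above creates a shared shaper attached to an existing shaper
 * profile; after freezing, the same call re-points the shared shaper to
 * a new profile and propagates the rate change to the subport TC, e.g.:
 *
 *	struct rte_tm_error error;
 *
 *	rte_tm_shared_shaper_add_update(port_id, shared_shaper_id,
 *		shaper_profile_id, &error);
 */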

/* Traffic manager shared shaper delete */
static int
pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;

	/* Check existing */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (ss->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
	p->soft.tm.h.n_shared_shapers--;
	free(ss);

	return 0;
}

static int
wred_profile_check(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct tm_wred_profile *wp;
	enum rte_color color;

	/* WRED profile ID must not be NONE. */
	if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile must not exist. */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile must be in packet mode */
	if (profile->packet_mode == 0)
		return -rte_tm_error_set(error,
			ENOTSUP,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(ENOTSUP));

	/* min_th <= max_th, max_th > 0 */
	for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
		uint32_t min_th = profile->red_params[color].min_th;
		uint32_t max_th = profile->red_params[color].max_th;

		if (min_th > max_th ||
			max_th == 0 ||
			min_th > UINT16_MAX ||
			max_th > UINT16_MAX)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_WRED_PROFILE,
				NULL,
				rte_strerror(EINVAL));
	}

	return 0;
}

/* Traffic manager WRED profile add */
static int
pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;
	int status;

	/* Check input params */
	status = wred_profile_check(dev, wred_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	wp = calloc(1, sizeof(struct tm_wred_profile));
	if (wp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	wp->wred_profile_id = wred_profile_id;
	memcpy(&wp->params, profile, sizeof(wp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(wpl, wp, node);
	p->soft.tm.h.n_wred_profiles++;

	return 0;
}
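
/*
 * Usage sketch (illustrative): a profile accepted by wred_profile_check()
 * is packet mode, with 16-bit thresholds and min_th <= max_th for every
 * color, e.g.:
 *
 *	struct rte_tm_wred_params wp = {
 *		.red_params = {
 *			[RTE_COLOR_GREEN]  = { .min_th = 48, .max_th = 64,
 *				.maxp_inv = 10, .wq_log2 = 9 },
 *			[RTE_COLOR_YELLOW] = { .min_th = 32, .max_th = 64,
 *				.maxp_inv = 10, .wq_log2 = 9 },
 *			[RTE_COLOR_RED]    = { .min_th = 16, .max_th = 64,
 *				.maxp_inv = 10, .wq_log2 = 9 },
 *		},
 *		.packet_mode = 1,
 *	};
 *	struct rte_tm_error error;
 *
 *	rte_tm_wred_profile_add(port_id, 0, &wp, &error);
 */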

/* Traffic manager WRED profile delete */
static int
pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile *wp;

	/* Check existing */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (wp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
	p->soft.tm.h.n_wred_profiles--;
	free(wp);

	return 0;
}

static int
node_add_check_port(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
		params->shaper_profile_id);

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

static int
node_add_check_subport(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

static int
node_add_check_pipe(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight __rte_unused,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

1526         /* Number of SP priorities must be RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE (13) */
1527         if (params->nonleaf.n_sp_priorities !=
1528                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1529                 return -rte_tm_error_set(error,
1530                         EINVAL,
1531                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1532                         NULL,
1533                         rte_strerror(EINVAL));
1534
1535         /* WFQ mode must be byte mode (non-zero flag, per rte_tm.h) */
1536         if (params->nonleaf.wfq_weight_mode != NULL) {
1537                 uint32_t i;
1538                 for (i = 0; i < params->nonleaf.n_sp_priorities; i++)
1539                         if (params->nonleaf.wfq_weight_mode[i] == 0)
1540                                 return -rte_tm_error_set(error,
1541                                         EINVAL,
1542                                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
1543                                         NULL,
1544                                         rte_strerror(EINVAL));
1545         }
1546
1547         /* Stats */
1548         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1549                 return -rte_tm_error_set(error,
1550                         EINVAL,
1551                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1552                         NULL,
1553                         rte_strerror(EINVAL));
1554
1555         return 0;
1556 }
1557
1558 static int
1559 node_add_check_tc(struct rte_eth_dev *dev,
1560         uint32_t node_id,
1561         uint32_t parent_node_id __rte_unused,
1562         uint32_t priority __rte_unused,
1563         uint32_t weight,
1564         uint32_t level_id __rte_unused,
1565         struct rte_tm_node_params *params,
1566         struct rte_tm_error *error)
1567 {
1568         struct pmd_internals *p = dev->data->dev_private;
1569
1570         /* node type: non-leaf */
1571         if (node_id < p->params.tm.n_queues)
1572                 return -rte_tm_error_set(error,
1573                         EINVAL,
1574                         RTE_TM_ERROR_TYPE_NODE_ID,
1575                         NULL,
1576                         rte_strerror(EINVAL));
1577
1578         /* Weight must be 1 */
1579         if (weight != 1)
1580                 return -rte_tm_error_set(error,
1581                         EINVAL,
1582                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1583                         NULL,
1584                         rte_strerror(EINVAL));
1585
1586         /* Shaper must be valid */
1587         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1588                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1589                 return -rte_tm_error_set(error,
1590                         EINVAL,
1591                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1592                         NULL,
1593                         rte_strerror(EINVAL));
1594
1595         /* Single valid shared shaper */
1596         if (params->n_shared_shapers > 1)
1597                 return -rte_tm_error_set(error,
1598                         EINVAL,
1599                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1600                         NULL,
1601                         rte_strerror(EINVAL));
1602
1603         if (params->n_shared_shapers == 1 &&
1604                 (params->shared_shaper_id == NULL ||
1605                 (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
1606                 return -rte_tm_error_set(error,
1607                         EINVAL,
1608                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
1609                         NULL,
1610                         rte_strerror(EINVAL));
1611
1612         /* Number of SP priorities must be 1 */
1613         if (params->nonleaf.n_sp_priorities != 1)
1614                 return -rte_tm_error_set(error,
1615                         EINVAL,
1616                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1617                         NULL,
1618                         rte_strerror(EINVAL));
1619
1620         /* Stats */
1621         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1622                 return -rte_tm_error_set(error,
1623                         EINVAL,
1624                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1625                         NULL,
1626                         rte_strerror(EINVAL));
1627
1628         return 0;
1629 }
1630
1631 static int
1632 node_add_check_queue(struct rte_eth_dev *dev,
1633         uint32_t node_id,
1634         uint32_t parent_node_id __rte_unused,
1635         uint32_t priority,
1636         uint32_t weight __rte_unused,
1637         uint32_t level_id __rte_unused,
1638         struct rte_tm_node_params *params,
1639         struct rte_tm_error *error)
1640 {
1641         struct pmd_internals *p = dev->data->dev_private;
1642
1643         /* node type: leaf */
1644         if (node_id >= p->params.tm.n_queues)
1645                 return -rte_tm_error_set(error,
1646                         EINVAL,
1647                         RTE_TM_ERROR_TYPE_NODE_ID,
1648                         NULL,
1649                         rte_strerror(EINVAL));
1650
1651         /* Priority must be 0 */
1652         if (priority != 0)
1653                 return -rte_tm_error_set(error,
1654                         EINVAL,
1655                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1656                         NULL,
1657                         rte_strerror(EINVAL));
1658
1659         /* No shaper */
1660         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1661                 return -rte_tm_error_set(error,
1662                         EINVAL,
1663                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1664                         NULL,
1665                         rte_strerror(EINVAL));
1666
1667         /* No shared shapers */
1668         if (params->n_shared_shapers != 0)
1669                 return -rte_tm_error_set(error,
1670                         EINVAL,
1671                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1672                         NULL,
1673                         rte_strerror(EINVAL));
1674
1675         /* Congestion management must not be head drop */
1676         if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
1677                 return -rte_tm_error_set(error,
1678                         EINVAL,
1679                         RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
1680                         NULL,
1681                         rte_strerror(EINVAL));
1682
1683         /* Congestion management set to WRED */
1684         if (params->leaf.cman == RTE_TM_CMAN_WRED) {
1685                 uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
1686                 struct tm_wred_profile *wp = tm_wred_profile_search(dev,
1687                         wred_profile_id);
1688
1689                 /* WRED profile (for private WRED context) must be valid */
1690                 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
1691                         wp == NULL)
1692                         return -rte_tm_error_set(error,
1693                                 EINVAL,
1694                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
1695                                 NULL,
1696                                 rte_strerror(EINVAL));
1697
1698                 /* No shared WRED contexts */
1699                 if (params->leaf.wred.n_shared_wred_contexts != 0)
1700                         return -rte_tm_error_set(error,
1701                                 EINVAL,
1702                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
1703                                 NULL,
1704                                 rte_strerror(EINVAL));
1705         }
1706
1707         /* Stats */
1708         if (params->stats_mask & ~STATS_MASK_QUEUE)
1709                 return -rte_tm_error_set(error,
1710                         EINVAL,
1711                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1712                         NULL,
1713                         rte_strerror(EINVAL));
1714
1715         return 0;
1716 }
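/*
 * Illustrative sketch (not part of the driver): a queue-level node that
 * passes the checks above could use parameters along these lines, where
 * WRED_PROFILE_ID stands for a hypothetical, previously added WRED profile:
 *
 *	struct rte_tm_node_params qp = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *		.n_shared_shapers = 0,
 *		.leaf = {
 *			.cman = RTE_TM_CMAN_WRED,
 *			.wred = {
 *				.wred_profile_id = WRED_PROFILE_ID,
 *				.n_shared_wred_contexts = 0,
 *			},
 *		},
 *		.stats_mask = STATS_MASK_QUEUE,
 *	};
 */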
1717
1718 static int
1719 node_add_check(struct rte_eth_dev *dev,
1720         uint32_t node_id,
1721         uint32_t parent_node_id,
1722         uint32_t priority,
1723         uint32_t weight,
1724         uint32_t level_id,
1725         struct rte_tm_node_params *params,
1726         struct rte_tm_error *error)
1727 {
1728         struct tm_node *pn;
1729         uint32_t level;
1730         int status;
1731
1732         /* node_id, parent_node_id:
1733          *    -node_id must not be RTE_TM_NODE_ID_NULL
1734          *    -node_id must not be in use
1735          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1736          *        -root node must not exist
1737          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1738          *        -parent_node_id must be valid
1739          */
1740         if (node_id == RTE_TM_NODE_ID_NULL)
1741                 return -rte_tm_error_set(error,
1742                         EINVAL,
1743                         RTE_TM_ERROR_TYPE_NODE_ID,
1744                         NULL,
1745                         rte_strerror(EINVAL));
1746
1747         if (tm_node_search(dev, node_id))
1748                 return -rte_tm_error_set(error,
1749                         EEXIST,
1750                         RTE_TM_ERROR_TYPE_NODE_ID,
1751                         NULL,
1752                         rte_strerror(EEXIST));
1753
1754         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
1755                 pn = NULL;
1756                 if (tm_root_node_present(dev))
1757                         return -rte_tm_error_set(error,
1758                                 EEXIST,
1759                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1760                                 NULL,
1761                                 rte_strerror(EEXIST));
1762         } else {
1763                 pn = tm_node_search(dev, parent_node_id);
1764                 if (pn == NULL)
1765                         return -rte_tm_error_set(error,
1766                                 EINVAL,
1767                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1768                                 NULL,
1769                                 rte_strerror(EINVAL));
1770         }
1771
1772         /* priority: must be 0 .. RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1 (0 .. 12) */
1773         if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1774                 return -rte_tm_error_set(error,
1775                         EINVAL,
1776                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1777                         NULL,
1778                         rte_strerror(EINVAL));
1779
1780         /* weight: must be 1 .. 255 */
1781         if (weight == 0 || weight > UINT8_MAX)
1782                 return -rte_tm_error_set(error,
1783                         EINVAL,
1784                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1785                         NULL,
1786                         rte_strerror(EINVAL));
1787
1788         /* level_id: if valid, then
1789          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1790          *        -level_id must be zero
1791          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1792          *        -level_id must be parent level ID plus one
1793          */
1794         level = (pn == NULL) ? 0 : pn->level + 1;
1795         if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
1796                 return -rte_tm_error_set(error,
1797                         EINVAL,
1798                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1799                         NULL,
1800                         rte_strerror(EINVAL));
1801
1802         /* params: must not be NULL */
1803         if (params == NULL)
1804                 return -rte_tm_error_set(error,
1805                         EINVAL,
1806                         RTE_TM_ERROR_TYPE_NODE_PARAMS,
1807                         NULL,
1808                         rte_strerror(EINVAL));
1809
1810         /* params: per level checks */
1811         switch (level) {
1812         case TM_NODE_LEVEL_PORT:
1813                 status = node_add_check_port(dev, node_id,
1814                         parent_node_id, priority, weight, level_id,
1815                         params, error);
1816                 if (status)
1817                         return status;
1818                 break;
1819
1820         case TM_NODE_LEVEL_SUBPORT:
1821                 status = node_add_check_subport(dev, node_id,
1822                         parent_node_id, priority, weight, level_id,
1823                         params, error);
1824                 if (status)
1825                         return status;
1826                 break;
1827
1828         case TM_NODE_LEVEL_PIPE:
1829                 status = node_add_check_pipe(dev, node_id,
1830                         parent_node_id, priority, weight, level_id,
1831                         params, error);
1832                 if (status)
1833                         return status;
1834                 break;
1835
1836         case TM_NODE_LEVEL_TC:
1837                 status = node_add_check_tc(dev, node_id,
1838                         parent_node_id, priority, weight, level_id,
1839                         params, error);
1840                 if (status)
1841                         return status;
1842                 break;
1843
1844         case TM_NODE_LEVEL_QUEUE:
1845                 status = node_add_check_queue(dev, node_id,
1846                         parent_node_id, priority, weight, level_id,
1847                         params, error);
1848                 if (status)
1849                         return status;
1850                 break;
1851
1852         default:
1853                 return -rte_tm_error_set(error,
1854                         EINVAL,
1855                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1856                         NULL,
1857                         rte_strerror(EINVAL));
1858         }
1859
1860         return 0;
1861 }
1862
1863 /* Traffic manager node add */
1864 static int
1865 pmd_tm_node_add(struct rte_eth_dev *dev,
1866         uint32_t node_id,
1867         uint32_t parent_node_id,
1868         uint32_t priority,
1869         uint32_t weight,
1870         uint32_t level_id,
1871         struct rte_tm_node_params *params,
1872         struct rte_tm_error *error)
1873 {
1874         struct pmd_internals *p = dev->data->dev_private;
1875         struct tm_node_list *nl = &p->soft.tm.h.nodes;
1876         struct tm_node *n;
1877         uint32_t i;
1878         int status;
1879
1880         /* Checks */
1881         if (p->soft.tm.hierarchy_frozen)
1882                 return -rte_tm_error_set(error,
1883                         EBUSY,
1884                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1885                         NULL,
1886                         rte_strerror(EBUSY));
1887
1888         status = node_add_check(dev, node_id, parent_node_id, priority, weight,
1889                 level_id, params, error);
1890         if (status)
1891                 return status;
1892
1893         /* Memory allocation */
1894         n = calloc(1, sizeof(struct tm_node));
1895         if (n == NULL)
1896                 return -rte_tm_error_set(error,
1897                         ENOMEM,
1898                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1899                         NULL,
1900                         rte_strerror(ENOMEM));
1901
1902         /* Fill in */
1903         n->node_id = node_id;
1904         n->parent_node_id = parent_node_id;
1905         n->priority = priority;
1906         n->weight = weight;
1907
1908         if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1909                 n->parent_node = tm_node_search(dev, parent_node_id);
1910                 n->level = n->parent_node->level + 1;
1911         }
1912
1913         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1914                 n->shaper_profile = tm_shaper_profile_search(dev,
1915                         params->shaper_profile_id);
1916
1917         if (n->level == TM_NODE_LEVEL_QUEUE &&
1918                 params->leaf.cman == RTE_TM_CMAN_WRED)
1919                 n->wred_profile = tm_wred_profile_search(dev,
1920                         params->leaf.wred.wred_profile_id);
1921
1922         memcpy(&n->params, params, sizeof(n->params));
1923
1924         /* Add to list */
1925         TAILQ_INSERT_TAIL(nl, n, node);
1926         p->soft.tm.h.n_nodes++;
1927
1928         /* Update dependencies */
1929         if (n->parent_node)
1930                 n->parent_node->n_children++;
1931
1932         if (n->shaper_profile)
1933                 n->shaper_profile->n_users++;
1934
1935         for (i = 0; i < params->n_shared_shapers; i++) {
1936                 struct tm_shared_shaper *ss;
1937
1938                 ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
1939                 ss->n_users++;
1940         }
1941
1942         if (n->wred_profile)
1943                 n->wred_profile->n_users++;
1944
1945         p->soft.tm.h.n_tm_nodes[n->level]++;
1946
1947         return 0;
1948 }
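/*
 * Illustrative call sequence (sketch, not part of the driver): leaf node
 * IDs must be 0 .. n_queues - 1, non-leaf node IDs must be n_queues or
 * above. With hypothetical IDs ROOT = n_queues, SUB = n_queues + 1 and
 * PIPE = n_queues + 2, one branch of the hierarchy is built top-down:
 *
 *	rte_tm_node_add(port_id, ROOT, RTE_TM_NODE_ID_NULL, 0, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &port_params, &error);
 *	rte_tm_node_add(port_id, SUB, ROOT, 0, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &subport_params, &error);
 *	rte_tm_node_add(port_id, PIPE, SUB, 0, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &pipe_params, &error);
 *
 * followed by one TC node per priority 0 .. 12 under the pipe and the
 * queue nodes (IDs 0 .. n_queues - 1) under their respective TC.
 */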
1949
1950 /* Traffic manager node delete */
1951 static int
1952 pmd_tm_node_delete(struct rte_eth_dev *dev,
1953         uint32_t node_id,
1954         struct rte_tm_error *error)
1955 {
1956         struct pmd_internals *p = dev->data->dev_private;
1957         struct tm_node *n;
1958         uint32_t i;
1959
1960         /* Check hierarchy changes are currently allowed */
1961         if (p->soft.tm.hierarchy_frozen)
1962                 return -rte_tm_error_set(error,
1963                         EBUSY,
1964                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1965                         NULL,
1966                         rte_strerror(EBUSY));
1967
1968         /* Check existing */
1969         n = tm_node_search(dev, node_id);
1970         if (n == NULL)
1971                 return -rte_tm_error_set(error,
1972                         EINVAL,
1973                         RTE_TM_ERROR_TYPE_NODE_ID,
1974                         NULL,
1975                         rte_strerror(EINVAL));
1976
1977         /* Check unused */
1978         if (n->n_children)
1979                 return -rte_tm_error_set(error,
1980                         EBUSY,
1981                         RTE_TM_ERROR_TYPE_NODE_ID,
1982                         NULL,
1983                         rte_strerror(EBUSY));
1984
1985         /* Update dependencies */
1986         p->soft.tm.h.n_tm_nodes[n->level]--;
1987
1988         if (n->wred_profile)
1989                 n->wred_profile->n_users--;
1990
1991         for (i = 0; i < n->params.n_shared_shapers; i++) {
1992                 struct tm_shared_shaper *ss;
1993
1994                 ss = tm_shared_shaper_search(dev,
1995                                 n->params.shared_shaper_id[i]);
1996                 ss->n_users--;
1997         }
1998
1999         if (n->shaper_profile)
2000                 n->shaper_profile->n_users--;
2001
2002         if (n->parent_node)
2003                 n->parent_node->n_children--;
2004
2005         /* Remove from list */
2006         TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
2007         p->soft.tm.h.n_nodes--;
2008         free(n);
2009
2010         return 0;
2011 }
2012
2014 static void
2015 pipe_profile_build(struct rte_eth_dev *dev,
2016         struct tm_node *np,
2017         struct rte_sched_pipe_params *pp)
2018 {
2019         struct pmd_internals *p = dev->data->dev_private;
2020         struct tm_hierarchy *h = &p->soft.tm.h;
2021         struct tm_node_list *nl = &h->nodes;
2022         struct tm_node *nt, *nq;
2023
2024         memset(pp, 0, sizeof(*pp));
2025
2026         /* Pipe */
2027         pp->tb_rate = np->shaper_profile->params.peak.rate;
2028         pp->tb_size = np->shaper_profile->params.peak.size;
2029
2030         /* Traffic Class (TC) */
2031         pp->tc_period = PIPE_TC_PERIOD;
2032
2033         pp->tc_ov_weight = np->weight;
2034
2035         TAILQ_FOREACH(nt, nl, node) {
2036                 uint32_t queue_id = 0;
2037
2038                 if (nt->level != TM_NODE_LEVEL_TC ||
2039                         nt->parent_node_id != np->node_id)
2040                         continue;
2041
2042                 pp->tc_rate[nt->priority] =
2043                         nt->shaper_profile->params.peak.rate;
2044
2045                 /* Queue */
2046                 TAILQ_FOREACH(nq, nl, node) {
2047
2048                         if (nq->level != TM_NODE_LEVEL_QUEUE ||
2049                                 nq->parent_node_id != nt->node_id)
2050                                 continue;
2051
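                        /* queue_id enumerates the queues of the current TC
                         * in list order; only the best-effort TC has more
                         * than one queue, and only its queue weights are
                         * recorded in wrr_weights[].
                         */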
2052                         if (nt->priority == RTE_SCHED_TRAFFIC_CLASS_BE)
2053                                 pp->wrr_weights[queue_id] = nq->weight;
2054
2055                         queue_id++;
2056                 }
2057         }
2058 }
2059
2060 static int
2061 pipe_profile_free_exists(struct rte_eth_dev *dev,
2062         uint32_t *pipe_profile_id)
2063 {
2064         struct pmd_internals *p = dev->data->dev_private;
2065         struct tm_params *t = &p->soft.tm.params;
2066
2067         if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) {
2068                 *pipe_profile_id = t->n_pipe_profiles;
2069                 return 1;
2070         }
2071
2072         return 0;
2073 }
2074
2075 static int
2076 pipe_profile_exists(struct rte_eth_dev *dev,
2077         struct rte_sched_pipe_params *pp,
2078         uint32_t *pipe_profile_id)
2079 {
2080         struct pmd_internals *p = dev->data->dev_private;
2081         struct tm_params *t = &p->soft.tm.params;
2082         uint32_t i;
2083
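        /* Bytewise comparison is sound here because pipe_profile_build()
         * zero-fills the whole structure before populating it, so padding
         * bytes always compare equal.
         */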
2084         for (i = 0; i < t->n_pipe_profiles; i++)
2085                 if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
2086                         if (pipe_profile_id)
2087                                 *pipe_profile_id = i;
2088                         return 1;
2089                 }
2090
2091         return 0;
2092 }
2093
2094 static void
2095 pipe_profile_install(struct rte_eth_dev *dev,
2096         struct rte_sched_pipe_params *pp,
2097         uint32_t pipe_profile_id)
2098 {
2099         struct pmd_internals *p = dev->data->dev_private;
2100         struct tm_params *t = &p->soft.tm.params;
2101
2102         memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
2103         t->n_pipe_profiles++;
2104 }
2105
2106 static void
2107 pipe_profile_mark(struct rte_eth_dev *dev,
2108         uint32_t subport_id,
2109         uint32_t pipe_id,
2110         uint32_t pipe_profile_id)
2111 {
2112         struct pmd_internals *p = dev->data->dev_private;
2113         struct tm_hierarchy *h = &p->soft.tm.h;
2114         struct tm_params *t = &p->soft.tm.params;
2115         uint32_t n_pipes_per_subport, pos;
2116
2117         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2118                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2119         pos = subport_id * n_pipes_per_subport + pipe_id;
2120
2121         t->pipe_to_profile[pos] = pipe_profile_id;
2122 }
2123
2124 static struct rte_sched_pipe_params *
2125 pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
2126 {
2127         struct pmd_internals *p = dev->data->dev_private;
2128         struct tm_hierarchy *h = &p->soft.tm.h;
2129         struct tm_params *t = &p->soft.tm.params;
2130         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2131                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2132
2133         uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
2134         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2135
2136         uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
2137         uint32_t pipe_profile_id = t->pipe_to_profile[pos];
2138
2139         return &t->pipe_profiles[pipe_profile_id];
2140 }
2141
2142 static int
2143 pipe_profiles_generate(struct rte_eth_dev *dev)
2144 {
2145         struct pmd_internals *p = dev->data->dev_private;
2146         struct tm_hierarchy *h = &p->soft.tm.h;
2147         struct tm_node_list *nl = &h->nodes;
2148         struct tm_node *ns, *np;
2149         uint32_t subport_id;
2150
2151         /* Objective: Fill in the following fields in struct tm_params:
2152          *    - pipe_profiles
2153          *    - n_pipe_profiles
2154          *    - pipe_to_profile
2155          */
2156
2157         subport_id = 0;
2158         TAILQ_FOREACH(ns, nl, node) {
2159                 uint32_t pipe_id;
2160
2161                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2162                         continue;
2163
2164                 pipe_id = 0;
2165                 TAILQ_FOREACH(np, nl, node) {
2166                         struct rte_sched_pipe_params pp;
2167                         uint32_t pos;
2168
2169                         if (np->level != TM_NODE_LEVEL_PIPE ||
2170                                 np->parent_node_id != ns->node_id)
2171                                 continue;
2172
2173                         pipe_profile_build(dev, np, &pp);
2174
2175                         if (!pipe_profile_exists(dev, &pp, &pos)) {
2176                                 if (!pipe_profile_free_exists(dev, &pos))
2177                                         return -1;
2178
2179                                 pipe_profile_install(dev, &pp, pos);
2180                         }
2181
2182                         pipe_profile_mark(dev, subport_id, pipe_id, pos);
2183
2184                         pipe_id++;
2185                 }
2186
2187                 subport_id++;
2188         }
2189
2190         return 0;
2191 }
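/*
 * Illustrative outcome (sketch): when every pipe in the hierarchy shares
 * the same shaper and weight configuration, the loops above produce a
 * single profile (n_pipe_profiles == 1) and each valid pipe_to_profile[]
 * entry is 0; pipes with distinct configurations each get (or reuse) the
 * profile matching their content.
 */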
2192
2193 static struct tm_wred_profile *
2194 tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
2195 {
2196         struct pmd_internals *p = dev->data->dev_private;
2197         struct tm_hierarchy *h = &p->soft.tm.h;
2198         struct tm_node_list *nl = &h->nodes;
2199         struct tm_node *nq;
2200
2201         TAILQ_FOREACH(nq, nl, node) {
2202                 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2203                         nq->parent_node->priority != tc_id)
2204                         continue;
2205
2206                 return nq->wred_profile;
2207         }
2208
2209         return NULL;
2210 }
2211
2212 #ifdef RTE_SCHED_RED
2213
2214 static void
2215 wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)
2216 {
2217         struct pmd_internals *p = dev->data->dev_private;
2218         struct rte_sched_subport_params *pp =
2219                 &p->soft.tm.params.subport_params[subport_id];
2220
2221         uint32_t tc_id;
2222         enum rte_color color;
2223
2224         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
2225                 for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
2226                         struct tm_wred_profile *src_wp =
2227                                 tm_tc_wred_profile_get(dev, tc_id);
2228                         /* No WRED profile for this TC: leave defaults */
2229                         if (src_wp == NULL)
2230                                 continue;
2231
2232                         memcpy(&pp->red_params[tc_id][color],
2233                                 &src_wp->params.red_params[color], sizeof(struct rte_red_params));
2234                 }
2235 }
2236
2237 #else
2238
2239 #define wred_profiles_set(dev, subport_id)
2240
2241 #endif
2242
2243 static struct tm_shared_shaper *
2244 tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
2245 {
2246         return (tc_node->params.n_shared_shapers) ?
2247                 tm_shared_shaper_search(dev,
2248                         tc_node->params.shared_shaper_id[0]) :
2249                 NULL;
2250 }
2251
2252 static struct tm_shared_shaper *
2253 tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
2254         struct tm_node *subport_node,
2255         uint32_t tc_id)
2256 {
2257         struct pmd_internals *p = dev->data->dev_private;
2258         struct tm_node_list *nl = &p->soft.tm.h.nodes;
2259         struct tm_node *n;
2260
2261         TAILQ_FOREACH(n, nl, node) {
2262                 if (n->level != TM_NODE_LEVEL_TC ||
2263                         n->parent_node->parent_node_id !=
2264                                 subport_node->node_id ||
2265                         n->priority != tc_id)
2266                         continue;
2267
2268                 return tm_tc_shared_shaper_get(dev, n);
2269         }
2270
2271         return NULL;
2272 }
2273
2274 static int
2275 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2276 {
2277         struct pmd_internals *p = dev->data->dev_private;
2278         struct tm_hierarchy *h = &p->soft.tm.h;
2279         struct tm_node_list *nl = &h->nodes;
2280         struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2281         struct tm_wred_profile_list *wpl = &h->wred_profiles;
2282         struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2283         struct tm_shared_shaper *ss;
2284
2285         uint32_t n_pipes_per_subport;
2286
2287         /* Root node exists. */
2288         if (nr == NULL)
2289                 return -rte_tm_error_set(error,
2290                         EINVAL,
2291                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2292                         NULL,
2293                         rte_strerror(EINVAL));
2294
2295         /* There is at least one subport, max is not exceeded. */
2296         if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2297                 return -rte_tm_error_set(error,
2298                         EINVAL,
2299                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2300                         NULL,
2301                         rte_strerror(EINVAL));
2302
2303         /* There is at least one pipe. */
2304         if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2305                 return -rte_tm_error_set(error,
2306                         EINVAL,
2307                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2308                         NULL,
2309                         rte_strerror(EINVAL));
2310
2311         /* Number of pipes is the same for all subports. Maximum number of pipes
2312          * per subport is not exceeded.
2313          */
2314         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2315                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2316
2317         if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2318                 return -rte_tm_error_set(error,
2319                         EINVAL,
2320                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2321                         NULL,
2322                         rte_strerror(EINVAL));
2323
2324         TAILQ_FOREACH(ns, nl, node) {
2325                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2326                         continue;
2327
2328                 if (ns->n_children != n_pipes_per_subport)
2329                         return -rte_tm_error_set(error,
2330                                 EINVAL,
2331                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2332                                 NULL,
2333                                 rte_strerror(EINVAL));
2334         }
2335
2336         /* Each pipe has exactly 13 TCs, with exactly one TC for each priority */
2337         TAILQ_FOREACH(np, nl, node) {
2338                 uint32_t mask = 0, mask_expected =
2339                         RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2340                                 uint32_t);
2341
2342                 if (np->level != TM_NODE_LEVEL_PIPE)
2343                         continue;
2344
2345                 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2346                         return -rte_tm_error_set(error,
2347                                 EINVAL,
2348                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2349                                 NULL,
2350                                 rte_strerror(EINVAL));
2351
2352                 TAILQ_FOREACH(nt, nl, node) {
2353                         if (nt->level != TM_NODE_LEVEL_TC ||
2354                                 nt->parent_node_id != np->node_id)
2355                                 continue;
2356
2357                         mask |= 1 << nt->priority;
2358                 }
2359
2360                 if (mask != mask_expected)
2361                         return -rte_tm_error_set(error,
2362                                 EINVAL,
2363                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2364                                 NULL,
2365                                 rte_strerror(EINVAL));
2366         }
2367
2368         /* Each strict priority TC has exactly one queue, while the lowest
2369          * priority (best-effort) TC has RTE_SCHED_BE_QUEUES_PER_PIPE queues.
2370          */
2371         TAILQ_FOREACH(nt, nl, node) {
2372                 if (nt->level != TM_NODE_LEVEL_TC)
2373                         continue;
2374
2375                 if (nt->n_children != 1 && nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE)
2376                         return -rte_tm_error_set(error,
2377                                 EINVAL,
2378                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2379                                 NULL,
2380                                 rte_strerror(EINVAL));
2381         }
2382
2383         /*
2384          * Shared shapers:
2385          *    -For each TC #i, all pipes in the same subport use the same
2386          *     shared shaper (or no shared shaper) for their TC#i.
2387          *    -Each shared shaper needs to have at least one user. All its
2388          *     users have to be TC nodes with the same priority and the same
2389          *     subport.
2390          */
2391         TAILQ_FOREACH(ns, nl, node) {
2392                 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2393                 uint32_t id;
2394
2395                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2396                         continue;
2397
2398                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2399                         s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2400
2401                 TAILQ_FOREACH(nt, nl, node) {
2402                         struct tm_shared_shaper *subport_ss, *tc_ss;
2403
2404                         if (nt->level != TM_NODE_LEVEL_TC ||
2405                                 nt->parent_node->parent_node_id !=
2406                                         ns->node_id)
2407                                 continue;
2408
2409                         subport_ss = s[nt->priority];
2410                         tc_ss = tm_tc_shared_shaper_get(dev, nt);
2411
2412                         if (subport_ss == NULL && tc_ss == NULL)
2413                                 continue;
2414
2415                         if ((subport_ss == NULL && tc_ss != NULL) ||
2416                                 (subport_ss != NULL && tc_ss == NULL) ||
2417                                 subport_ss->shared_shaper_id !=
2418                                         tc_ss->shared_shaper_id)
2419                                 return -rte_tm_error_set(error,
2420                                         EINVAL,
2421                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2422                                         NULL,
2423                                         rte_strerror(EINVAL));
2424                 }
2425         }
2426
2427         TAILQ_FOREACH(ss, ssl, node) {
2428                 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2429                 uint32_t n_users = 0;
2430
2431                 if (nt_any != NULL)
2432                         TAILQ_FOREACH(nt, nl, node) {
2433                                 if (nt->level != TM_NODE_LEVEL_TC ||
2434                                         nt->priority != nt_any->priority ||
2435                                         nt->parent_node->parent_node_id !=
2436                                         nt_any->parent_node->parent_node_id)
2437                                         continue;
2438
2439                                 n_users++;
2440                         }
2441
2442                 if (ss->n_users == 0 || ss->n_users != n_users)
2443                         return -rte_tm_error_set(error,
2444                                 EINVAL,
2445                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2446                                 NULL,
2447                                 rte_strerror(EINVAL));
2448         }
2449
2450         /* Generate pipe profiles; fails if there are too many distinct profiles. */
2451         if (pipe_profiles_generate(dev))
2452                 return -rte_tm_error_set(error,
2453                         EINVAL,
2454                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2455                         NULL,
2456                         rte_strerror(EINVAL));
2457
2458         /*
2459          * WRED (when used, i.e. at least one WRED profile defined):
2460          *    -Each WRED profile must have at least one user.
2461          *    -All leaf nodes must have their private WRED context enabled.
2462          *    -For each TC #i, all leaf nodes must use the same WRED profile
2463          *     for their private WRED context.
2464          */
2465         if (h->n_wred_profiles) {
2466                 struct tm_wred_profile *wp;
2467                 struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2468                 uint32_t id;
2469
2470                 TAILQ_FOREACH(wp, wpl, node)
2471                         if (wp->n_users == 0)
2472                                 return -rte_tm_error_set(error,
2473                                         EINVAL,
2474                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2475                                         NULL,
2476                                         rte_strerror(EINVAL));
2477
2478                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
2479                         w[id] = tm_tc_wred_profile_get(dev, id);
2480
2481                         if (w[id] == NULL)
2482                                 return -rte_tm_error_set(error,
2483                                         EINVAL,
2484                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2485                                         NULL,
2486                                         rte_strerror(EINVAL));
2487                 }
2488
2489                 TAILQ_FOREACH(nq, nl, node) {
2490                         uint32_t id;
2491
2492                         if (nq->level != TM_NODE_LEVEL_QUEUE)
2493                                 continue;
2494
2495                         id = nq->parent_node->priority;
2496
2497                         if (nq->wred_profile == NULL ||
2498                                 nq->wred_profile->wred_profile_id !=
2499                                         w[id]->wred_profile_id)
2500                                 return -rte_tm_error_set(error,
2501                                         EINVAL,
2502                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2503                                         NULL,
2504                                         rte_strerror(EINVAL));
2505                 }
2506         }
2507
2508         return 0;
2509 }
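/*
 * Recap of the shape enforced above (illustrative):
 *
 *	port
 *	 +-- subport 0 .. n-1            (same pipe count per subport)
 *	      +-- pipe 0 .. m-1
 *	           +-- TC 0 .. 12         (one TC per priority level)
 *	                +-- queue         (1 queue per strict priority TC,
 *	                                   4 queues for the best-effort TC)
 */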
2510
2511 static void
2512 hierarchy_blueprints_create(struct rte_eth_dev *dev)
2513 {
2514         struct pmd_internals *p = dev->data->dev_private;
2515         struct tm_params *t = &p->soft.tm.params;
2516         struct tm_hierarchy *h = &p->soft.tm.h;
2517
2518         struct tm_node_list *nl = &h->nodes;
2519         struct tm_node *root = tm_root_node_present(dev), *n;
2520
2521         uint32_t subport_id;
2522
2523         t->port_params = (struct rte_sched_port_params) {
2524                 .name = dev->data->name,
2525                 .socket = dev->data->numa_node,
2526                 .rate = root->shaper_profile->params.peak.rate,
2527                 .mtu = dev->data->mtu,
2528                 .frame_overhead =
2529                         root->shaper_profile->params.pkt_length_adjust,
2530                 .n_subports_per_port = root->n_children,
2531                 .n_pipes_per_subport = TM_MAX_PIPES_PER_SUBPORT,
2532         };
2533
2534         subport_id = 0;
2535         TAILQ_FOREACH(n, nl, node) {
2536                 uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2537                 uint32_t i;
2538
2539                 if (n->level != TM_NODE_LEVEL_SUBPORT)
2540                         continue;
2541
2542                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
2543                         struct tm_shared_shaper *ss;
2544                         struct tm_shaper_profile *sp;
2545
2546                         ss = tm_subport_tc_shared_shaper_get(dev, n, i);
2547                         sp = (ss) ? tm_shaper_profile_search(dev,
2548                                 ss->shaper_profile_id) :
2549                                 n->shaper_profile;
2550                         tc_rate[i] = sp->params.peak.rate;
2551                 }
2552
2553                 t->subport_params[subport_id] =
2554                         (struct rte_sched_subport_params) {
2555                                 .tb_rate = n->shaper_profile->params.peak.rate,
2556                                 .tb_size = n->shaper_profile->params.peak.size,
2557
2558                                 .tc_rate = {tc_rate[0],
2559                                         tc_rate[1],
2560                                         tc_rate[2],
2561                                         tc_rate[3],
2562                                         tc_rate[4],
2563                                         tc_rate[5],
2564                                         tc_rate[6],
2565                                         tc_rate[7],
2566                                         tc_rate[8],
2567                                         tc_rate[9],
2568                                         tc_rate[10],
2569                                         tc_rate[11],
2570                                         tc_rate[12],
2571                                 },
2572                                 .tc_period = SUBPORT_TC_PERIOD,
2573                                 .n_pipes_per_subport_enabled =
2574                                         h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2575                                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
2576                                 .qsize = {p->params.tm.qsize[0],
2577                                         p->params.tm.qsize[1],
2578                                         p->params.tm.qsize[2],
2579                                         p->params.tm.qsize[3],
2580                                         p->params.tm.qsize[4],
2581                                         p->params.tm.qsize[5],
2582                                         p->params.tm.qsize[6],
2583                                         p->params.tm.qsize[7],
2584                                         p->params.tm.qsize[8],
2585                                         p->params.tm.qsize[9],
2586                                         p->params.tm.qsize[10],
2587                                         p->params.tm.qsize[11],
2588                                         p->params.tm.qsize[12],
2589                                 },
2590                                 .pipe_profiles = t->pipe_profiles,
2591                                 .n_pipe_profiles = t->n_pipe_profiles,
2592                                 .n_max_pipe_profiles = TM_MAX_PIPE_PROFILE,
2593                 };
2594                 wred_profiles_set(dev, subport_id);
2595                 subport_id++;
2596         }
2597 }
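/*
 * Illustrative outcome (hypothetical numbers): for a root shaper of
 * 1250000000 bytes/s (10 Gbps) and two subports of 625000000 bytes/s
 * (5 Gbps) each, the blueprints above yield:
 *
 *	port_params.rate                = 1250000000
 *	port_params.n_subports_per_port = 2
 *	subport_params[0].tb_rate       = 625000000
 *	subport_params[1].tb_rate       = 625000000
 *
 * Each subport TC rate defaults to the subport shaper rate unless a
 * shared shaper is attached to that TC, in which case the shared shaper
 * profile rate is used instead.
 */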
2598
2599 /* Traffic manager hierarchy commit */
2600 static int
2601 pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
2602         int clear_on_fail,
2603         struct rte_tm_error *error)
2604 {
2605         struct pmd_internals *p = dev->data->dev_private;
2606         int status;
2607
2608         /* Checks */
2609         if (p->soft.tm.hierarchy_frozen)
2610                 return -rte_tm_error_set(error,
2611                         EBUSY,
2612                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2613                         NULL,
2614                         rte_strerror(EBUSY));
2615
2616         status = hierarchy_commit_check(dev, error);
2617         if (status) {
2618                 if (clear_on_fail)
2619                         tm_hierarchy_free(p);
2620
2621                 return status;
2622         }
2623
2624         /* Create blueprints */
2625         hierarchy_blueprints_create(dev);
2626
2627         /* Freeze hierarchy */
2628         p->soft.tm.hierarchy_frozen = 1;
2629
2630         return 0;
2631 }
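/*
 * Illustrative usage (sketch, not part of the driver): once the hierarchy
 * is fully specified, the application freezes it through the generic API:
 *
 *	struct rte_tm_error error;
 *
 *	if (rte_tm_hierarchy_commit(port_id, 1, &error))
 *		printf("TM commit failed: %s\n", error.message);
 *
 * A non-zero clear_on_fail asks the driver to free the whole candidate
 * hierarchy when any of the consistency checks fails.
 */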
2632
2633 #ifdef RTE_SCHED_SUBPORT_TC_OV
2634
2635 static int
2636 update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
2637 {
2638         struct pmd_internals *p = dev->data->dev_private;
2639         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2640
2641         struct tm_node *ns = np->parent_node;
2642         uint32_t subport_id = tm_node_subport_id(dev, ns);
2643
2644         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2645         struct rte_sched_pipe_params profile1;
2646         uint32_t pipe_profile_id;
2647
2648         /* Derive new pipe profile. */
2649         memcpy(&profile1, profile0, sizeof(profile1));
2650         profile1.tc_ov_weight = (uint8_t)weight;
2651
2652         /* Since implementation does not allow adding more pipe profiles after
2653          * port configuration, the pipe configuration can be successfully
2654          * updated only if the new profile is also part of the existing set of
2655          * pipe profiles.
2656          */
2657         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2658                 return -1;
2659
2660         /* Update the pipe profile used by the current pipe. */
2661         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2662                 (int32_t)pipe_profile_id))
2663                 return -1;
2664
2665         /* Commit changes. */
2666         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2667         np->weight = weight;
2668
2669         return 0;
2670 }
2671
2672 #endif
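/*
 * Note on the runtime update helpers in this section: since no pipe
 * profile can be added after the port is configured, a weight or rate
 * update succeeds only when a pipe profile with the requested value is
 * already part of the committed set, i.e. when at least one pipe was
 * configured with that exact value before the hierarchy commit.
 */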
2673
2674 static int
2675 update_queue_weight(struct rte_eth_dev *dev,
2676         struct tm_node *nq, uint32_t weight)
2677 {
2678         struct pmd_internals *p = dev->data->dev_private;
2679         uint32_t queue_id = tm_node_queue_id(dev, nq);
2680
2681         struct tm_node *nt = nq->parent_node;
2682
2683         struct tm_node *np = nt->parent_node;
2684         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2685
2686         struct tm_node *ns = np->parent_node;
2687         uint32_t subport_id = tm_node_subport_id(dev, ns);
2688
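        /* Strict priority TC queues have a fixed weight; only best-effort
         * queues are WRR, so the WRR weight index is the queue id relative
         * to the first best-effort queue (RTE_SCHED_TRAFFIC_CLASS_BE).
         */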
2689         uint32_t pipe_be_queue_id =
2690                 queue_id - RTE_SCHED_TRAFFIC_CLASS_BE;
2691
2692         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2693         struct rte_sched_pipe_params profile1;
2694         uint32_t pipe_profile_id;
2695
2696         /* Derive new pipe profile. */
2697         memcpy(&profile1, profile0, sizeof(profile1));
2698         profile1.wrr_weights[pipe_be_queue_id] = (uint8_t)weight;
2699
2700         /* Since implementation does not allow adding more pipe profiles after
2701          * port configuration, the pipe configuration can be successfully
2702          * updated only if the new profile is also part of the existing set
2703          * of pipe profiles.
2704          */
2705         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2706                 return -1;
2707
2708         /* Update the pipe profile used by the current pipe. */
2709         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2710                 (int32_t)pipe_profile_id))
2711                 return -1;
2712
2713         /* Commit changes. */
2714         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2715         nq->weight = weight;
2716
2717         return 0;
2718 }
2719
2720 /* Traffic manager node parent update */
2721 static int
2722 pmd_tm_node_parent_update(struct rte_eth_dev *dev,
2723         uint32_t node_id,
2724         uint32_t parent_node_id,
2725         uint32_t priority,
2726         uint32_t weight,
2727         struct rte_tm_error *error)
2728 {
2729         struct tm_node *n;
2730
2731         /* Port must be started and TM used. */
2732         if (dev->data->dev_started == 0 || tm_used(dev) == 0)
2733                 return -rte_tm_error_set(error,
2734                         EBUSY,
2735                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2736                         NULL,
2737                         rte_strerror(EBUSY));
2738
2739         /* Node must be valid */
2740         n = tm_node_search(dev, node_id);
2741         if (n == NULL)
2742                 return -rte_tm_error_set(error,
2743                         EINVAL,
2744                         RTE_TM_ERROR_TYPE_NODE_ID,
2745                         NULL,
2746                         rte_strerror(EINVAL));
2747
2748         /* Parent node must be the same */
2749         if (n->parent_node_id != parent_node_id)
2750                 return -rte_tm_error_set(error,
2751                         EINVAL,
2752                         RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
2753                         NULL,
2754                         rte_strerror(EINVAL));
2755
2756         /* Priority must be the same */
2757         if (n->priority != priority)
2758                 return -rte_tm_error_set(error,
2759                         EINVAL,
2760                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
2761                         NULL,
2762                         rte_strerror(EINVAL));
2763
2764         /* weight: must be 1 .. 255 */
2765         if (weight == 0 || weight > UINT8_MAX)
2766                 return -rte_tm_error_set(error,
2767                         EINVAL,
2768                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2769                         NULL,
2770                         rte_strerror(EINVAL));
2771
2772         switch (n->level) {
2773         case TM_NODE_LEVEL_PORT:
2774                 return -rte_tm_error_set(error,
2775                         EINVAL,
2776                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2777                         NULL,
2778                         rte_strerror(EINVAL));
2780         case TM_NODE_LEVEL_SUBPORT:
2781                 return -rte_tm_error_set(error,
2782                         EINVAL,
2783                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2784                         NULL,
2785                         rte_strerror(EINVAL));
2787         case TM_NODE_LEVEL_PIPE:
2788 #ifdef RTE_SCHED_SUBPORT_TC_OV
2789                 if (update_pipe_weight(dev, n, weight))
2790                         return -rte_tm_error_set(error,
2791                                 EINVAL,
2792                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2793                                 NULL,
2794                                 rte_strerror(EINVAL));
2795                 return 0;
2796 #else
2797                 return -rte_tm_error_set(error,
2798                         EINVAL,
2799                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2800                         NULL,
2801                         rte_strerror(EINVAL));
2802 #endif
2804         case TM_NODE_LEVEL_TC:
2805                 return -rte_tm_error_set(error,
2806                         EINVAL,
2807                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2808                         NULL,
2809                         rte_strerror(EINVAL));
2810                 /* fall-through */
2811         case TM_NODE_LEVEL_QUEUE:
2812                 /* fall-through */
2813         default:
2814                 if (update_queue_weight(dev, n, weight))
2815                         return -rte_tm_error_set(error,
2816                                 EINVAL,
2817                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2818                                 NULL,
2819                                 rte_strerror(EINVAL));
2820                 return 0;
2821         }
2822 }
2823
2824 static int
2825 update_subport_rate(struct rte_eth_dev *dev,
2826         struct tm_node *ns,
2827         struct tm_shaper_profile *sp)
2828 {
2829         struct pmd_internals *p = dev->data->dev_private;
2830         uint32_t subport_id = tm_node_subport_id(dev, ns);
2831
2832         struct rte_sched_subport_params subport_params;
2833
2834         /* Derive new subport configuration. */
2835         memcpy(&subport_params,
2836                 &p->soft.tm.params.subport_params[subport_id],
2837                 sizeof(subport_params));
2838         subport_params.tb_rate = sp->params.peak.rate;
2839         subport_params.tb_size = sp->params.peak.size;
2840
2841         /* Update the subport configuration. */
2842         if (rte_sched_subport_config(SCHED(p), subport_id,
2843                 &subport_params))
2844                 return -1;
2845
2846         /* Commit changes. */
2847         ns->shaper_profile->n_users--;
2848
2849         ns->shaper_profile = sp;
2850         ns->params.shaper_profile_id = sp->shaper_profile_id;
2851         sp->n_users++;
2852
2853         memcpy(&p->soft.tm.params.subport_params[subport_id],
2854                 &subport_params,
2855                 sizeof(subport_params));
2856
2857         return 0;
2858 }
2859
2860 static int
2861 update_pipe_rate(struct rte_eth_dev *dev,
2862         struct tm_node *np,
2863         struct tm_shaper_profile *sp)
2864 {
2865         struct pmd_internals *p = dev->data->dev_private;
2866         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2867
2868         struct tm_node *ns = np->parent_node;
2869         uint32_t subport_id = tm_node_subport_id(dev, ns);
2870
2871         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2872         struct rte_sched_pipe_params profile1;
2873         uint32_t pipe_profile_id;
2874
2875         /* Derive new pipe profile. */
2876         memcpy(&profile1, profile0, sizeof(profile1));
2877         profile1.tb_rate = sp->params.peak.rate;
2878         profile1.tb_size = sp->params.peak.size;
2879
2880         /* Since implementation does not allow adding more pipe profiles after
2881          * port configuration, the pipe configuration can be successfully
2882          * updated only if the new profile is also part of the existing set of
2883          * pipe profiles.
2884          */
2885         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2886                 return -1;
2887
2888         /* Update the pipe profile used by the current pipe. */
2889         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2890                 (int32_t)pipe_profile_id))
2891                 return -1;
2892
2893         /* Commit changes. */
2894         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2895         np->shaper_profile->n_users--;
2896         np->shaper_profile = sp;
2897         np->params.shaper_profile_id = sp->shaper_profile_id;
2898         sp->n_users++;
2899
2900         return 0;
2901 }
2902
2903 static int
2904 update_tc_rate(struct rte_eth_dev *dev,
2905         struct tm_node *nt,
2906         struct tm_shaper_profile *sp)
2907 {
2908         struct pmd_internals *p = dev->data->dev_private;
2909         uint32_t tc_id = tm_node_tc_id(dev, nt);
2910
2911         struct tm_node *np = nt->parent_node;
2912         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2913
2914         struct tm_node *ns = np->parent_node;
2915         uint32_t subport_id = tm_node_subport_id(dev, ns);
2916
2917         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2918         struct rte_sched_pipe_params profile1;
2919         uint32_t pipe_profile_id;
2920
2921         /* Derive new pipe profile. */
2922         memcpy(&profile1, profile0, sizeof(profile1));
2923         profile1.tc_rate[tc_id] = sp->params.peak.rate;
2924
2925         /* Since the implementation does not allow adding pipe profiles
2926          * after the port has been configured, the pipe configuration can
2927          * be updated successfully only if the new profile is already
2928          * part of the existing set of pipe profiles.
2929          */
2930         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2931                 return -1;
2932
2933         /* Update the pipe profile used by the current pipe. */
2934         if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2935                 (int32_t)pipe_profile_id))
2936                 return -1;
2937
2938         /* Commit changes. */
2939         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2940         nt->shaper_profile->n_users--;
2941         nt->shaper_profile = sp;
2942         nt->params.shaper_profile_id = sp->shaper_profile_id;
2943         sp->n_users++;
2944
2945         return 0;
2946 }
2947
2948 /* Traffic manager node shaper update */
2949 static int
2950 pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
2951         uint32_t node_id,
2952         uint32_t shaper_profile_id,
2953         struct rte_tm_error *error)
2954 {
2955         struct tm_node *n;
2956         struct tm_shaper_profile *sp;
2957
2958         /* Port must be started and TM used. */
2959         if (dev->data->dev_started == 0 || tm_used(dev) == 0)
2960                 return -rte_tm_error_set(error,
2961                         EBUSY,
2962                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2963                         NULL,
2964                         rte_strerror(EBUSY));
2965
2966         /* Node must be valid */
2967         n = tm_node_search(dev, node_id);
2968         if (n == NULL)
2969                 return -rte_tm_error_set(error,
2970                         EINVAL,
2971                         RTE_TM_ERROR_TYPE_NODE_ID,
2972                         NULL,
2973                         rte_strerror(EINVAL));
2974
2975         /* Shaper profile must be valid. */
2976         sp = tm_shaper_profile_search(dev, shaper_profile_id);
2977         if (sp == NULL)
2978                 return -rte_tm_error_set(error,
2979                         EINVAL,
2980                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
2981                         NULL,
2982                         rte_strerror(EINVAL));
2983
2984         switch (n->level) {
2985         case TM_NODE_LEVEL_PORT:
2986                 return -rte_tm_error_set(error,
2987                         EINVAL,
2988                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2989                         NULL,
2990                         rte_strerror(EINVAL));
2992         case TM_NODE_LEVEL_SUBPORT:
2993                 if (update_subport_rate(dev, n, sp))
2994                         return -rte_tm_error_set(error,
2995                                 EINVAL,
2996                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2997                                 NULL,
2998                                 rte_strerror(EINVAL));
2999                 return 0;
3001         case TM_NODE_LEVEL_PIPE:
3002                 if (update_pipe_rate(dev, n, sp))
3003                         return -rte_tm_error_set(error,
3004                                 EINVAL,
3005                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3006                                 NULL,
3007                                 rte_strerror(EINVAL));
3008                 return 0;
3010         case TM_NODE_LEVEL_TC:
3011                 if (update_tc_rate(dev, n, sp))
3012                         return -rte_tm_error_set(error,
3013                                 EINVAL,
3014                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3015                                 NULL,
3016                                 rte_strerror(EINVAL));
3017                 return 0;
3019         case TM_NODE_LEVEL_QUEUE:
3020                 /* fall-through */
3021         default:
3022                 return -rte_tm_error_set(error,
3023                         EINVAL,
3024                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3025                         NULL,
3026                         rte_strerror(EINVAL));
3027         }
3028 }
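
#ifdef SOFTNIC_TM_USAGE_EXAMPLE
/*
 * Minimal application-side sketch, not part of the driver: the macro
 * guard, the ids and the rates are placeholders. It creates a second
 * shaper profile and then retargets a subport node at run time through
 * the generic rte_tm API, which dispatches to the handler above.
 */
static int
example_subport_rate_update(uint16_t port_id,
	uint32_t subport_node_id,
	uint32_t new_profile_id)
{
	struct rte_tm_shaper_profile_params sp;
	struct rte_tm_error err;
	int status;

	memset(&sp, 0, sizeof(sp));
	sp.peak.rate = 500000000 / 8; /* 500 Mbps, in bytes per second */
	sp.peak.size = 1000000; /* token bucket size, in bytes */

	status = rte_tm_shaper_profile_add(port_id, new_profile_id, &sp, &err);
	if (status)
		return status;

	return rte_tm_node_shaper_update(port_id, subport_node_id,
		new_profile_id, &err);
}
#endif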
3029
3030 static inline uint32_t
3031 tm_port_queue_id(struct rte_eth_dev *dev,
3032         uint32_t port_subport_id,
3033         uint32_t subport_pipe_id,
3034         uint32_t pipe_tc_id,
3035         uint32_t tc_queue_id)
3036 {
3037         struct pmd_internals *p = dev->data->dev_private;
3038         struct tm_hierarchy *h = &p->soft.tm.h;
3039         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
3040                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3041
3042         uint32_t port_pipe_id =
3043                 port_subport_id * n_pipes_per_subport + subport_pipe_id;
3044
3045         uint32_t port_queue_id =
3046                 port_pipe_id * RTE_SCHED_QUEUES_PER_PIPE + pipe_tc_id + tc_queue_id;
3047
3048         return port_queue_id;
3049 }
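
/*
 * Worked example with assumed numbers (not taken from a real config),
 * using the current rte_sched layout of RTE_SCHED_QUEUES_PER_PIPE = 16
 * queues per pipe with the best-effort TC at index 12: a hierarchy of
 * 2 subports and 8 pipes gives n_pipes_per_subport = 4. For subport 1,
 * pipe 2, the BE TC (pipe_tc_id = 12) and its queue 3 (tc_queue_id = 3):
 *
 *	port_pipe_id  = 1 * 4 + 2       = 6
 *	port_queue_id = 6 * 16 + 12 + 3 = 111
 *
 * Queues are numbered contiguously pipe by pipe; each non-BE traffic
 * class maps to the single queue at offset tc_id, and the BE class
 * owns the last four offsets (12..15) of its pipe.
 */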
3050
3051 static int
3052 read_port_stats(struct rte_eth_dev *dev,
3053         struct tm_node *nr,
3054         struct rte_tm_node_stats *stats,
3055         uint64_t *stats_mask,
3056         int clear)
3057 {
3058         struct pmd_internals *p = dev->data->dev_private;
3059         struct tm_hierarchy *h = &p->soft.tm.h;
3060         uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3061         uint32_t subport_id;
3062
3063         for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
3064                 struct rte_sched_subport_stats s;
3065                 uint32_t tc_ov, id;
3066
3067                 /* Stats read */
3068                 int status = rte_sched_subport_read_stats(SCHED(p),
3069                         subport_id,
3070                         &s,
3071                         &tc_ov);
3072                 if (status)
3073                         return status;
3074
3075                 /* Stats accumulate */
3076                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
3077                         nr->stats.n_pkts +=
3078                                 s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
3079                         nr->stats.n_bytes +=
3080                                 s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
3081                         nr->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
3082                                 s.n_pkts_tc_dropped[id];
3083                         nr->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3084                                 s.n_bytes_tc_dropped[id];
3085                 }
3086         }
3087
3088         /* Stats copy */
3089         if (stats)
3090                 memcpy(stats, &nr->stats, sizeof(*stats));
3091
3092         if (stats_mask)
3093                 *stats_mask = STATS_MASK_DEFAULT;
3094
3095         /* Stats clear */
3096         if (clear)
3097                 memset(&nr->stats, 0, sizeof(nr->stats));
3098
3099         return 0;
3100 }
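
/*
 * All drops are attributed to RTE_COLOR_GREEN above because the
 * rte_sched subport counters used here are not color-aware: they only
 * expose aggregate per-TC packet and byte drop totals, so no per-color
 * breakdown is available at this level.
 */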
3101
3102 static int
3103 read_subport_stats(struct rte_eth_dev *dev,
3104         struct tm_node *ns,
3105         struct rte_tm_node_stats *stats,
3106         uint64_t *stats_mask,
3107         int clear)
3108 {
3109         struct pmd_internals *p = dev->data->dev_private;
3110         uint32_t subport_id = tm_node_subport_id(dev, ns);
3111         struct rte_sched_subport_stats s;
3112         uint32_t tc_ov, tc_id;
3113
3114         /* Stats read */
3115         int status = rte_sched_subport_read_stats(SCHED(p),
3116                 subport_id,
3117                 &s,
3118                 &tc_ov);
3119         if (status)
3120                 return status;
3121
3122         /* Stats accumulate */
3123         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
3124                 ns->stats.n_pkts +=
3125                         s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
3126                 ns->stats.n_bytes +=
3127                         s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
3128                 ns->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
3129                         s.n_pkts_tc_dropped[tc_id];
3130                 ns->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3131                         s.n_bytes_tc_dropped[tc_id];
3132         }
3133
3134         /* Stats copy */
3135         if (stats)
3136                 memcpy(stats, &ns->stats, sizeof(*stats));
3137
3138         if (stats_mask)
3139                 *stats_mask = STATS_MASK_DEFAULT;
3140
3141         /* Stats clear */
3142         if (clear)
3143                 memset(&ns->stats, 0, sizeof(ns->stats));
3144
3145         return 0;
3146 }
3147
3148 static int
3149 read_pipe_stats(struct rte_eth_dev *dev,
3150         struct tm_node *np,
3151         struct rte_tm_node_stats *stats,
3152         uint64_t *stats_mask,
3153         int clear)
3154 {
3155         struct pmd_internals *p = dev->data->dev_private;
3156
3157         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3158
3159         struct tm_node *ns = np->parent_node;
3160         uint32_t subport_id = tm_node_subport_id(dev, ns);
3161         uint32_t tc_id, queue_id;
3162         uint32_t i;
3163
3164         /* Stats read. The queued-packets field is a gauge, not a counter:
              * rebuild it from scratch on every read.
              */
             np->stats.leaf.n_pkts_queued = 0;
3165         for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
3166                 struct rte_sched_queue_stats s;
3167                 uint16_t qlen;
3168
                     /* Queues 0 .. (BE - 1) map one-to-one to the non-BE
                      * traffic classes, each of which owns a single queue,
                      * so their TC queue index is always 0; the remaining
                      * queues of the pipe all belong to the best-effort TC.
                      */
3169                 if (i < RTE_SCHED_TRAFFIC_CLASS_BE) {
3170                         tc_id = i;
3171                         queue_id = 0;
3172                 } else {
3173                         tc_id = RTE_SCHED_TRAFFIC_CLASS_BE;
3174                         queue_id = i - tc_id;
3175                 }
3176
3177                 uint32_t qid = tm_port_queue_id(dev,
3178                         subport_id,
3179                         pipe_id,
3180                         tc_id,
3181                         queue_id);
3182
3183                 int status = rte_sched_queue_read_stats(SCHED(p),
3184                         qid,
3185                         &s,
3186                         &qlen);
3187                 if (status)
3188                         return status;
3189
3190                 /* Stats accumulate */
3191                 np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3192                 np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3193                 np->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3194                 np->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3195                         s.n_bytes_dropped;
3196                 np->stats.leaf.n_pkts_queued += qlen;
3197         }
3198
3199         /* Stats copy */
3200         if (stats)
3201                 memcpy(stats, &np->stats, sizeof(*stats));
3202
3203         if (stats_mask)
3204                 *stats_mask = STATS_MASK_DEFAULT;
3205
3206         /* Stats clear */
3207         if (clear)
3208                 memset(&np->stats, 0, sizeof(np->stats));
3209
3210         return 0;
3211 }
3212
3213 static int
3214 read_tc_stats(struct rte_eth_dev *dev,
3215         struct tm_node *nt,
3216         struct rte_tm_node_stats *stats,
3217         uint64_t *stats_mask,
3218         int clear)
3219 {
3220         struct pmd_internals *p = dev->data->dev_private;
3221
3222         uint32_t tc_id = tm_node_tc_id(dev, nt);
3223
3224         struct tm_node *np = nt->parent_node;
3225         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3226
3227         struct tm_node *ns = np->parent_node;
3228         uint32_t subport_id = tm_node_subport_id(dev, ns);
3229         struct rte_sched_queue_stats s;
3230         uint32_t qid, i;
3231         uint16_t qlen;
3232         int status;
3233
3234         /* Stats read */
3235         if (tc_id < RTE_SCHED_TRAFFIC_CLASS_BE) {
3236                 qid = tm_port_queue_id(dev,
3237                         subport_id,
3238                         pipe_id,
3239                         tc_id,
3240                         0);
3241
3242                 status = rte_sched_queue_read_stats(SCHED(p),
3243                         qid,
3244                         &s,
3245                         &qlen);
3246                 if (status)
3247                         return status;
3248
3249                 /* Stats accumulate */
3250                 nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3251                 nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3252                 nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3253                 nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3254                         s.n_bytes_dropped;
3255                 nt->stats.leaf.n_pkts_queued = qlen;
3256         } else {
                     /* The BE TC owns several queues: rebuild the queue-length
                      * gauge, then sum the occupancy of all of them.
                      */
                     nt->stats.leaf.n_pkts_queued = 0;
3257                 for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
3258                         qid = tm_port_queue_id(dev,
3259                                 subport_id,
3260                                 pipe_id,
3261                                 tc_id,
3262                                 i);
3263
3264                         status = rte_sched_queue_read_stats(SCHED(p),
3265                                 qid,
3266                                 &s,
3267                                 &qlen);
3268                         if (status)
3269                                 return status;
3270
3271                         /* Stats accumulate */
3272                         nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3273                         nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3274                         nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
3275                                 s.n_pkts_dropped;
3276                         nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3277                                 s.n_bytes_dropped;
3278                         nt->stats.leaf.n_pkts_queued += qlen;
3279                 }
3280         }
3281
3282         /* Stats copy */
3283         if (stats)
3284                 memcpy(stats, &nt->stats, sizeof(*stats));
3285
3286         if (stats_mask)
3287                 *stats_mask = STATS_MASK_DEFAULT;
3288
3289         /* Stats clear */
3290         if (clear)
3291                 memset(&nt->stats, 0, sizeof(nt->stats));
3292
3293         return 0;
3294 }
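
/*
 * Note the asymmetry above: traffic classes 0 .. (BE - 1) own exactly
 * one queue each, so a single queue read covers them, while the
 * best-effort TC has to aggregate all RTE_SCHED_BE_QUEUES_PER_PIPE
 * (i.e. four) of its queues.
 */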
3295
3296 static int
3297 read_queue_stats(struct rte_eth_dev *dev,
3298         struct tm_node *nq,
3299         struct rte_tm_node_stats *stats,
3300         uint64_t *stats_mask,
3301         int clear)
3302 {
3303         struct pmd_internals *p = dev->data->dev_private;
3304         struct rte_sched_queue_stats s;
3305         uint16_t qlen;
3306
3307         uint32_t queue_id = tm_node_queue_id(dev, nq);
3308
3309         struct tm_node *nt = nq->parent_node;
3310         uint32_t tc_id = tm_node_tc_id(dev, nt);
3311
3312         struct tm_node *np = nt->parent_node;
3313         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3314
3315         struct tm_node *ns = np->parent_node;
3316         uint32_t subport_id = tm_node_subport_id(dev, ns);
3317
3318         /* Stats read */
3319         uint32_t qid = tm_port_queue_id(dev,
3320                 subport_id,
3321                 pipe_id,
3322                 tc_id,
3323                 queue_id);
3324
3325         int status = rte_sched_queue_read_stats(SCHED(p),
3326                 qid,
3327                 &s,
3328                 &qlen);
3329         if (status)
3330                 return status;
3331
3332         /* Stats accumulate */
3333         nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3334         nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3335         nq->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3336         nq->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3337                 s.n_bytes_dropped;
3338         nq->stats.leaf.n_pkts_queued = qlen;
3339
3340         /* Stats copy */
3341         if (stats)
3342                 memcpy(stats, &nq->stats, sizeof(*stats));
3343
3344         if (stats_mask)
3345                 *stats_mask = STATS_MASK_QUEUE;
3346
3347         /* Stats clear */
3348         if (clear)
3349                 memset(&nq->stats, 0, sizeof(nq->stats));
3350
3351         return 0;
3352 }
3353
3354 /* Traffic manager: read statistics counters for a specific node */
3355 static int
3356 pmd_tm_node_stats_read(struct rte_eth_dev *dev,
3357         uint32_t node_id,
3358         struct rte_tm_node_stats *stats,
3359         uint64_t *stats_mask,
3360         int clear,
3361         struct rte_tm_error *error)
3362 {
3363         struct tm_node *n;
3364
3365         /* Port must be started and TM used. */
3366         if (dev->data->dev_started == 0 || tm_used(dev) == 0)
3367                 return -rte_tm_error_set(error,
3368                         EBUSY,
3369                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3370                         NULL,
3371                         rte_strerror(EBUSY));
3372
3373         /* Node must be valid */
3374         n = tm_node_search(dev, node_id);
3375         if (n == NULL)
3376                 return -rte_tm_error_set(error,
3377                         EINVAL,
3378                         RTE_TM_ERROR_TYPE_NODE_ID,
3379                         NULL,
3380                         rte_strerror(EINVAL));
3381
3382         switch (n->level) {
3383         case TM_NODE_LEVEL_PORT:
3384                 if (read_port_stats(dev, n, stats, stats_mask, clear))
3385                         return -rte_tm_error_set(error,
3386                                 EINVAL,
3387                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3388                                 NULL,
3389                                 rte_strerror(EINVAL));
3390                 return 0;
3391
3392         case TM_NODE_LEVEL_SUBPORT:
3393                 if (read_subport_stats(dev, n, stats, stats_mask, clear))
3394                         return -rte_tm_error_set(error,
3395                                 EINVAL,
3396                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3397                                 NULL,
3398                                 rte_strerror(EINVAL));
3399                 return 0;
3400
3401         case TM_NODE_LEVEL_PIPE:
3402                 if (read_pipe_stats(dev, n, stats, stats_mask, clear))
3403                         return -rte_tm_error_set(error,
3404                                 EINVAL,
3405                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3406                                 NULL,
3407                                 rte_strerror(EINVAL));
3408                 return 0;
3409
3410         case TM_NODE_LEVEL_TC:
3411                 if (read_tc_stats(dev, n, stats, stats_mask, clear))
3412                         return -rte_tm_error_set(error,
3413                                 EINVAL,
3414                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3415                                 NULL,
3416                                 rte_strerror(EINVAL));
3417                 return 0;
3418
3419         case TM_NODE_LEVEL_QUEUE:
3420         default:
3421                 if (read_queue_stats(dev, n, stats, stats_mask, clear))
3422                         return -rte_tm_error_set(error,
3423                                 EINVAL,
3424                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3425                                 NULL,
3426                                 rte_strerror(EINVAL));
3427                 return 0;
3428         }
3429 }
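
#ifdef SOFTNIC_TM_USAGE_EXAMPLE
/*
 * Minimal application-side sketch, not part of the driver: the macro
 * guard and the ids are placeholders. It performs a read-and-clear of
 * one node's counters through the generic rte_tm API, which ends up in
 * the level-specific readers above; clear = 1 resets the node counters
 * once they have been copied out.
 */
static int
example_node_stats_poll(uint16_t port_id,
	uint32_t node_id,
	uint64_t *n_pkts,
	uint64_t *n_bytes)
{
	struct rte_tm_node_stats ns;
	uint64_t stats_mask;
	struct rte_tm_error err;
	int status;

	status = rte_tm_node_stats_read(port_id, node_id, &ns, &stats_mask,
		1, &err);
	if (status)
		return status;

	*n_pkts = ns.n_pkts;
	*n_bytes = ns.n_bytes;
	return 0;
}
#endif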
3430
3431 const struct rte_tm_ops pmd_tm_ops = {
3432         .node_type_get = pmd_tm_node_type_get,
3433         .capabilities_get = pmd_tm_capabilities_get,
3434         .level_capabilities_get = pmd_tm_level_capabilities_get,
3435         .node_capabilities_get = pmd_tm_node_capabilities_get,
3436
3437         .wred_profile_add = pmd_tm_wred_profile_add,
3438         .wred_profile_delete = pmd_tm_wred_profile_delete,
3439         .shared_wred_context_add_update = NULL,
3440         .shared_wred_context_delete = NULL,
3441
3442         .shaper_profile_add = pmd_tm_shaper_profile_add,
3443         .shaper_profile_delete = pmd_tm_shaper_profile_delete,
3444         .shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
3445         .shared_shaper_delete = pmd_tm_shared_shaper_delete,
3446
3447         .node_add = pmd_tm_node_add,
3448         .node_delete = pmd_tm_node_delete,
3449         .node_suspend = NULL,
3450         .node_resume = NULL,
3451         .hierarchy_commit = pmd_tm_hierarchy_commit,
3452
3453         .node_parent_update = pmd_tm_node_parent_update,
3454         .node_shaper_update = pmd_tm_node_shaper_update,
3455         .node_shared_shaper_update = NULL,
3456         .node_stats_update = NULL,
3457         .node_wfq_weight_mode_update = NULL,
3458         .node_cman_update = NULL,
3459         .node_wred_context_update = NULL,
3460         .node_shared_wred_context_update = NULL,
3461
3462         .node_stats_read = pmd_tm_node_stats_read,
3463 };
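
/*
 * Callbacks left NULL above (node_suspend, node_stats_update, the WRED
 * and shared-shaper context updates, etc.) are reported as unsupported:
 * the rte_tm API layer returns -ENOSYS to the application for any
 * operation whose entry in this table is NULL.
 */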