net/softnic: add TM capabilities ops
dpdk.git: drivers/net/softnic/rte_eth_softnic_tm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_malloc.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"

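/* Rate conversion factor: one Mbps expressed in bytes per second. */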
#define BYTES_IN_MBPS (1000 * 1000 / 8)

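/*
 * Validate the soft TM parameters against the hard device rate (in Mbps)
 * and fill in defaults: queue counts and queue sizes are rounded up to the
 * next power of two, and the soft rate defaults to the hard rate (capped at
 * UINT32_MAX bytes/sec) when left unspecified.
 */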
int
tm_params_check(struct pmd_params *params, uint32_t hard_rate)
{
        /* Widen before multiplying to avoid 32-bit overflow for hard rates
         * above ~34 Gbps.
         */
        uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
        uint32_t i;

        /* rate */
        if (params->soft.tm.rate) {
                if (params->soft.tm.rate > hard_rate_bytes_per_sec)
                        return -EINVAL;
        } else {
                params->soft.tm.rate =
                        (hard_rate_bytes_per_sec > UINT32_MAX) ?
                                UINT32_MAX : hard_rate_bytes_per_sec;
        }

        /* nb_queues */
        if (params->soft.tm.nb_queues == 0)
                return -EINVAL;

        if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
                params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;

        params->soft.tm.nb_queues =
                rte_align32pow2(params->soft.tm.nb_queues);

        /* qsize */
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                if (params->soft.tm.qsize[i] == 0)
                        return -EINVAL;

                params->soft.tm.qsize[i] =
                        rte_align32pow2(params->soft.tm.qsize[i]);
        }

        /* enq_bsz, deq_bsz */
        if (params->soft.tm.enq_bsz == 0 ||
                params->soft.tm.deq_bsz == 0 ||
                params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
                return -EINVAL;

        return 0;
}

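/*
 * Allocate the enqueue and dequeue mbuf pointer arrays on the given NUMA
 * node. Note the enqueue buffer is sized for 2 * enq_bsz entries.
 */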
int
tm_init(struct pmd_internals *p,
        struct pmd_params *params,
        int numa_node)
{
        uint32_t enq_bsz = params->soft.tm.enq_bsz;
        uint32_t deq_bsz = params->soft.tm.deq_bsz;

        p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
                2 * enq_bsz * sizeof(struct rte_mbuf *),
                0,
                numa_node);

        if (p->soft.tm.pkts_enq == NULL)
                return -ENOMEM;

        p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
                deq_bsz * sizeof(struct rte_mbuf *),
                0,
                numa_node);

        if (p->soft.tm.pkts_deq == NULL) {
                rte_free(p->soft.tm.pkts_enq);
                p->soft.tm.pkts_enq = NULL;
                return -ENOMEM;
        }

        return 0;
}

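/* Free the buffers allocated by tm_init(); rte_free(NULL) is a no-op. */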
void
tm_free(struct pmd_internals *p)
{
        rte_free(p->soft.tm.pkts_enq);
        rte_free(p->soft.tm.pkts_deq);
}

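/*
 * Instantiate the scheduler port from the cached TM parameters, then
 * configure each subport and every pipe that has a valid profile.
 */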
int
tm_start(struct pmd_internals *p)
{
        struct tm_params *t = &p->soft.tm.params;
        uint32_t n_subports, subport_id;
        int status;

        /* Port */
        p->soft.tm.sched = rte_sched_port_config(&t->port_params);
        if (p->soft.tm.sched == NULL)
                return -1;

        /* Subport */
        n_subports = t->port_params.n_subports_per_port;
        for (subport_id = 0; subport_id < n_subports; subport_id++) {
                uint32_t n_pipes_per_subport =
                        t->port_params.n_pipes_per_subport;
                uint32_t pipe_id;

                status = rte_sched_subport_config(p->soft.tm.sched,
                        subport_id,
                        &t->subport_params[subport_id]);
                if (status) {
                        rte_sched_port_free(p->soft.tm.sched);
                        p->soft.tm.sched = NULL;
                        return -1;
                }

                /* Pipe */
                for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
                        int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
                                pipe_id;
                        int profile_id = t->pipe_to_profile[pos];

                        if (profile_id < 0)
                                continue;

                        status = rte_sched_pipe_config(p->soft.tm.sched,
                                subport_id,
                                pipe_id,
                                profile_id);
                        if (status) {
                                rte_sched_port_free(p->soft.tm.sched);
                                p->soft.tm.sched = NULL;
                                return -1;
                        }
                }
        }

        return 0;
}

void
tm_stop(struct pmd_internals *p)
{
        if (p->soft.tm.sched) {
                rte_sched_port_free(p->soft.tm.sched);
                p->soft.tm.sched = NULL;
        }
}

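/* Linear search of the node list; returns NULL if node_id is not in use. */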
static struct tm_node *
tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct tm_node_list *nl = &p->soft.tm.h.nodes;
        struct tm_node *n;

        TAILQ_FOREACH(n, nl, node)
                if (n->node_id == node_id)
                        return n;

        return NULL;
}

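/*
 * Derive the per-level node limits from the configured number of queues:
 * the TC limit is queues / queues-per-TC, the pipe limit is TCs /
 * TCs-per-pipe, the subport limit equals the pipe limit, and there is a
 * single root node at the port level.
 */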
static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
        struct pmd_internals *p = dev->data->dev_private;
        uint32_t n_queues_max = p->params.soft.tm.nb_queues;
        uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
        uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
        uint32_t n_subports_max = n_pipes_max;
        uint32_t n_root_max = 1;

        switch (level) {
        case TM_NODE_LEVEL_PORT:
                return n_root_max;
        case TM_NODE_LEVEL_SUBPORT:
                return n_subports_max;
        case TM_NODE_LEVEL_PIPE:
                return n_pipes_max;
        case TM_NODE_LEVEL_TC:
                return n_tc_max;
        case TM_NODE_LEVEL_QUEUE:
        default:
                return n_queues_max;
        }
}

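/* WRED is reported as supported only when librte_sched is built with RED
 * support (RTE_SCHED_RED).
 */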
#ifdef RTE_SCHED_RED
#define WRED_SUPPORTED                                          1
#else
#define WRED_SUPPORTED                                          0
#endif

#define STATS_MASK_DEFAULT                                      \
        (RTE_TM_STATS_N_PKTS |                                  \
        RTE_TM_STATS_N_BYTES |                                  \
        RTE_TM_STATS_N_PKTS_GREEN_DROPPED |                     \
        RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE                                        \
        (STATS_MASK_DEFAULT |                                   \
        RTE_TM_STATS_N_PKTS_QUEUED)

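/* Device-level TM capabilities. The totals that depend on the run-time
 * queue configuration are recomputed in pmd_tm_capabilities_get().
 */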
static const struct rte_tm_capabilities tm_cap = {
        .n_nodes_max = UINT32_MAX,
        .n_levels_max = TM_NODE_LEVEL_MAX,

        .non_leaf_nodes_identical = 0,
        .leaf_nodes_identical = 1,

        .shaper_n_max = UINT32_MAX,
        .shaper_private_n_max = UINT32_MAX,
        .shaper_private_dual_rate_n_max = 0,
        .shaper_private_rate_min = 1,
        .shaper_private_rate_max = UINT32_MAX,

        .shaper_shared_n_max = UINT32_MAX,
        .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
        .shaper_shared_n_shapers_per_node_max = 1,
        .shaper_shared_dual_rate_n_max = 0,
        .shaper_shared_rate_min = 1,
        .shaper_shared_rate_max = UINT32_MAX,

        .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
        .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,

        .sched_n_children_max = UINT32_MAX,
        .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
        .sched_wfq_n_children_per_group_max = UINT32_MAX,
        .sched_wfq_n_groups_max = 1,
        .sched_wfq_weight_max = UINT32_MAX,

        .cman_head_drop_supported = 0,
        .cman_wred_context_n_max = 0,
        .cman_wred_context_private_n_max = 0,
        .cman_wred_context_shared_n_max = 0,
        .cman_wred_context_shared_n_nodes_per_context_max = 0,
        .cman_wred_context_shared_n_contexts_per_node_max = 0,

        .mark_vlan_dei_supported = {0, 0, 0},
        .mark_ip_ecn_tcp_supported = {0, 0, 0},
        .mark_ip_ecn_sctp_supported = {0, 0, 0},
        .mark_ip_dscp_supported = {0, 0, 0},

        .dynamic_update_mask = 0,

        .stats_mask = STATS_MASK_QUEUE,
};

/* Traffic manager capabilities get */
static int
pmd_tm_capabilities_get(struct rte_eth_dev *dev,
        struct rte_tm_capabilities *cap,
        struct rte_tm_error *error)
{
        if (cap == NULL)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_CAPABILITIES,
                        NULL,
                        rte_strerror(EINVAL));

        memcpy(cap, &tm_cap, sizeof(*cap));

        cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

        cap->shaper_private_n_max =
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);

        cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);

        cap->shaper_n_max = cap->shaper_private_n_max +
                cap->shaper_shared_n_max;

        cap->shaper_shared_n_nodes_per_shaper_max =
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);

        cap->sched_n_children_max = RTE_MAX(
                tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
                (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);

        cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;

        if (WRED_SUPPORTED)
                cap->cman_wred_context_private_n_max =
                        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

        cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
                cap->cman_wred_context_shared_n_max;

        return 0;
}

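/* Per-level TM capability templates. The node and children limits that
 * depend on the run-time configuration are patched in
 * pmd_tm_level_capabilities_get().
 */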
static const struct rte_tm_level_capabilities tm_level_cap[] = {
        [TM_NODE_LEVEL_PORT] = {
                .n_nodes_max = 1,
                .n_nodes_nonleaf_max = 1,
                .n_nodes_leaf_max = 0,
                .non_leaf_nodes_identical = 1,
                .leaf_nodes_identical = 0,

                .nonleaf = {
                        .shaper_private_supported = 1,
                        .shaper_private_dual_rate_supported = 0,
                        .shaper_private_rate_min = 1,
                        .shaper_private_rate_max = UINT32_MAX,
                        .shaper_shared_n_max = 0,

                        .sched_n_children_max = UINT32_MAX,
                        .sched_sp_n_priorities_max = 1,
                        .sched_wfq_n_children_per_group_max = UINT32_MAX,
                        .sched_wfq_n_groups_max = 1,
                        .sched_wfq_weight_max = 1,

                        .stats_mask = STATS_MASK_DEFAULT,
                },
        },

        [TM_NODE_LEVEL_SUBPORT] = {
                .n_nodes_max = UINT32_MAX,
                .n_nodes_nonleaf_max = UINT32_MAX,
                .n_nodes_leaf_max = 0,
                .non_leaf_nodes_identical = 1,
                .leaf_nodes_identical = 0,

                .nonleaf = {
                        .shaper_private_supported = 1,
                        .shaper_private_dual_rate_supported = 0,
                        .shaper_private_rate_min = 1,
                        .shaper_private_rate_max = UINT32_MAX,
                        .shaper_shared_n_max = 0,

                        .sched_n_children_max = UINT32_MAX,
                        .sched_sp_n_priorities_max = 1,
                        .sched_wfq_n_children_per_group_max = UINT32_MAX,
                        .sched_wfq_n_groups_max = 1,
#ifdef RTE_SCHED_SUBPORT_TC_OV
                        .sched_wfq_weight_max = UINT32_MAX,
#else
                        .sched_wfq_weight_max = 1,
#endif
                        .stats_mask = STATS_MASK_DEFAULT,
                },
        },

        [TM_NODE_LEVEL_PIPE] = {
                .n_nodes_max = UINT32_MAX,
                .n_nodes_nonleaf_max = UINT32_MAX,
                .n_nodes_leaf_max = 0,
                .non_leaf_nodes_identical = 1,
                .leaf_nodes_identical = 0,

                .nonleaf = {
                        .shaper_private_supported = 1,
                        .shaper_private_dual_rate_supported = 0,
                        .shaper_private_rate_min = 1,
                        .shaper_private_rate_max = UINT32_MAX,
                        .shaper_shared_n_max = 0,

                        .sched_n_children_max =
                                RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
                        .sched_sp_n_priorities_max =
                                RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
                        .sched_wfq_n_children_per_group_max = 1,
                        .sched_wfq_n_groups_max = 0,
                        .sched_wfq_weight_max = 1,

                        .stats_mask = STATS_MASK_DEFAULT,
                },
        },

        [TM_NODE_LEVEL_TC] = {
                .n_nodes_max = UINT32_MAX,
                .n_nodes_nonleaf_max = UINT32_MAX,
                .n_nodes_leaf_max = 0,
                .non_leaf_nodes_identical = 1,
                .leaf_nodes_identical = 0,

                .nonleaf = {
                        .shaper_private_supported = 1,
                        .shaper_private_dual_rate_supported = 0,
                        .shaper_private_rate_min = 1,
                        .shaper_private_rate_max = UINT32_MAX,
                        .shaper_shared_n_max = 1,

                        .sched_n_children_max =
                                RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
                        .sched_sp_n_priorities_max = 1,
                        .sched_wfq_n_children_per_group_max =
                                RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
                        .sched_wfq_n_groups_max = 1,
                        .sched_wfq_weight_max = UINT32_MAX,

                        .stats_mask = STATS_MASK_DEFAULT,
                },
        },

        [TM_NODE_LEVEL_QUEUE] = {
                .n_nodes_max = UINT32_MAX,
                .n_nodes_nonleaf_max = 0,
                .n_nodes_leaf_max = UINT32_MAX,
                .non_leaf_nodes_identical = 0,
                .leaf_nodes_identical = 1,

                .leaf = {
                        .shaper_private_supported = 0,
                        .shaper_private_dual_rate_supported = 0,
                        .shaper_private_rate_min = 0,
                        .shaper_private_rate_max = 0,
                        .shaper_shared_n_max = 0,

                        .cman_head_drop_supported = 0,
                        .cman_wred_context_private_supported = WRED_SUPPORTED,
                        .cman_wred_context_shared_n_max = 0,

                        .stats_mask = STATS_MASK_QUEUE,
                },
        },
};

/* Traffic manager level capabilities get */
static int
pmd_tm_level_capabilities_get(struct rte_eth_dev *dev,
        uint32_t level_id,
        struct rte_tm_level_capabilities *cap,
        struct rte_tm_error *error)
{
        if (cap == NULL)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_CAPABILITIES,
                        NULL,
                        rte_strerror(EINVAL));

        if (level_id >= TM_NODE_LEVEL_MAX)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_LEVEL_ID,
                        NULL,
                        rte_strerror(EINVAL));

        memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));

        switch (level_id) {
        case TM_NODE_LEVEL_PORT:
                cap->nonleaf.sched_n_children_max =
                        tm_level_get_max_nodes(dev,
                                TM_NODE_LEVEL_SUBPORT);
                cap->nonleaf.sched_wfq_n_children_per_group_max =
                        cap->nonleaf.sched_n_children_max;
                break;

        case TM_NODE_LEVEL_SUBPORT:
                cap->n_nodes_max = tm_level_get_max_nodes(dev,
                        TM_NODE_LEVEL_SUBPORT);
                cap->n_nodes_nonleaf_max = cap->n_nodes_max;
                cap->nonleaf.sched_n_children_max =
                        tm_level_get_max_nodes(dev,
                                TM_NODE_LEVEL_PIPE);
                cap->nonleaf.sched_wfq_n_children_per_group_max =
                        cap->nonleaf.sched_n_children_max;
                break;

        case TM_NODE_LEVEL_PIPE:
                cap->n_nodes_max = tm_level_get_max_nodes(dev,
                        TM_NODE_LEVEL_PIPE);
                cap->n_nodes_nonleaf_max = cap->n_nodes_max;
                break;

        case TM_NODE_LEVEL_TC:
                cap->n_nodes_max = tm_level_get_max_nodes(dev,
                        TM_NODE_LEVEL_TC);
                cap->n_nodes_nonleaf_max = cap->n_nodes_max;
                break;

        case TM_NODE_LEVEL_QUEUE:
        default:
                cap->n_nodes_max = tm_level_get_max_nodes(dev,
                        TM_NODE_LEVEL_QUEUE);
                cap->n_nodes_leaf_max = cap->n_nodes_max;
                break;
        }

        return 0;
}

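/* Per-node TM capability templates, indexed by hierarchy level. The children
 * limits for the port and subport levels are patched in
 * pmd_tm_node_capabilities_get().
 */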
static const struct rte_tm_node_capabilities tm_node_cap[] = {
        [TM_NODE_LEVEL_PORT] = {
                .shaper_private_supported = 1,
                .shaper_private_dual_rate_supported = 0,
                .shaper_private_rate_min = 1,
                .shaper_private_rate_max = UINT32_MAX,
                .shaper_shared_n_max = 0,

                .nonleaf = {
                        .sched_n_children_max = UINT32_MAX,
                        .sched_sp_n_priorities_max = 1,
                        .sched_wfq_n_children_per_group_max = UINT32_MAX,
                        .sched_wfq_n_groups_max = 1,
                        .sched_wfq_weight_max = 1,
                },

                .stats_mask = STATS_MASK_DEFAULT,
        },

        [TM_NODE_LEVEL_SUBPORT] = {
                .shaper_private_supported = 1,
                .shaper_private_dual_rate_supported = 0,
                .shaper_private_rate_min = 1,
                .shaper_private_rate_max = UINT32_MAX,
                .shaper_shared_n_max = 0,

                .nonleaf = {
                        .sched_n_children_max = UINT32_MAX,
                        .sched_sp_n_priorities_max = 1,
                        .sched_wfq_n_children_per_group_max = UINT32_MAX,
                        .sched_wfq_n_groups_max = 1,
                        .sched_wfq_weight_max = UINT32_MAX,
                },

                .stats_mask = STATS_MASK_DEFAULT,
        },

        [TM_NODE_LEVEL_PIPE] = {
                .shaper_private_supported = 1,
                .shaper_private_dual_rate_supported = 0,
                .shaper_private_rate_min = 1,
                .shaper_private_rate_max = UINT32_MAX,
                .shaper_shared_n_max = 0,

                .nonleaf = {
                        .sched_n_children_max =
                                RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
                        .sched_sp_n_priorities_max =
                                RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
                        .sched_wfq_n_children_per_group_max = 1,
                        .sched_wfq_n_groups_max = 0,
                        .sched_wfq_weight_max = 1,
                },

                .stats_mask = STATS_MASK_DEFAULT,
        },

        [TM_NODE_LEVEL_TC] = {
                .shaper_private_supported = 1,
                .shaper_private_dual_rate_supported = 0,
                .shaper_private_rate_min = 1,
                .shaper_private_rate_max = UINT32_MAX,
                .shaper_shared_n_max = 1,

                .nonleaf = {
                        .sched_n_children_max =
                                RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
                        .sched_sp_n_priorities_max = 1,
                        .sched_wfq_n_children_per_group_max =
                                RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
                        .sched_wfq_n_groups_max = 1,
                        .sched_wfq_weight_max = UINT32_MAX,
                },

                .stats_mask = STATS_MASK_DEFAULT,
        },

        [TM_NODE_LEVEL_QUEUE] = {
                .shaper_private_supported = 0,
                .shaper_private_dual_rate_supported = 0,
                .shaper_private_rate_min = 0,
                .shaper_private_rate_max = 0,
                .shaper_shared_n_max = 0,

                .leaf = {
                        .cman_head_drop_supported = 0,
                        .cman_wred_context_private_supported = WRED_SUPPORTED,
                        .cman_wred_context_shared_n_max = 0,
                },

                .stats_mask = STATS_MASK_QUEUE,
        },
};

/* Traffic manager node capabilities get */
static int
pmd_tm_node_capabilities_get(struct rte_eth_dev *dev,
        uint32_t node_id,
        struct rte_tm_node_capabilities *cap,
        struct rte_tm_error *error)
{
        struct tm_node *tm_node;

        if (cap == NULL)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_CAPABILITIES,
                        NULL,
                        rte_strerror(EINVAL));

        tm_node = tm_node_search(dev, node_id);
        if (tm_node == NULL)
                return -rte_tm_error_set(error,
                        EINVAL,
                        RTE_TM_ERROR_TYPE_NODE_ID,
                        NULL,
                        rte_strerror(EINVAL));

        memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));

        switch (tm_node->level) {
        case TM_NODE_LEVEL_PORT:
                cap->nonleaf.sched_n_children_max =
                        tm_level_get_max_nodes(dev,
                                TM_NODE_LEVEL_SUBPORT);
                cap->nonleaf.sched_wfq_n_children_per_group_max =
                        cap->nonleaf.sched_n_children_max;
                break;

        case TM_NODE_LEVEL_SUBPORT:
                cap->nonleaf.sched_n_children_max =
                        tm_level_get_max_nodes(dev,
                                TM_NODE_LEVEL_PIPE);
                cap->nonleaf.sched_wfq_n_children_per_group_max =
                        cap->nonleaf.sched_n_children_max;
                break;

        case TM_NODE_LEVEL_PIPE:
        case TM_NODE_LEVEL_TC:
        case TM_NODE_LEVEL_QUEUE:
        default:
                break;
        }

        return 0;
}

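/* Traffic manager ops exposed to the application through the rte_tm API. */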
const struct rte_tm_ops pmd_tm_ops = {
        .capabilities_get = pmd_tm_capabilities_get,
        .level_capabilities_get = pmd_tm_level_capabilities_get,
        .node_capabilities_get = pmd_tm_node_capabilities_get,
};