4 * Copyright(c) 2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 #include <rte_malloc.h>
40 #include "rte_eth_softnic_internals.h"
41 #include "rte_eth_softnic.h"
43 #define BYTES_IN_MBPS (1000 * 1000 / 8)
/*
 * Validate and normalize the soft traffic-manager parameters against the
 * underlying device's hard rate (given in Mbps).
 *
 * NOTE(review): hard_rate is uint32_t and BYTES_IN_MBPS is an int, so the
 * multiplication below is performed in 32-bit arithmetic and can wrap
 * BEFORE the result is widened to uint64_t; a (uint64_t)hard_rate cast on
 * the first operand is needed for rates above ~34 Gbps — confirm against
 * upstream.
 */
46 tm_params_check(struct pmd_params *params, uint32_t hard_rate)
48 uint64_t hard_rate_bytes_per_sec = hard_rate * BYTES_IN_MBPS;
/* rate: when the user supplied one it must not exceed the hard rate;
 * otherwise default it to the hard rate, clamped to UINT32_MAX. */
52 if (params->soft.tm.rate) {
53 if (params->soft.tm.rate > hard_rate_bytes_per_sec)
56 params->soft.tm.rate =
57 (hard_rate_bytes_per_sec > UINT32_MAX) ?
58 UINT32_MAX : hard_rate_bytes_per_sec;
/* nb_queues: must be non-zero; rounded up to at least one full pipe's
 * worth of queues and then to the next power of two. */
62 if (params->soft.tm.nb_queues == 0)
65 if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
66 params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;
68 params->soft.tm.nb_queues =
69 rte_align32pow2(params->soft.tm.nb_queues);
/* qsize: every per-traffic-class queue size must be non-zero; each is
 * rounded up to the next power of two. */
72 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
73 if (params->soft.tm.qsize[i] == 0)
76 params->soft.tm.qsize[i] =
77 rte_align32pow2(params->soft.tm.qsize[i]);
/* enq_bsz, deq_bsz: both burst sizes must be non-zero and the dequeue
 * burst must be strictly smaller than the enqueue burst. */
80 /* enq_bsz, deq_bsz */
81 if (params->soft.tm.enq_bsz == 0 ||
82 params->soft.tm.deq_bsz == 0 ||
83 params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
/*
 * Allocate the mbuf staging arrays used by the soft TM enqueue/dequeue
 * paths. On failure of the second allocation the first one is freed, so
 * the function either fully succeeds or leaves no allocation behind.
 */
90 tm_init(struct pmd_internals *p,
91 struct pmd_params *params,
94 uint32_t enq_bsz = params->soft.tm.enq_bsz;
95 uint32_t deq_bsz = params->soft.tm.deq_bsz;
/* Enqueue staging array, sized for 2 * enq_bsz mbuf pointers
 * (presumably to hold the current and the pending burst — TODO confirm). */
97 p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
98 2 * enq_bsz * sizeof(struct rte_mbuf *),
102 if (p->soft.tm.pkts_enq == NULL)
/* Dequeue staging array, sized for deq_bsz mbuf pointers. */
105 p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
106 deq_bsz * sizeof(struct rte_mbuf *),
110 if (p->soft.tm.pkts_deq == NULL) {
/* Roll back the enqueue array so nothing leaks on failure. */
111 rte_free(p->soft.tm.pkts_enq);
/* Release the staging arrays allocated by tm_init(). rte_free(NULL) is a
 * no-op, so this is safe even if tm_init() never ran or partially failed. */
119 tm_free(struct pmd_internals *p)
121 rte_free(p->soft.tm.pkts_enq);
122 rte_free(p->soft.tm.pkts_deq);
/*
 * Instantiate the rte_sched scheduler from the committed TM parameters:
 * configure the port first, then every subport, then every pipe of every
 * subport. Any configuration failure frees the scheduler port so the
 * device is left without a half-built hierarchy.
 */
126 tm_start(struct pmd_internals *p)
128 struct tm_params *t = &p->soft.tm.params;
129 uint32_t n_subports, subport_id;
/* Port-level scheduler object. */
133 p->soft.tm.sched = rte_sched_port_config(&t->port_params);
134 if (p->soft.tm.sched == NULL)
/* Per-subport configuration. */
138 n_subports = t->port_params.n_subports_per_port;
139 for (subport_id = 0; subport_id < n_subports; subport_id++) {
140 uint32_t n_pipes_per_subport =
141 t->port_params.n_pipes_per_subport;
144 status = rte_sched_subport_config(p->soft.tm.sched,
146 &t->subport_params[subport_id]);
/* Tear down the whole port on subport config failure. */
148 rte_sched_port_free(p->soft.tm.sched);
/* Per-pipe configuration; the pipe-to-profile table is indexed by
 * (subport_id, pipe_id) flattened over TM_MAX_PIPES_PER_SUBPORT. */
153 n_pipes_per_subport = t->port_params.n_pipes_per_subport;
154 for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
155 int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
157 int profile_id = t->pipe_to_profile[pos];
162 status = rte_sched_pipe_config(p->soft.tm.sched,
/* Tear down the whole port on pipe config failure. */
167 rte_sched_port_free(p->soft.tm.sched);
/* Free the scheduler port created by tm_start(), if one exists. */
177 tm_stop(struct pmd_internals *p)
179 if (p->soft.tm.sched)
180 rte_sched_port_free(p->soft.tm.sched);
/* Linear search of the TM hierarchy node list for the node with the given
 * id; returns the matching node (the not-found return path is outside the
 * visible lines of this chunk — presumably NULL). */
183 static struct tm_node *
184 tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
186 struct pmd_internals *p = dev->data->dev_private;
187 struct tm_node_list *nl = &p->soft.tm.h.nodes;
190 TAILQ_FOREACH(n, nl, node)
191 if (n->node_id == node_id)
/*
 * Maximum number of nodes allowed at a given hierarchy level, derived from
 * the configured total queue count: queues / queues-per-TC gives the TC
 * maximum, TCs / TCs-per-pipe gives the pipe maximum, and the subport
 * maximum equals the pipe maximum (one pipe per subport in the limit).
 */
198 tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
200 struct pmd_internals *p = dev->data->dev_private;
201 uint32_t n_queues_max = p->params.soft.tm.nb_queues;
202 uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
203 uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
204 uint32_t n_subports_max = n_pipes_max;
205 uint32_t n_root_max = 1;
208 case TM_NODE_LEVEL_PORT:
210 case TM_NODE_LEVEL_SUBPORT:
211 return n_subports_max;
212 case TM_NODE_LEVEL_PIPE:
214 case TM_NODE_LEVEL_TC:
216 case TM_NODE_LEVEL_QUEUE:
/* WRED capability flag: both values appear here because the selecting
 * preprocessor conditional (presumably #ifdef RTE_SCHED_RED) lies outside
 * the visible lines of this chunk — confirm against the full file. */
223 #define WRED_SUPPORTED 1
225 #define WRED_SUPPORTED 0
/* Stats supported on every node: packet/byte counters plus green drops. */
228 #define STATS_MASK_DEFAULT \
229 (RTE_TM_STATS_N_PKTS | \
230 RTE_TM_STATS_N_BYTES | \
231 RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
232 RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
/* Leaf (queue) nodes additionally report the current queued packet count. */
234 #define STATS_MASK_QUEUE \
235 (STATS_MASK_DEFAULT | \
236 RTE_TM_STATS_N_PKTS_QUEUED)
/*
 * Static device-level TM capability template. Fields that depend on the
 * runtime configuration (n_nodes_max, shaper counts, WRED context counts,
 * sched_n_children_max, ...) are overwritten per-device in
 * pmd_tm_capabilities_get(); the values here are placeholders.
 */
238 static const struct rte_tm_capabilities tm_cap = {
239 .n_nodes_max = UINT32_MAX,
240 .n_levels_max = TM_NODE_LEVEL_MAX,
242 .non_leaf_nodes_identical = 0,
243 .leaf_nodes_identical = 1,
245 .shaper_n_max = UINT32_MAX,
246 .shaper_private_n_max = UINT32_MAX,
247 .shaper_private_dual_rate_n_max = 0,
248 .shaper_private_rate_min = 1,
249 .shaper_private_rate_max = UINT32_MAX,
251 .shaper_shared_n_max = UINT32_MAX,
252 .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
253 .shaper_shared_n_shapers_per_node_max = 1,
254 .shaper_shared_dual_rate_n_max = 0,
255 .shaper_shared_rate_min = 1,
256 .shaper_shared_rate_max = UINT32_MAX,
/* Packet length adjustment is fixed to Ethernet framing overhead + FCS. */
258 .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
259 .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
261 .sched_n_children_max = UINT32_MAX,
262 .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
263 .sched_wfq_n_children_per_group_max = UINT32_MAX,
264 .sched_wfq_n_groups_max = 1,
265 .sched_wfq_weight_max = UINT32_MAX,
267 .cman_head_drop_supported = 0,
268 .cman_wred_context_n_max = 0,
269 .cman_wred_context_private_n_max = 0,
270 .cman_wred_context_shared_n_max = 0,
271 .cman_wred_context_shared_n_nodes_per_context_max = 0,
272 .cman_wred_context_shared_n_contexts_per_node_max = 0,
/* No packet marking support. */
274 .mark_vlan_dei_supported = {0, 0, 0},
275 .mark_ip_ecn_tcp_supported = {0, 0, 0},
276 .mark_ip_ecn_sctp_supported = {0, 0, 0},
277 .mark_ip_dscp_supported = {0, 0, 0},
279 .dynamic_update_mask = 0,
281 .stats_mask = STATS_MASK_QUEUE,
284 /* Traffic manager capabilities get: copy the static template, then patch
 * in the device-specific limits derived from tm_level_get_max_nodes().
 * Fails with EINVAL when the caller passes a NULL cap pointer (the guard
 * condition itself is outside the visible lines of this chunk). */
286 pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
287 struct rte_tm_capabilities *cap,
288 struct rte_tm_error *error)
291 return -rte_tm_error_set(error,
293 RTE_TM_ERROR_TYPE_CAPABILITIES,
295 rte_strerror(EINVAL));
297 memcpy(cap, &tm_cap, sizeof(*cap));
/* Total node budget: sum of the per-level maxima. */
299 cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
300 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
301 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
302 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
303 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
/* Private shapers: one per node at every non-leaf level (queues have
 * no private shaper). */
305 cap->shaper_private_n_max =
306 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
307 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
308 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
309 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);
/* Shared shapers: one per traffic class per subport. */
311 cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
312 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);
314 cap->shaper_n_max = cap->shaper_private_n_max +
315 cap->shaper_shared_n_max;
317 cap->shaper_shared_n_nodes_per_shaper_max =
318 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);
/* A node's fan-out is bounded by whichever is larger: the pipe count
 * (port/subport children) or the TC count per pipe. */
320 cap->sched_n_children_max = RTE_MAX(
321 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
322 (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);
324 cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
/* Private WRED contexts: one per queue (when WRED is compiled in). */
327 cap->cman_wred_context_private_n_max =
328 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
330 cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
331 cap->cman_wred_context_shared_n_max;
/*
 * Static per-level TM capability templates, indexed by tm_node_level.
 * Counts that depend on the runtime configuration are overwritten in
 * pmd_tm_level_capabilities_get(); the values here are placeholders.
 */
336 static const struct rte_tm_level_capabilities tm_level_cap[] = {
/* Root: exactly one non-leaf node with a private shaper; its children
 * (subports) are scheduled as a single WFQ group with equal weights. */
337 [TM_NODE_LEVEL_PORT] = {
339 .n_nodes_nonleaf_max = 1,
340 .n_nodes_leaf_max = 0,
341 .non_leaf_nodes_identical = 1,
342 .leaf_nodes_identical = 0,
345 .shaper_private_supported = 1,
346 .shaper_private_dual_rate_supported = 0,
347 .shaper_private_rate_min = 1,
348 .shaper_private_rate_max = UINT32_MAX,
349 .shaper_shared_n_max = 0,
351 .sched_n_children_max = UINT32_MAX,
352 .sched_sp_n_priorities_max = 1,
353 .sched_wfq_n_children_per_group_max = UINT32_MAX,
354 .sched_wfq_n_groups_max = 1,
355 .sched_wfq_weight_max = 1,
357 .stats_mask = STATS_MASK_DEFAULT,
/* Subport level: non-leaf, private shaper; WFQ weight range depends on
 * whether TC oversubscription is compiled in. */
361 [TM_NODE_LEVEL_SUBPORT] = {
362 .n_nodes_max = UINT32_MAX,
363 .n_nodes_nonleaf_max = UINT32_MAX,
364 .n_nodes_leaf_max = 0,
365 .non_leaf_nodes_identical = 1,
366 .leaf_nodes_identical = 0,
369 .shaper_private_supported = 1,
370 .shaper_private_dual_rate_supported = 0,
371 .shaper_private_rate_min = 1,
372 .shaper_private_rate_max = UINT32_MAX,
373 .shaper_shared_n_max = 0,
375 .sched_n_children_max = UINT32_MAX,
376 .sched_sp_n_priorities_max = 1,
377 .sched_wfq_n_children_per_group_max = UINT32_MAX,
378 .sched_wfq_n_groups_max = 1,
379 #ifdef RTE_SCHED_SUBPORT_TC_OV
380 .sched_wfq_weight_max = UINT32_MAX,
382 .sched_wfq_weight_max = 1,
384 .stats_mask = STATS_MASK_DEFAULT,
/* Pipe level: non-leaf, private shaper; children (TCs) are scheduled by
 * strict priority only, so WFQ is effectively disabled. */
388 [TM_NODE_LEVEL_PIPE] = {
389 .n_nodes_max = UINT32_MAX,
390 .n_nodes_nonleaf_max = UINT32_MAX,
391 .n_nodes_leaf_max = 0,
392 .non_leaf_nodes_identical = 1,
393 .leaf_nodes_identical = 0,
396 .shaper_private_supported = 1,
397 .shaper_private_dual_rate_supported = 0,
398 .shaper_private_rate_min = 1,
399 .shaper_private_rate_max = UINT32_MAX,
400 .shaper_shared_n_max = 0,
402 .sched_n_children_max =
403 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
404 .sched_sp_n_priorities_max =
405 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
406 .sched_wfq_n_children_per_group_max = 1,
407 .sched_wfq_n_groups_max = 0,
408 .sched_wfq_weight_max = 1,
410 .stats_mask = STATS_MASK_DEFAULT,
/* TC level: non-leaf, private shaper plus one shared shaper; children
 * (queues) form one WFQ group. */
414 [TM_NODE_LEVEL_TC] = {
415 .n_nodes_max = UINT32_MAX,
416 .n_nodes_nonleaf_max = UINT32_MAX,
417 .n_nodes_leaf_max = 0,
418 .non_leaf_nodes_identical = 1,
419 .leaf_nodes_identical = 0,
422 .shaper_private_supported = 1,
423 .shaper_private_dual_rate_supported = 0,
424 .shaper_private_rate_min = 1,
425 .shaper_private_rate_max = UINT32_MAX,
426 .shaper_shared_n_max = 1,
428 .sched_n_children_max =
429 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
430 .sched_sp_n_priorities_max = 1,
431 .sched_wfq_n_children_per_group_max =
432 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
433 .sched_wfq_n_groups_max = 1,
434 .sched_wfq_weight_max = UINT32_MAX,
436 .stats_mask = STATS_MASK_DEFAULT,
/* Queue level: leaf nodes only; no shaping, optional private WRED. */
440 [TM_NODE_LEVEL_QUEUE] = {
441 .n_nodes_max = UINT32_MAX,
442 .n_nodes_nonleaf_max = 0,
443 .n_nodes_leaf_max = UINT32_MAX,
444 .non_leaf_nodes_identical = 0,
445 .leaf_nodes_identical = 1,
448 .shaper_private_supported = 0,
449 .shaper_private_dual_rate_supported = 0,
450 .shaper_private_rate_min = 0,
451 .shaper_private_rate_max = 0,
452 .shaper_shared_n_max = 0,
454 .cman_head_drop_supported = 0,
455 .cman_wred_context_private_supported = WRED_SUPPORTED,
456 .cman_wred_context_shared_n_max = 0,
458 .stats_mask = STATS_MASK_QUEUE,
463 /* Traffic manager level capabilities get: validate the arguments, copy the
 * static per-level template, then patch in the device-specific node counts
 * derived from tm_level_get_max_nodes(). The NULL-cap guard condition is
 * outside the visible lines of this chunk. */
465 pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
467 struct rte_tm_level_capabilities *cap,
468 struct rte_tm_error *error)
471 return -rte_tm_error_set(error,
473 RTE_TM_ERROR_TYPE_CAPABILITIES,
475 rte_strerror(EINVAL));
/* Reject out-of-range level ids before indexing tm_level_cap[]. */
477 if (level_id >= TM_NODE_LEVEL_MAX)
478 return -rte_tm_error_set(error,
480 RTE_TM_ERROR_TYPE_LEVEL_ID,
482 rte_strerror(EINVAL));
484 memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));
487 case TM_NODE_LEVEL_PORT:
/* Port fan-out = maximum number of subports. */
488 cap->nonleaf.sched_n_children_max =
489 tm_level_get_max_nodes(dev,
490 TM_NODE_LEVEL_SUBPORT);
491 cap->nonleaf.sched_wfq_n_children_per_group_max =
492 cap->nonleaf.sched_n_children_max;
495 case TM_NODE_LEVEL_SUBPORT:
496 cap->n_nodes_max = tm_level_get_max_nodes(dev,
497 TM_NODE_LEVEL_SUBPORT);
498 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
499 cap->nonleaf.sched_n_children_max =
500 tm_level_get_max_nodes(dev,
502 cap->nonleaf.sched_wfq_n_children_per_group_max =
503 cap->nonleaf.sched_n_children_max;
506 case TM_NODE_LEVEL_PIPE:
507 cap->n_nodes_max = tm_level_get_max_nodes(dev,
509 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
512 case TM_NODE_LEVEL_TC:
513 cap->n_nodes_max = tm_level_get_max_nodes(dev,
515 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
518 case TM_NODE_LEVEL_QUEUE:
520 cap->n_nodes_max = tm_level_get_max_nodes(dev,
521 TM_NODE_LEVEL_QUEUE);
522 cap->n_nodes_leaf_max = cap->n_nodes_max;
/*
 * Static per-node TM capability templates, indexed by the node's level.
 * These mirror the per-level templates in tm_level_cap[]; runtime-dependent
 * counts are overwritten in pmd_tm_node_capabilities_get().
 */
529 static const struct rte_tm_node_capabilities tm_node_cap[] = {
/* Root node: private shaper, single WFQ group with equal weights. */
530 [TM_NODE_LEVEL_PORT] = {
531 .shaper_private_supported = 1,
532 .shaper_private_dual_rate_supported = 0,
533 .shaper_private_rate_min = 1,
534 .shaper_private_rate_max = UINT32_MAX,
535 .shaper_shared_n_max = 0,
538 .sched_n_children_max = UINT32_MAX,
539 .sched_sp_n_priorities_max = 1,
540 .sched_wfq_n_children_per_group_max = UINT32_MAX,
541 .sched_wfq_n_groups_max = 1,
542 .sched_wfq_weight_max = 1,
545 .stats_mask = STATS_MASK_DEFAULT,
/* Subport node: private shaper, single WFQ group, weighted children. */
548 [TM_NODE_LEVEL_SUBPORT] = {
549 .shaper_private_supported = 1,
550 .shaper_private_dual_rate_supported = 0,
551 .shaper_private_rate_min = 1,
552 .shaper_private_rate_max = UINT32_MAX,
553 .shaper_shared_n_max = 0,
556 .sched_n_children_max = UINT32_MAX,
557 .sched_sp_n_priorities_max = 1,
558 .sched_wfq_n_children_per_group_max = UINT32_MAX,
559 .sched_wfq_n_groups_max = 1,
560 .sched_wfq_weight_max = UINT32_MAX,
563 .stats_mask = STATS_MASK_DEFAULT,
/* Pipe node: private shaper; TCs scheduled by strict priority only. */
566 [TM_NODE_LEVEL_PIPE] = {
567 .shaper_private_supported = 1,
568 .shaper_private_dual_rate_supported = 0,
569 .shaper_private_rate_min = 1,
570 .shaper_private_rate_max = UINT32_MAX,
571 .shaper_shared_n_max = 0,
574 .sched_n_children_max =
575 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
576 .sched_sp_n_priorities_max =
577 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
578 .sched_wfq_n_children_per_group_max = 1,
579 .sched_wfq_n_groups_max = 0,
580 .sched_wfq_weight_max = 1,
583 .stats_mask = STATS_MASK_DEFAULT,
/* TC node: private shaper plus one shared shaper; queues in one WFQ
 * group. */
586 [TM_NODE_LEVEL_TC] = {
587 .shaper_private_supported = 1,
588 .shaper_private_dual_rate_supported = 0,
589 .shaper_private_rate_min = 1,
590 .shaper_private_rate_max = UINT32_MAX,
591 .shaper_shared_n_max = 1,
594 .sched_n_children_max =
595 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
596 .sched_sp_n_priorities_max = 1,
597 .sched_wfq_n_children_per_group_max =
598 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
599 .sched_wfq_n_groups_max = 1,
600 .sched_wfq_weight_max = UINT32_MAX,
603 .stats_mask = STATS_MASK_DEFAULT,
/* Queue node: leaf; no shaping, optional private WRED context. */
606 [TM_NODE_LEVEL_QUEUE] = {
607 .shaper_private_supported = 0,
608 .shaper_private_dual_rate_supported = 0,
609 .shaper_private_rate_min = 0,
610 .shaper_private_rate_max = 0,
611 .shaper_shared_n_max = 0,
615 .cman_head_drop_supported = 0,
616 .cman_wred_context_private_supported = WRED_SUPPORTED,
617 .cman_wred_context_shared_n_max = 0,
620 .stats_mask = STATS_MASK_QUEUE,
624 /* Traffic manager node capabilities get: validate the arguments, look up
 * the node to learn its level, copy the matching static template, then
 * patch in the runtime fan-out limits for port/subport nodes. The NULL-cap
 * and node-not-found guard conditions are outside the visible lines of
 * this chunk. */
626 pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
628 struct rte_tm_node_capabilities *cap,
629 struct rte_tm_error *error)
631 struct tm_node *tm_node;
634 return -rte_tm_error_set(error,
636 RTE_TM_ERROR_TYPE_CAPABILITIES,
638 rte_strerror(EINVAL));
/* The node must already exist in the hierarchy. */
640 tm_node = tm_node_search(dev, node_id)
642 return -rte_tm_error_set(error,
644 RTE_TM_ERROR_TYPE_NODE_ID,
646 rte_strerror(EINVAL));
648 memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));
650 switch (tm_node->level) {
651 case TM_NODE_LEVEL_PORT:
/* Port fan-out = maximum number of subports. */
652 cap->nonleaf.sched_n_children_max =
653 tm_level_get_max_nodes(dev,
654 TM_NODE_LEVEL_SUBPORT);
655 cap->nonleaf.sched_wfq_n_children_per_group_max =
656 cap->nonleaf.sched_n_children_max;
659 case TM_NODE_LEVEL_SUBPORT:
660 cap->nonleaf.sched_n_children_max =
661 tm_level_get_max_nodes(dev,
663 cap->nonleaf.sched_wfq_n_children_per_group_max =
664 cap->nonleaf.sched_n_children_max;
/* Pipe/TC/queue nodes use the static template unchanged. */
667 case TM_NODE_LEVEL_PIPE:
668 case TM_NODE_LEVEL_TC:
669 case TM_NODE_LEVEL_QUEUE:
677 const struct rte_tm_ops pmd_tm_ops = {
678 .capabilities_get = pmd_tm_capabilities_get,
679 .level_capabilities_get = pmd_tm_level_capabilities_get,
680 .node_capabilities_get = pmd_tm_node_capabilities_get,