/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_tm_driver.h>
#include <rte_sched.h>
#include <rte_ethdev_driver.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_bus_ifpga.h>
#include <ifpga_logs.h>

#include "ipn3ke_rawdev_api.h"
#include "ipn3ke_logs.h"
#include "ipn3ke_ethdev.h"

#define BYTES_IN_MBPS (1000 * 1000 / 8)
#define SUBPORT_TC_PERIOD 10
#define PIPE_TC_PERIOD 40

struct ipn3ke_tm_shaper_params_range_type {
	uint32_t m1;	/* lowest mantissa covered by this range */
	uint32_t m2;	/* highest mantissa covered by this range */
	uint32_t exp;	/* exponent programmed into hardware */
	uint32_t exp2;	/* 2^exp, kept to avoid a shift at lookup time */
	uint32_t low;	/* lowest rate in this range: m1 * 4 * exp2 */
	uint32_t high;	/* highest rate in this range: m2 * 4 * exp2 */
};

struct ipn3ke_tm_shaper_params_range_type ipn3ke_tm_shaper_params_rang[] = {
	{  0,    1,  0,     1,        0,         4},
	{  2,    3,  0,     1,        8,        12},
	{  4,    7,  0,     1,       16,        28},
	{  8,   15,  0,     1,       32,        60},
	{ 16,   31,  0,     1,       64,       124},
	{ 32,   63,  0,     1,      128,       252},
	{ 64,  127,  0,     1,      256,       508},
	{128,  255,  0,     1,      512,      1020},
	{256,  511,  0,     1,     1024,      2044},
	{512, 1023,  0,     1,     2048,      4092},
	{512, 1023,  1,     2,     4096,      8184},
	{512, 1023,  2,     4,     8192,     16368},
	{512, 1023,  3,     8,    16384,     32736},
	{512, 1023,  4,    16,    32768,     65472},
	{512, 1023,  5,    32,    65536,    130944},
	{512, 1023,  6,    64,   131072,    261888},
	{512, 1023,  7,   128,   262144,    523776},
	{512, 1023,  8,   256,   524288,   1047552},
	{512, 1023,  9,   512,  1048576,   2095104},
	{512, 1023, 10,  1024,  2097152,   4190208},
	{512, 1023, 11,  2048,  4194304,   8380416},
	{512, 1023, 12,  4096,  8388608,  16760832},
	{512, 1023, 13,  8192, 16777216,  33521664},
	{512, 1023, 14, 16384, 33554432,  67043328},
	{512, 1023, 15, 32768, 67108864, 134086656},
};

#define IPN3KE_TM_SHAPER_RANGE_NUM (sizeof(ipn3ke_tm_shaper_params_rang) / \
	sizeof(struct ipn3ke_tm_shaper_params_range_type))

#define IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX \
	(ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)

#define IPN3KE_TM_SHAPER_PEAK_RATE_MAX \
	(ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)

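/*
 * Added commentary (not in the original source): the table above
 * decomposes a shaper rate into the mantissa/exponent pair that
 * ipn3ke_tm_shaper_parame_trans() stores and that the node-write path
 * later programs as ((e << 10) | m).  A rate selects the row whose
 * [low, high] span contains it, then m = (rate / 4) / 2^e.  For
 * example, rate 1000 lands in the row {128, 255, 0, 1, 512, 1020},
 * giving e = 0 and m = (1000 / 4) / 1 = 250, and m * 4 * 2^e
 * reproduces the requested 1000.
 */
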
int
ipn3ke_hw_tm_init(struct ipn3ke_hw *hw)
{
#define SCRATCH_DATA 0xABCDEF
	struct ipn3ke_tm_node *nodes;
	struct ipn3ke_tm_tdrop_profile *tdrop_profile;
	int node_num;
	int i;

#if IPN3KE_TM_SCRATCH_RW
	uint32_t scratch_data;
	IPN3KE_MASK_WRITE_REG(hw,
	scratch_data = IPN3KE_MASK_READ_REG(hw,
	if (scratch_data != SCRATCH_DATA)
		return -EINVAL;
#endif

	/* alloc memory for all hierarchy nodes */
	node_num = hw->port_num +
		IPN3KE_TM_VT_NODE_NUM +
		IPN3KE_TM_COS_NODE_NUM;

	nodes = rte_zmalloc("ipn3ke_tm_nodes",
		sizeof(struct ipn3ke_tm_node) * node_num,
		0);
	if (!nodes)
		return -ENOMEM;

	/* alloc memory for Tail Drop Profile */
	tdrop_profile = rte_zmalloc("ipn3ke_tm_tdrop_profile",
		sizeof(struct ipn3ke_tm_tdrop_profile) *
		IPN3KE_TM_TDROP_PROFILE_NUM,
		0);
	if (!tdrop_profile) {
		rte_free(nodes);
		return -ENOMEM;
	}

	hw->port_nodes = nodes;
	hw->vt_nodes = hw->port_nodes + hw->port_num;
	hw->cos_nodes = hw->vt_nodes + IPN3KE_TM_VT_NODE_NUM;
	hw->tdrop_profile = tdrop_profile;
	hw->tdrop_profile_num = IPN3KE_TM_TDROP_PROFILE_NUM;

	for (i = 0, nodes = hw->port_nodes;
		i < hw->port_num;
		i++, nodes++) {
		nodes->node_index = i;
		nodes->level = IPN3KE_TM_NODE_LEVEL_PORT;
		nodes->tm_id = RTE_TM_NODE_ID_NULL;
		nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
		nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		nodes->weight = 0;
		nodes->parent_node = NULL;
		nodes->shaper_profile.valid = 0;
		nodes->tdrop_profile = NULL;
		nodes->n_children = 0;
		TAILQ_INIT(&nodes->children_node_list);
	}

	for (i = 0, nodes = hw->vt_nodes;
		i < IPN3KE_TM_VT_NODE_NUM;
		i++, nodes++) {
		nodes->node_index = i;
		nodes->level = IPN3KE_TM_NODE_LEVEL_VT;
		nodes->tm_id = RTE_TM_NODE_ID_NULL;
		nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
		nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		nodes->weight = 0;
		nodes->parent_node = NULL;
		nodes->shaper_profile.valid = 0;
		nodes->tdrop_profile = NULL;
		nodes->n_children = 0;
		TAILQ_INIT(&nodes->children_node_list);
	}

	for (i = 0, nodes = hw->cos_nodes;
		i < IPN3KE_TM_COS_NODE_NUM;
		i++, nodes++) {
		nodes->node_index = i;
		nodes->level = IPN3KE_TM_NODE_LEVEL_COS;
		nodes->tm_id = RTE_TM_NODE_ID_NULL;
		nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
		nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		nodes->weight = 0;
		nodes->parent_node = NULL;
		nodes->shaper_profile.valid = 0;
		nodes->tdrop_profile = NULL;
		nodes->n_children = 0;
		TAILQ_INIT(&nodes->children_node_list);
	}

	for (i = 0, tdrop_profile = hw->tdrop_profile;
		i < IPN3KE_TM_TDROP_PROFILE_NUM;
		i++, tdrop_profile++) {
		tdrop_profile->tdrop_profile_id = i;
		tdrop_profile->n_users = 0;
		tdrop_profile->valid = 0;
	}

	return 0;
}

void
ipn3ke_tm_init(struct ipn3ke_rpst *rpst)
{
	struct ipn3ke_tm_internals *tm;
	struct ipn3ke_tm_node *port_node;

	tm = &rpst->tm;

	port_node = &rpst->hw->port_nodes[rpst->port_id];
	tm->h.port_node = port_node;

	tm->h.n_shaper_profiles = 0;
	tm->h.n_tdrop_profiles = 0;
	tm->h.n_vt_nodes = 0;
	tm->h.n_cos_nodes = 0;

	tm->h.port_commit_node = NULL;
	TAILQ_INIT(&tm->h.vt_commit_node_list);
	TAILQ_INIT(&tm->h.cos_commit_node_list);

	tm->hierarchy_frozen = 0;
	tm->tm_id = rpst->port_id;
}

static struct ipn3ke_tm_shaper_profile *
ipn3ke_hw_tm_shaper_profile_search(struct ipn3ke_hw *hw,
	uint32_t shaper_profile_id, struct rte_tm_error *error)
{
	struct ipn3ke_tm_shaper_profile *sp = NULL;
	uint32_t level_of_node_id;
	uint32_t node_index;

	/* Shaper profile ID must not be NONE. */
	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE) {
		rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

		return NULL;
	}

	level_of_node_id = shaper_profile_id / IPN3KE_TM_NODE_LEVEL_MOD;
	node_index = shaper_profile_id % IPN3KE_TM_NODE_LEVEL_MOD;

	switch (level_of_node_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		if (node_index >= hw->port_num)
			rte_tm_error_set(error,
				EEXIST,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
				NULL,
				rte_strerror(EEXIST));
		else
			sp = &hw->port_nodes[node_index].shaper_profile;

		break;
	case IPN3KE_TM_NODE_LEVEL_VT:
		if (node_index >= IPN3KE_TM_VT_NODE_NUM)
			rte_tm_error_set(error,
				EEXIST,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
				NULL,
				rte_strerror(EEXIST));
		else
			sp = &hw->vt_nodes[node_index].shaper_profile;

		break;
	case IPN3KE_TM_NODE_LEVEL_COS:
		if (node_index >= IPN3KE_TM_COS_NODE_NUM)
			rte_tm_error_set(error,
				EEXIST,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
				NULL,
				rte_strerror(EEXIST));
		else
			sp = &hw->cos_nodes[node_index].shaper_profile;

		break;
	default:
		rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));
	}

	return sp;
}

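/*
 * Added commentary: node IDs and shaper profile IDs share one encoding,
 * id = level * IPN3KE_TM_NODE_LEVEL_MOD + index, and every hierarchy
 * node embeds exactly one private shaper profile, so a profile ID
 * resolves to the shaper_profile member of the node it belongs to.
 * ipn3ke_tm_node_add() relies on this by requiring
 * params->shaper_profile_id to be either NONE or the node's own ID.
 */
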
static struct ipn3ke_tm_tdrop_profile *
ipn3ke_hw_tm_tdrop_profile_search(struct ipn3ke_hw *hw,
	uint32_t tdrop_profile_id)
{
	struct ipn3ke_tm_tdrop_profile *tdrop_profile;

	if (tdrop_profile_id >= hw->tdrop_profile_num)
		return NULL;

	tdrop_profile = &hw->tdrop_profile[tdrop_profile_id];
	if (tdrop_profile->valid)
		return tdrop_profile;

	return NULL;
}

static struct ipn3ke_tm_node *
ipn3ke_hw_tm_node_search(struct ipn3ke_hw *hw, uint32_t tm_id,
	uint32_t node_id, uint32_t state_mask)
{
	uint32_t level_of_node_id;
	uint32_t node_index;
	struct ipn3ke_tm_node *n;

	level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
	node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;

	switch (level_of_node_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		if (node_index >= hw->port_num)
			return NULL;
		n = &hw->port_nodes[node_index];

		break;
	case IPN3KE_TM_NODE_LEVEL_VT:
		if (node_index >= IPN3KE_TM_VT_NODE_NUM)
			return NULL;
		n = &hw->vt_nodes[node_index];

		break;
	case IPN3KE_TM_NODE_LEVEL_COS:
		if (node_index >= IPN3KE_TM_COS_NODE_NUM)
			return NULL;
		n = &hw->cos_nodes[node_index];

		break;
	default:
		return NULL;
	}

	/* Check tm node status */
	if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE) {
		if (n->tm_id != RTE_TM_NODE_ID_NULL ||
			n->parent_node_id != RTE_TM_NODE_ID_NULL ||
			n->parent_node != NULL ||
			n->n_children > 0)
			IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
	} else if (n->node_state < IPN3KE_TM_NODE_STATE_MAX) {
		if (n->tm_id == RTE_TM_NODE_ID_NULL ||
			(level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
				n->parent_node_id == RTE_TM_NODE_ID_NULL) ||
			(level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
				n->parent_node == NULL)) {
			IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
		}
	} else {
		IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
	}

	if (IPN3KE_BIT_ISSET(state_mask, n->node_state)) {
		if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE)
			return n;
		else if (n->tm_id == tm_id)
			return n;
		else
			return NULL;
	} else {
		return NULL;
	}
}

/* Traffic manager node type get */
static int
ipn3ke_pmd_tm_node_type_get(struct rte_eth_dev *dev,
	uint32_t node_id, int *is_leaf, struct rte_tm_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	uint32_t tm_id;
	struct ipn3ke_tm_node *node;
	uint32_t state_mask;

	if (is_leaf == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	tm_id = tm->tm_id;

	state_mask = 0;
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
	node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
	if (node_id == RTE_TM_NODE_ID_NULL ||
		node == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	*is_leaf = (node->level == IPN3KE_TM_NODE_LEVEL_COS) ? 1 : 0;

	return 0;
}

#define WRED_SUPPORTED 0

#define STATS_MASK_DEFAULT \
	(RTE_TM_STATS_N_PKTS | \
	RTE_TM_STATS_N_BYTES | \
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE \
	(STATS_MASK_DEFAULT | RTE_TM_STATS_N_PKTS_QUEUED)

/* Traffic manager capabilities get */
static int
ipn3ke_tm_capabilities_get(__rte_unused struct rte_eth_dev *dev,
	struct rte_tm_capabilities *cap, struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(*cap));

	cap->n_nodes_max = 1 + IPN3KE_TM_COS_NODE_NUM + IPN3KE_TM_VT_NODE_NUM;
	cap->n_levels_max = IPN3KE_TM_NODE_LEVEL_MAX;

	cap->non_leaf_nodes_identical = 0;
	cap->leaf_nodes_identical = 1;

	cap->shaper_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
	cap->shaper_private_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 1;
	cap->shaper_private_rate_max = 1 + IPN3KE_TM_VT_NODE_NUM;

	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;

	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;

	cap->sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
	cap->sched_sp_n_priorities_max = 3;
	cap->sched_wfq_n_children_per_group_max = UINT32_MAX;
	cap->sched_wfq_n_groups_max = 1;
	cap->sched_wfq_weight_max = UINT32_MAX;

	cap->cman_wred_packet_mode_supported = 0;
	cap->cman_wred_byte_mode_supported = 0;
	cap->cman_head_drop_supported = 0;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;

	/*
	 * cap->mark_vlan_dei_supported = {0, 0, 0};
	 * cap->mark_ip_ecn_tcp_supported = {0, 0, 0};
	 * cap->mark_ip_ecn_sctp_supported = {0, 0, 0};
	 * cap->mark_ip_dscp_supported = {0, 0, 0};
	 */

	cap->dynamic_update_mask = 0;

	return 0;
}

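/*
 * Illustrative application-side query (not part of this driver);
 * port_id is a hypothetical ethdev port bound to this PMD:
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_capabilities_get(port_id, &cap, &err) == 0)
 *		printf("TM levels: %u, max nodes: %u\n",
 *			cap.n_levels_max, cap.n_nodes_max);
 */
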
/* Traffic manager level capabilities get */
static int
ipn3ke_tm_level_capabilities_get(struct rte_eth_dev *dev,
	uint32_t level_id, struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);

	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	if (level_id >= IPN3KE_TM_NODE_LEVEL_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(*cap));

	switch (level_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		cap->n_nodes_max = hw->port_num;
		cap->n_nodes_nonleaf_max = IPN3KE_TM_VT_NODE_NUM;
		cap->n_nodes_leaf_max = 0;
		cap->non_leaf_nodes_identical = 0;
		cap->leaf_nodes_identical = 0;

		cap->nonleaf.shaper_private_supported = 0;
		cap->nonleaf.shaper_private_dual_rate_supported = 0;
		cap->nonleaf.shaper_private_rate_min = 1;
		cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
		cap->nonleaf.shaper_shared_n_max = 0;

		cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 0;

		cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;

		break;
	case IPN3KE_TM_NODE_LEVEL_VT:
		cap->n_nodes_max = IPN3KE_TM_VT_NODE_NUM;
		cap->n_nodes_nonleaf_max = IPN3KE_TM_COS_NODE_NUM;
		cap->n_nodes_leaf_max = 0;
		cap->non_leaf_nodes_identical = 0;
		cap->leaf_nodes_identical = 0;

		cap->nonleaf.shaper_private_supported = 0;
		cap->nonleaf.shaper_private_dual_rate_supported = 0;
		cap->nonleaf.shaper_private_rate_min = 1;
		cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
		cap->nonleaf.shaper_shared_n_max = 0;

		cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 0;

		cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;

		break;
	case IPN3KE_TM_NODE_LEVEL_COS:
		cap->n_nodes_max = IPN3KE_TM_COS_NODE_NUM;
		cap->n_nodes_nonleaf_max = 0;
		cap->n_nodes_leaf_max = IPN3KE_TM_COS_NODE_NUM;
		cap->non_leaf_nodes_identical = 0;
		cap->leaf_nodes_identical = 0;

		cap->leaf.shaper_private_supported = 0;
		cap->leaf.shaper_private_dual_rate_supported = 0;
		cap->leaf.shaper_private_rate_min = 0;
		cap->leaf.shaper_private_rate_max = 0;
		cap->leaf.shaper_shared_n_max = 0;

		cap->leaf.cman_head_drop_supported = 0;
		cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
		cap->leaf.cman_wred_byte_mode_supported = 0;
		cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
		cap->leaf.cman_wred_context_shared_n_max = 0;

		cap->leaf.stats_mask = STATS_MASK_QUEUE;

		break;
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));
	}

	return 0;
}

/* Traffic manager node capabilities get */
static int
ipn3ke_tm_node_capabilities_get(struct rte_eth_dev *dev,
	uint32_t node_id, struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
{
	struct ipn3ke_rpst *representor = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	uint32_t tm_id;
	struct ipn3ke_tm_node *tm_node;
	uint32_t state_mask;

	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	tm_id = tm->tm_id;

	state_mask = 0;
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
	tm_node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
	if (tm_node == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	if (tm_node->tm_id != representor->port_id)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(*cap));

	switch (tm_node->level) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		cap->shaper_private_supported = 1;
		cap->shaper_private_dual_rate_supported = 0;
		cap->shaper_private_rate_min = 1;
		cap->shaper_private_rate_max = UINT32_MAX;
		cap->shaper_shared_n_max = 0;

		cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			IPN3KE_TM_VT_NODE_NUM;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max = 1;

		cap->stats_mask = STATS_MASK_DEFAULT;

		break;
	case IPN3KE_TM_NODE_LEVEL_VT:
		cap->shaper_private_supported = 1;
		cap->shaper_private_dual_rate_supported = 0;
		cap->shaper_private_rate_min = 1;
		cap->shaper_private_rate_max = UINT32_MAX;
		cap->shaper_shared_n_max = 0;

		cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			IPN3KE_TM_COS_NODE_NUM;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max = 1;

		cap->stats_mask = STATS_MASK_DEFAULT;

		break;
	case IPN3KE_TM_NODE_LEVEL_COS:
		cap->shaper_private_supported = 0;
		cap->shaper_private_dual_rate_supported = 0;
		cap->shaper_private_rate_min = 0;
		cap->shaper_private_rate_max = 0;
		cap->shaper_shared_n_max = 0;

		cap->leaf.cman_head_drop_supported = 0;
		cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
		cap->leaf.cman_wred_byte_mode_supported = 0;
		cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
		cap->leaf.cman_wred_context_shared_n_max = 0;

		cap->stats_mask = STATS_MASK_QUEUE;

		break;
	default:
		break;
	}

	return 0;
}

static int
ipn3ke_tm_shaper_parame_trans(struct rte_tm_shaper_params *profile,
	struct ipn3ke_tm_shaper_profile *local_profile,
	const struct ipn3ke_tm_shaper_params_range_type *ref_data)
{
	uint32_t i;
	const struct ipn3ke_tm_shaper_params_range_type *r;
	uint64_t rate;

	rate = profile->peak.rate;
	for (i = 0, r = ref_data; i < IPN3KE_TM_SHAPER_RANGE_NUM; i++, r++) {
		if (rate >= r->low &&
			rate <= r->high) {
			local_profile->m = (rate / 4) / r->exp2;
			local_profile->e = r->exp;
			local_profile->rate = rate;

			return 0;
		}
	}

	return -1;
}

/* Traffic manager shaper profile add */
static int
ipn3ke_tm_shaper_profile_add(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id, struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_shaper_profile *sp;

	/* Shaper profile must not exist. */
	sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
	if (!sp || sp->valid)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak rate: non-zero and within the supported range */
	if (profile->peak.rate == 0 ||
		profile->peak.rate > IPN3KE_TM_SHAPER_PEAK_RATE_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak size: must be zero (burst size is not configurable) */
	if (profile->peak.size != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
			NULL,
			rte_strerror(EINVAL));

	/* Committed rate: bounded; dual-rate profiles are not supported */
	if (profile->committed.rate > IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Packet length adjust: must be zero (not supported) */
	if (profile->pkt_length_adjust != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
			NULL,
			rte_strerror(EINVAL));

	if (ipn3ke_tm_shaper_parame_trans(profile,
			sp,
			ipn3ke_tm_shaper_params_rang)) {
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			NULL,
			rte_strerror(EINVAL));
	}

	sp->valid = 1;
	rte_memcpy(&sp->params, profile, sizeof(sp->params));

	tm->h.n_shaper_profiles++;

	return 0;
}

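/*
 * Illustrative application-side usage (not part of this driver; the
 * rate value is hypothetical).  Only the peak rate may be non-zero,
 * and the profile ID must be the ID of the node it will shape:
 *
 *	struct rte_tm_shaper_params sp = {0};
 *	struct rte_tm_error err;
 *
 *	sp.peak.rate = 1000;
 *	rte_tm_shaper_profile_add(port_id, node_id, &sp, &err);
 */
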
/* Traffic manager shaper profile delete */
static int
ipn3ke_tm_shaper_profile_delete(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id, struct rte_tm_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_shaper_profile *sp;

	/* Check existing */
	sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
	if (!sp || !sp->valid)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	sp->valid = 0;
	tm->h.n_shaper_profiles--;

	return 0;
}

static int
ipn3ke_tm_tdrop_profile_check(__rte_unused struct rte_eth_dev *dev,
	uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	enum rte_color color;

	/* TDROP profile ID must not be NONE. */
	if (tdrop_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* TDROP profile must use byte-mode thresholds;
	 * packet mode is not supported.
	 */
	if (profile->packet_mode != 0)
		return -rte_tm_error_set(error,
			ENOTSUP,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(ENOTSUP));

	/* min_th <= max_th, max_th > 0; only the GREEN thresholds are used */
	for (color = RTE_COLOR_GREEN; color <= RTE_COLOR_GREEN; color++) {
		uint64_t min_th = profile->red_params[color].min_th;
		uint64_t max_th = profile->red_params[color].max_th;

		if (((min_th >> IPN3KE_TDROP_TH1_SHIFT) >>
				IPN3KE_TDROP_TH1_SHIFT) ||
				min_th > max_th || max_th == 0)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_WRED_PROFILE,
				NULL,
				rte_strerror(EINVAL));
	}

	return 0;
}

static int
ipn3ke_hw_tm_tdrop_wr(struct ipn3ke_hw *hw,
	struct ipn3ke_tm_tdrop_profile *tp)
{
	if (tp->valid) {
		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CCB_PROFILE_MS,
			0,
			tp->th2,
			IPN3KE_CCB_PROFILE_MS_MASK);

		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CCB_PROFILE_P,
			tp->tdrop_profile_id,
			tp->th1,
			IPN3KE_CCB_PROFILE_MASK);
	} else {
		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CCB_PROFILE_MS,
			0,
			0,
			IPN3KE_CCB_PROFILE_MS_MASK);

		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CCB_PROFILE_P,
			tp->tdrop_profile_id,
			0,
			IPN3KE_CCB_PROFILE_MASK);
	}

	return 0;
}

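/*
 * Added commentary: as used throughout this file, IPN3KE_MASK_WRITE_REG()
 * takes (hw, register, index, value, mask).  The writes above appear to
 * program the profile's threshold words (th2/th1, derived from the WRED
 * min_th in ipn3ke_tm_tdrop_profile_add()) into the CCB profile
 * registers, with an invalidated profile written back as zero.
 */
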
/* Traffic manager TDROP profile add */
static int
ipn3ke_tm_tdrop_profile_add(struct rte_eth_dev *dev,
	uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_tdrop_profile *tp;
	int status;
	uint64_t min_th;
	uint32_t th1, th2;

	/* Check input params */
	status = ipn3ke_tm_tdrop_profile_check(dev,
		tdrop_profile_id,
		profile,
		error);
	if (status)
		return status;

	/* Look up the preallocated profile slot */
	tp = &hw->tdrop_profile[tdrop_profile_id];

	/* Split the threshold into the two hardware words */
	min_th = profile->red_params[RTE_COLOR_GREEN].min_th;
	th1 = (uint32_t)(min_th & IPN3KE_TDROP_TH1_MASK);
	th2 = (uint32_t)((min_th >> IPN3KE_TDROP_TH1_SHIFT) &
			IPN3KE_TDROP_TH2_MASK);
	tp->th1 = th1;
	tp->th2 = th2;
	rte_memcpy(&tp->params, profile, sizeof(tp->params));

	tm->h.n_tdrop_profiles++;

	tp->valid = 1;
	ipn3ke_hw_tm_tdrop_wr(hw, tp);

	return 0;
}

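/*
 * Illustrative application-side usage (not part of this driver; the
 * threshold values are hypothetical).  Only the GREEN thresholds are
 * consumed, and a single tail-drop level is described by setting
 * min_th and max_th to the same byte count:
 *
 *	struct rte_tm_wred_params wp = {0};
 *	struct rte_tm_error err;
 *
 *	wp.packet_mode = 0;
 *	wp.red_params[RTE_COLOR_GREEN].min_th = 1024;
 *	wp.red_params[RTE_COLOR_GREEN].max_th = 1024;
 *	rte_tm_wred_profile_add(port_id, profile_id, &wp, &err);
 */
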
/* Traffic manager TDROP profile delete */
static int
ipn3ke_tm_tdrop_profile_delete(struct rte_eth_dev *dev,
	uint32_t tdrop_profile_id, struct rte_tm_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_tdrop_profile *tp;

	/* Check existing */
	tp = ipn3ke_hw_tm_tdrop_profile_search(hw, tdrop_profile_id);
	if (tp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (tp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	tp->valid = 0;
	tm->h.n_tdrop_profiles--;

	ipn3ke_hw_tm_tdrop_wr(hw, tp);

	return 0;
}

static int
ipn3ke_tm_node_add_check_parameter(uint32_t tm_id,
	uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
	uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	uint32_t level_of_node_id;
	uint32_t node_index;
	uint32_t parent_level_id;

	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* priority: must be 0, 1, 2, 3 */
	if (priority > IPN3KE_TM_NODE_PRIORITY_HIGHEST)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* weight: must be 1 .. 255 */
	if (weight > IPN3KE_TM_NODE_WEIGHT_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* check node id and parent id */
	level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
	if (level_of_node_id != level_id)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));
	node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
	parent_level_id = parent_node_id / IPN3KE_TM_NODE_LEVEL_MOD;
	switch (level_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		if (node_index != tm_id)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
		if (parent_node_id != RTE_TM_NODE_ID_NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));

		break;
	case IPN3KE_TM_NODE_LEVEL_VT:
		if (node_index >= IPN3KE_TM_VT_NODE_NUM)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
		if (parent_level_id != IPN3KE_TM_NODE_LEVEL_PORT)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));

		break;
	case IPN3KE_TM_NODE_LEVEL_COS:
		if (node_index >= IPN3KE_TM_COS_NODE_NUM)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
		if (parent_level_id != IPN3KE_TM_NODE_LEVEL_VT)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));

		break;
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));
	}

	/* params: must not be NULL */
	if (params == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

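/*
 * Added example (assumed level numbering): the division/modulo checks
 * above imply IPN3KE_TM_NODE_LEVEL_PORT = 0, _VT = 1 and _COS = 2, so
 * valid IDs look like:
 *
 *	port node: node_id = tm_id (index must equal the TM/port ID),
 *	           parent_node_id = RTE_TM_NODE_ID_NULL
 *	VT node:   node_id = 1 * IPN3KE_TM_NODE_LEVEL_MOD + index,
 *	           parent at the port level
 *	COS node:  node_id = 2 * IPN3KE_TM_NODE_LEVEL_MOD + index,
 *	           parent at the VT level
 */
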
static int
ipn3ke_tm_node_add_check_mount(uint32_t tm_id,
	uint32_t node_id, uint32_t parent_node_id, uint32_t level_id,
	struct rte_tm_error *error)
{
	uint32_t node_index;
	uint32_t parent_index;
	uint32_t parent_index1;

	node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
	parent_index = parent_node_id % IPN3KE_TM_NODE_LEVEL_MOD;
	parent_index1 = node_index / IPN3KE_TM_NODE_MOUNT_MAX;
	switch (level_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		break;
	case IPN3KE_TM_NODE_LEVEL_VT:
		if (parent_index != tm_id)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));

		break;
	case IPN3KE_TM_NODE_LEVEL_COS:
		if (parent_index != parent_index1)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));

		break;
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));
	}

	return 0;
}

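/*
 * Added commentary: the COS-to-VT attachment is fixed rather than
 * free-form.  A COS node with index i may only mount under the VT node
 * with index i / IPN3KE_TM_NODE_MOUNT_MAX, i.e. each VT node owns a
 * contiguous group of IPN3KE_TM_NODE_MOUNT_MAX COS queues.
 */
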
/* Traffic manager node add */
static int
ipn3ke_tm_node_add(struct rte_eth_dev *dev,
	uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
	uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	uint32_t tm_id;
	struct ipn3ke_tm_node *n, *parent_node;
	uint32_t node_state, state_mask;
	int status;

	/* Check hierarchy changes are currently allowed */
	if (tm->hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	tm_id = tm->tm_id;

	status = ipn3ke_tm_node_add_check_parameter(tm_id,
		node_id,
		parent_node_id,
		priority,
		weight,
		level_id,
		params,
		error);
	if (status)
		return status;

	status = ipn3ke_tm_node_add_check_mount(tm_id,
		node_id,
		parent_node_id,
		level_id,
		error);
	if (status)
		return status;

	/* Shaper profile ID must be NONE or the node's own ID. */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
		params->shaper_profile_id != node_id)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Look up the preallocated node slot */
	state_mask = 0;
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_IDLE);
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_DEL);
	n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
	node_state = n->node_state;

	/* Check parent node */
	state_mask = 0;
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
	if (parent_node_id != RTE_TM_NODE_ID_NULL) {
		parent_node = ipn3ke_hw_tm_node_search(hw,
			tm_id,
			parent_node_id,
			state_mask);
		if (parent_node == NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
	} else {
		parent_node = NULL;
	}

	switch (level_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
		n->tm_id = tm_id;
		tm->h.port_commit_node = n;

		break;
	case IPN3KE_TM_NODE_LEVEL_VT:
		if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
			TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);

			parent_node->n_children++;
			tm->h.n_vt_nodes++;
		} else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			parent_node->n_children++;
			tm->h.n_vt_nodes++;
		}
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
		n->parent_node_id = parent_node_id;
		n->tm_id = tm_id;
		n->parent_node = parent_node;

		break;
	case IPN3KE_TM_NODE_LEVEL_COS:
		if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
			TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
				n, node);

			parent_node->n_children++;
			tm->h.n_cos_nodes++;
		} else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			parent_node->n_children++;
			tm->h.n_cos_nodes++;
		}
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
		n->parent_node_id = parent_node_id;
		n->tm_id = tm_id;
		n->parent_node = parent_node;

		break;
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));
	}

	n->priority = priority;
	n->weight = weight;

	if (n->level == IPN3KE_TM_NODE_LEVEL_COS &&
		params->leaf.cman == RTE_TM_CMAN_TAIL_DROP)
		n->tdrop_profile = ipn3ke_hw_tm_tdrop_profile_search(hw,
			params->leaf.wred.wred_profile_id);

	rte_memcpy(&n->params, params, sizeof(n->params));

	return 0;
}

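/*
 * Illustrative application-side sequence (not part of this driver; the
 * shown priority/weight values are hypothetical).  Each node's private
 * shaper profile must exist first (profile ID == node ID, see the check
 * above), then nodes are added top-down and the hierarchy committed:
 *
 *	struct rte_tm_shaper_params sp = { .peak = { .rate = 1000 } };
 *	struct rte_tm_node_params np = {0};
 *	struct rte_tm_error err;
 *
 *	rte_tm_shaper_profile_add(port_id, tm_id, &sp, &err);
 *	np.shaper_profile_id = tm_id;
 *	rte_tm_node_add(port_id, tm_id, RTE_TM_NODE_ID_NULL,
 *		0, 1, IPN3KE_TM_NODE_LEVEL_PORT, &np, &err);
 *	... VT and COS nodes underneath, then:
 *	rte_tm_hierarchy_commit(port_id, 1, &err);
 */
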
static int
ipn3ke_tm_node_del_check_parameter(uint32_t tm_id,
	uint32_t node_id, struct rte_tm_error *error)
{
	uint32_t level_of_node_id;
	uint32_t node_index;

	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* check node id */
	level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
	node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
	switch (level_of_node_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		if (node_index != tm_id)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_ID,
				NULL,
				rte_strerror(EINVAL));

		break;
	case IPN3KE_TM_NODE_LEVEL_VT:
		if (node_index >= IPN3KE_TM_VT_NODE_NUM)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_ID,
				NULL,
				rte_strerror(EINVAL));

		break;
	case IPN3KE_TM_NODE_LEVEL_COS:
		if (node_index >= IPN3KE_TM_COS_NODE_NUM)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_ID,
				NULL,
				rte_strerror(EINVAL));

		break;
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));
	}

	return 0;
}

/* Traffic manager node delete */
static int
ipn3ke_pmd_tm_node_delete(struct rte_eth_dev *dev,
	uint32_t node_id, struct rte_tm_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_node *n, *parent_node;
	uint32_t tm_id;
	int status;
	uint32_t level_of_node_id;
	uint32_t node_state;
	uint32_t state_mask;

	/* Check hierarchy changes are currently allowed */
	if (tm->hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	tm_id = tm->tm_id;

	status = ipn3ke_tm_node_del_check_parameter(tm_id,
		node_id,
		error);
	if (status)
		return status;

	/* Check existing */
	state_mask = 0;
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
	n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	if (n->n_children > 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	node_state = n->node_state;

	level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;

	/* Check parent node */
	if (n->parent_node_id != RTE_TM_NODE_ID_NULL) {
		state_mask = 0;
		IPN3KE_BIT_SET(state_mask,
			IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
		IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
		parent_node = ipn3ke_hw_tm_node_search(hw,
			tm_id,
			n->parent_node_id,
			state_mask);
		if (parent_node == NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
		if (n->parent_node != parent_node)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
	} else {
		parent_node = NULL;
	}

	switch (level_of_node_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		if (tm->h.port_node != n)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
		tm->h.port_commit_node = n;

		break;
	case IPN3KE_TM_NODE_LEVEL_VT:
		if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
			TAILQ_REMOVE(&parent_node->children_node_list,
				n, node);
			TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);

			parent_node->n_children--;
			tm->h.n_vt_nodes--;
		} else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			parent_node->n_children--;
			tm->h.n_vt_nodes--;
		}
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;

		break;
	case IPN3KE_TM_NODE_LEVEL_COS:
		if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
			TAILQ_REMOVE(&parent_node->children_node_list,
				n, node);
			TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
				n, node);

			parent_node->n_children--;
			tm->h.n_cos_nodes--;
		} else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			parent_node->n_children--;
			tm->h.n_cos_nodes--;
		}
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;

		break;
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));
	}

	return 0;
}

static int
ipn3ke_tm_hierarchy_commit_check(struct rte_eth_dev *dev,
	struct rte_tm_error *error)
{
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	uint32_t tm_id;
	struct ipn3ke_tm_node_list *nl;
	struct ipn3ke_tm_node *n, *parent_node;

	tm_id = tm->tm_id;

	nl = &tm->h.cos_commit_node_list;
	TAILQ_FOREACH(n, nl, node) {
		parent_node = n->parent_node;
		if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
				n->level != IPN3KE_TM_NODE_LEVEL_COS ||
				n->tm_id != tm_id ||
				parent_node == NULL ||
				(parent_node &&
					parent_node->node_state ==
					IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
				(parent_node &&
					parent_node->node_state ==
					IPN3KE_TM_NODE_STATE_IDLE) ||
				n->shaper_profile.valid == 0) {
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
			}
		} else if (n->node_state ==
				IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			if (n->level != IPN3KE_TM_NODE_LEVEL_COS ||
				n->n_children != 0) {
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
			}
		} else {
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		}
	}

	nl = &tm->h.vt_commit_node_list;
	TAILQ_FOREACH(n, nl, node) {
		parent_node = n->parent_node;
		if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
				n->level != IPN3KE_TM_NODE_LEVEL_VT ||
				n->tm_id != tm_id ||
				parent_node == NULL ||
				(parent_node &&
					parent_node->node_state ==
					IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
				(parent_node &&
					parent_node->node_state ==
					IPN3KE_TM_NODE_STATE_IDLE) ||
				n->shaper_profile.valid == 0) {
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
			}
		} else if (n->node_state ==
				IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			if (n->level != IPN3KE_TM_NODE_LEVEL_VT ||
				n->n_children != 0) {
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
			}
		} else {
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		}
	}

	n = tm->h.port_commit_node;
	if (n &&
		(n->parent_node_id != RTE_TM_NODE_ID_NULL ||
		n->level != IPN3KE_TM_NODE_LEVEL_PORT ||
		n->tm_id != tm_id ||
		n->parent_node != NULL ||
		n->shaper_profile.valid == 0)) {
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
	}

	return 0;
}

static int
ipn3ke_hw_tm_node_wr(struct ipn3ke_hw *hw,
	struct ipn3ke_tm_node *n)
{
	uint32_t level;

	level = n->level;

	switch (level) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		/**
		 * Configure scheduling type
		 */
		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_TYPE_L3_X,
			n->node_index,
			n->priority,
			IPN3KE_QOS_TYPE_MASK);

		/**
		 * Configure scheduling weight
		 */
		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_SCH_WT_L3_X,
			n->node_index,
			n->weight,
			IPN3KE_QOS_SCH_WT_MASK);

		/**
		 * Configure shaping weight (mantissa/exponent pair)
		 */
		if (n->shaper_profile.valid)
			IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QOS_SHAP_WT_L3_X,
				n->node_index,
				((n->shaper_profile.e << 10) |
					n->shaper_profile.m),
				IPN3KE_QOS_SHAP_WT_MASK);

		break;
	case IPN3KE_TM_NODE_LEVEL_VT:
		/**
		 * Configure scheduling type
		 */
		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_TYPE_L2_X,
			n->node_index,
			n->priority,
			IPN3KE_QOS_TYPE_MASK);

		/**
		 * Configure scheduling weight
		 */
		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_SCH_WT_L2_X,
			n->node_index,
			n->weight,
			IPN3KE_QOS_SCH_WT_MASK);

		/**
		 * Configure shaping weight
		 */
		if (n->shaper_profile.valid)
			IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QOS_SHAP_WT_L2_X,
				n->node_index,
				((n->shaper_profile.e << 10) |
					n->shaper_profile.m),
				IPN3KE_QOS_SHAP_WT_MASK);

		/**
		 * Configure VT to port mapping
		 */
		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_MAP_L2_X,
			n->node_index,
			n->parent_node->node_index,
			IPN3KE_QOS_MAP_L2_MASK);

		break;
	case IPN3KE_TM_NODE_LEVEL_COS:
		/**
		 * Configure Tail Drop mapping
		 */
		if (n->tdrop_profile && n->tdrop_profile->valid) {
			IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_CCB_QPROFILE_Q,
				n->node_index,
				n->tdrop_profile->tdrop_profile_id,
				IPN3KE_CCB_QPROFILE_MASK);
		}

		/**
		 * Configure scheduling type
		 */
		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_TYPE_L1_X,
			n->node_index,
			n->priority,
			IPN3KE_QOS_TYPE_MASK);

		/**
		 * Configure scheduling weight
		 */
		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_SCH_WT_L1_X,
			n->node_index,
			n->weight,
			IPN3KE_QOS_SCH_WT_MASK);

		/**
		 * Configure shaping weight
		 */
		if (n->shaper_profile.valid)
			IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QOS_SHAP_WT_L1_X,
				n->node_index,
				((n->shaper_profile.e << 10) |
					n->shaper_profile.m),
				IPN3KE_QOS_SHAP_WT_MASK);

		/**
		 * Configure COS queue to port
		 */
		while (IPN3KE_MASK_READ_REG(hw,
			IPN3KE_QM_UID_CONFIG_CTRL,

		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QM_UID_CONFIG_DATA,
			(1 << 8 | n->parent_node->parent_node->node_index),

		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QM_UID_CONFIG_CTRL,

		while (IPN3KE_MASK_READ_REG(hw,
			IPN3KE_QM_UID_CONFIG_CTRL,

		/**
		 * Configure COS queue to VT
		 */
		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_MAP_L1_X,
			n->node_index,
			n->parent_node->node_index,
			IPN3KE_QOS_MAP_L1_MASK);

		break;
	default:
		break;
	}

	return 0;
}

static int
ipn3ke_tm_hierarchy_hw_commit(struct rte_eth_dev *dev,
	struct rte_tm_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_node_list *nl;
	struct ipn3ke_tm_node *n, *nn, *parent_node;

	n = tm->h.port_commit_node;
	if (n) {
		if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			tm->h.port_commit_node = NULL;

			n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
		} else if (n->node_state ==
				IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			tm->h.port_commit_node = NULL;

			n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
			n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
			n->weight = 0;
			n->tm_id = RTE_TM_NODE_ID_NULL;
		} else {
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		}
		ipn3ke_hw_tm_node_wr(hw, n);
	}

	nl = &tm->h.vt_commit_node_list;
	for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
		nn = TAILQ_NEXT(n, node);
		if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
			parent_node = n->parent_node;
			TAILQ_REMOVE(nl, n, node);
			TAILQ_INSERT_TAIL(&parent_node->children_node_list,
				n, node);
		} else if (n->node_state ==
				IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			parent_node = n->parent_node;
			TAILQ_REMOVE(nl, n, node);

			n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
			n->parent_node_id = RTE_TM_NODE_ID_NULL;
			n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
			n->weight = 0;
			n->tm_id = RTE_TM_NODE_ID_NULL;
			n->parent_node = NULL;
		} else {
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		}
		ipn3ke_hw_tm_node_wr(hw, n);
	}

	nl = &tm->h.cos_commit_node_list;
	for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
		nn = TAILQ_NEXT(n, node);
		if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
			parent_node = n->parent_node;
			TAILQ_REMOVE(nl, n, node);
			TAILQ_INSERT_TAIL(&parent_node->children_node_list,
				n, node);
		} else if (n->node_state ==
				IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			parent_node = n->parent_node;
			TAILQ_REMOVE(nl, n, node);

			n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
			n->parent_node_id = RTE_TM_NODE_ID_NULL;
			n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
			n->weight = 0;
			n->tm_id = RTE_TM_NODE_ID_NULL;
			n->parent_node = NULL;

			if (n->tdrop_profile)
				n->tdrop_profile->n_users--;
		} else {
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		}
		ipn3ke_hw_tm_node_wr(hw, n);
	}

	return 0;
}

static int
ipn3ke_tm_hierarchy_commit_clear(struct rte_eth_dev *dev)
{
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_node_list *nl;
	struct ipn3ke_tm_node *n;
	struct ipn3ke_tm_node *nn;

	n = tm->h.port_commit_node;
	if (n) {
		n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		n->weight = 0;
		n->tm_id = RTE_TM_NODE_ID_NULL;

		tm->h.port_commit_node = NULL;
	}

	nl = &tm->h.vt_commit_node_list;
	for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
		nn = TAILQ_NEXT(n, node);

		n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		n->parent_node_id = RTE_TM_NODE_ID_NULL;
		n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		n->weight = 0;
		n->tm_id = RTE_TM_NODE_ID_NULL;
		n->parent_node = NULL;
		tm->h.n_vt_nodes--;

		TAILQ_REMOVE(nl, n, node);
	}

	nl = &tm->h.cos_commit_node_list;
	for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
		nn = TAILQ_NEXT(n, node);

		n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		n->parent_node_id = RTE_TM_NODE_ID_NULL;
		n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		n->weight = 0;
		n->tm_id = RTE_TM_NODE_ID_NULL;
		n->parent_node = NULL;
		tm->h.n_cos_nodes--;

		TAILQ_REMOVE(nl, n, node);
	}

	return 0;
}

static void
ipn3ke_tm_show(struct rte_eth_dev *dev)
{
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	uint32_t tm_id;
	struct ipn3ke_tm_node_list *vt_nl, *cos_nl;
	struct ipn3ke_tm_node *port_n, *vt_n, *cos_n;
	const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",
		"CfgAdd",
		"CfgDel",
		"Committed"};

	tm_id = tm->tm_id;

	IPN3KE_AFU_PMD_DEBUG("***HQoS Tree(%d)***\n", tm_id);

	port_n = tm->h.port_node;
	IPN3KE_AFU_PMD_DEBUG("Port: (%d|%s)\n", port_n->node_index,
		str_state[port_n->node_state]);

	vt_nl = &tm->h.port_node->children_node_list;
	TAILQ_FOREACH(vt_n, vt_nl, node) {
		cos_nl = &vt_n->children_node_list;
		IPN3KE_AFU_PMD_DEBUG(" VT%d: ", vt_n->node_index);
		TAILQ_FOREACH(cos_n, cos_nl, node) {
			if (cos_n->parent_node_id !=
				(vt_n->node_index + IPN3KE_TM_NODE_LEVEL_MOD))
				IPN3KE_AFU_PMD_ERR("(%d|%s), ",
					cos_n->node_index,
					str_state[cos_n->node_state]);
		}
		IPN3KE_AFU_PMD_DEBUG("\n");
	}
}

static void
ipn3ke_tm_show_commit(struct rte_eth_dev *dev)
{
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	uint32_t tm_id;
	struct ipn3ke_tm_node_list *nl;
	struct ipn3ke_tm_node *n;
	const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",
		"CfgAdd",
		"CfgDel",
		"Committed"};

	tm_id = tm->tm_id;

	IPN3KE_AFU_PMD_DEBUG("***Commit Tree(%d)***\n", tm_id);
	n = tm->h.port_commit_node;
	IPN3KE_AFU_PMD_DEBUG("Port: ");
	if (n)
		IPN3KE_AFU_PMD_DEBUG("(%d|%s)",
			n->node_index,
			str_state[n->node_state]);
	IPN3KE_AFU_PMD_DEBUG("\n");

	nl = &tm->h.vt_commit_node_list;
	IPN3KE_AFU_PMD_DEBUG("VT : ");
	TAILQ_FOREACH(n, nl, node) {
		IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
			n->node_index,
			str_state[n->node_state]);
	}
	IPN3KE_AFU_PMD_DEBUG("\n");

	nl = &tm->h.cos_commit_node_list;
	IPN3KE_AFU_PMD_DEBUG("COS : ");
	TAILQ_FOREACH(n, nl, node) {
		IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
			n->node_index,
			str_state[n->node_state]);
	}
	IPN3KE_AFU_PMD_DEBUG("\n");
}

/* Traffic manager hierarchy commit */
static int
ipn3ke_tm_hierarchy_commit(struct rte_eth_dev *dev,
	int clear_on_fail, struct rte_tm_error *error)
{
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	int status;

	/* Checks */
	if (tm->hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	ipn3ke_tm_show_commit(dev);

	status = ipn3ke_tm_hierarchy_commit_check(dev, error);
	if (status) {
		if (clear_on_fail)
			ipn3ke_tm_hierarchy_commit_clear(dev);
		return status;
	}

	ipn3ke_tm_hierarchy_hw_commit(dev, error);
	ipn3ke_tm_show(dev);

	return 0;
}

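/*
 * Illustrative application-side commit (not part of this driver):
 *
 *	struct rte_tm_error err;
 *	int ret = rte_tm_hierarchy_commit(port_id, 1, &err);
 *
 * A non-zero clear_on_fail asks the driver to roll the pending
 * add/delete lists back to idle via ipn3ke_tm_hierarchy_commit_clear()
 * when the pre-commit check fails.
 */
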
const struct rte_tm_ops ipn3ke_tm_ops = {
	.node_type_get = ipn3ke_pmd_tm_node_type_get,
	.capabilities_get = ipn3ke_tm_capabilities_get,
	.level_capabilities_get = ipn3ke_tm_level_capabilities_get,
	.node_capabilities_get = ipn3ke_tm_node_capabilities_get,

	.wred_profile_add = ipn3ke_tm_tdrop_profile_add,
	.wred_profile_delete = ipn3ke_tm_tdrop_profile_delete,
	.shared_wred_context_add_update = NULL,
	.shared_wred_context_delete = NULL,

	.shaper_profile_add = ipn3ke_tm_shaper_profile_add,
	.shaper_profile_delete = ipn3ke_tm_shaper_profile_delete,
	.shared_shaper_add_update = NULL,
	.shared_shaper_delete = NULL,

	.node_add = ipn3ke_tm_node_add,
	.node_delete = ipn3ke_pmd_tm_node_delete,
	.node_suspend = NULL,
	.node_resume = NULL,
	.hierarchy_commit = ipn3ke_tm_hierarchy_commit,

	.node_parent_update = NULL,
	.node_shaper_update = NULL,
	.node_shared_shaper_update = NULL,
	.node_stats_update = NULL,
	.node_wfq_weight_mode_update = NULL,
	.node_cman_update = NULL,
	.node_wred_context_update = NULL,
	.node_shared_wred_context_update = NULL,

	.node_stats_read = NULL,
};

int
ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev,
	void *arg)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
	struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
	struct rte_eth_dev *i40e_pf_eth;
	const struct rte_tm_ops *ops;

	if (!arg)
		return -EINVAL;

	if (hw->acc_tm) {
		*(const void **)arg = &ipn3ke_tm_ops;
	} else if (rpst->i40e_pf_eth) {
		i40e_pf_eth = rpst->i40e_pf_eth;
		if (i40e_pf_eth->dev_ops->tm_ops_get == NULL ||
			i40e_pf_eth->dev_ops->tm_ops_get(i40e_pf_eth,
				&ops) != 0 ||
			ops == NULL)
			return -EINVAL;

		*(const void **)arg = ops;
	} else {
		return -EINVAL;
	}

	return 0;
}