1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
9 #include <rte_bus_pci.h>
10 #include <rte_ethdev.h>
12 #include <rte_malloc.h>
13 #include <rte_tm_driver.h>
16 #include <rte_sched.h>
17 #include <rte_ethdev_driver.h>
20 #include <rte_rawdev.h>
21 #include <rte_rawdev_pmd.h>
22 #include <rte_bus_ifpga.h>
23 #include <ifpga_logs.h>
25 #include "ipn3ke_rawdev_api.h"
26 #include "ipn3ke_flow.h"
27 #include "ipn3ke_logs.h"
28 #include "ipn3ke_ethdev.h"
30 #define BYTES_IN_MBPS (1000 * 1000 / 8)
31 #define SUBPORT_TC_PERIOD 10
32 #define PIPE_TC_PERIOD 40
/* Lookup table used to translate an absolute shaper rate into the HW
 * (mantissa, exponent) encoding; consumed by ipn3ke_tm_shaper_parame_trans().
 * From the usage there, the columns read as
 * {m_low, m_high, exp, exp2 (= 2^exp), low, high}, where [low, high] is the
 * rate range covered by the row.
 * NOTE(review): the struct body is not visible in this chunk — confirm the
 * field order against the full definition before relying on it.
 */
34 struct ipn3ke_tm_shaper_params_range_type {
42 struct ipn3ke_tm_shaper_params_range_type ipn3ke_tm_shaper_params_rang[] = {
45 { 4, 7, 0, 1, 16, 28},
46 { 8, 15, 0, 1, 32, 60},
47 { 16, 31, 0, 1, 64, 124},
48 { 32, 63, 0, 1, 128, 252},
49 { 64, 127, 0, 1, 256, 508},
50 {128, 255, 0, 1, 512, 1020},
51 {256, 511, 0, 1, 1024, 2044},
52 {512, 1023, 0, 1, 2048, 4092},
53 {512, 1023, 1, 2, 4096, 8184},
54 {512, 1023, 2, 4, 8192, 16368},
55 {512, 1023, 3, 8, 16384, 32736},
56 {512, 1023, 4, 16, 32768, 65472},
57 {512, 1023, 5, 32, 65536, 130944},
58 {512, 1023, 6, 64, 131072, 261888},
59 {512, 1023, 7, 128, 262144, 523776},
60 {512, 1023, 8, 256, 524288, 1047552},
61 {512, 1023, 9, 512, 1048576, 2095104},
62 {512, 1023, 10, 1024, 2097152, 4190208},
63 {512, 1023, 11, 2048, 4194304, 8380416},
64 {512, 1023, 12, 4096, 8388608, 16760832},
65 {512, 1023, 13, 8192, 16777216, 33521664},
66 {512, 1023, 14, 16384, 33554432, 67043328},
67 {512, 1023, 15, 32768, 67108864, 134086656},
/* Number of rows in the range table above. */
70 #define IPN3KE_TM_SHAPER_RANGE_NUM (sizeof(ipn3ke_tm_shaper_params_rang) / \
71 sizeof(struct ipn3ke_tm_shaper_params_range_type))
/* Committed/peak rate ceilings are both the upper bound of the last row. */
73 #define IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX \
74 (ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)
76 #define IPN3KE_TM_SHAPER_PEAK_RATE_MAX \
77 (ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)
/* One-time TM init for the shared hw context:
 *  - optional scratch-register write/read-back sanity check
 *    (compiled in under IPN3KE_TM_SCRATCH_RW);
 *  - allocate one zeroed array holding all hierarchy nodes
 *    (port + virtual-tunnel + class-of-service levels) and one array of
 *    tail-drop profiles;
 *  - reset every node to the IDLE state and every tdrop profile to invalid.
 */
80 ipn3ke_hw_tm_init(struct ipn3ke_hw *hw)
82 #define SCRATCH_DATA 0xABCDEF
83 struct ipn3ke_tm_node *nodes;
84 struct ipn3ke_tm_tdrop_profile *tdrop_profile;
90 #if IPN3KE_TM_SCRATCH_RW
91 uint32_t scratch_data;
92 IPN3KE_MASK_WRITE_REG(hw,
97 scratch_data = IPN3KE_MASK_READ_REG(hw,
/* Read-back mismatch means the AFU register path is broken. */
101 if (scratch_data != SCRATCH_DATA)
104 /* alloc memory for all hierarchy nodes */
105 node_num = hw->port_num +
106 IPN3KE_TM_VT_NODE_NUM +
107 IPN3KE_TM_COS_NODE_NUM;
109 nodes = rte_zmalloc("ipn3ke_tm_nodes",
110 sizeof(struct ipn3ke_tm_node) * node_num,
115 /* alloc memory for Tail Drop Profile */
116 tdrop_profile = rte_zmalloc("ipn3ke_tm_tdrop_profile",
117 sizeof(struct ipn3ke_tm_tdrop_profile) *
118 IPN3KE_TM_TDROP_PROFILE_NUM,
120 if (!tdrop_profile) {
/* The three level arrays are consecutive slices of the single allocation. */
126 hw->port_nodes = nodes;
127 hw->vt_nodes = hw->port_nodes + hw->port_num;
128 hw->cos_nodes = hw->vt_nodes + IPN3KE_TM_VT_NODE_NUM;
129 hw->tdrop_profile = tdrop_profile;
130 hw->tdrop_profile_num = IPN3KE_TM_TDROP_PROFILE_NUM;
/* Initialize every port-level node to a detached, idle state. */
132 for (i = 0, nodes = hw->port_nodes;
135 nodes->node_index = i;
136 nodes->level = IPN3KE_TM_NODE_LEVEL_PORT;
137 nodes->tm_id = RTE_TM_NODE_ID_NULL;
138 nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
139 nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
140 nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
142 nodes->parent_node = NULL;
143 nodes->shaper_profile.valid = 0;
144 nodes->tdrop_profile = NULL;
145 nodes->n_children = 0;
146 TAILQ_INIT(&nodes->children_node_list);
/* Same reset for the virtual-tunnel (VT) level nodes. */
149 for (i = 0, nodes = hw->vt_nodes;
150 i < IPN3KE_TM_VT_NODE_NUM;
152 nodes->node_index = i;
153 nodes->level = IPN3KE_TM_NODE_LEVEL_VT;
154 nodes->tm_id = RTE_TM_NODE_ID_NULL;
155 nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
156 nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
157 nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
159 nodes->parent_node = NULL;
160 nodes->shaper_profile.valid = 0;
161 nodes->tdrop_profile = NULL;
162 nodes->n_children = 0;
163 TAILQ_INIT(&nodes->children_node_list);
/* Same reset for the class-of-service (COS, leaf) level nodes. */
166 for (i = 0, nodes = hw->cos_nodes;
167 i < IPN3KE_TM_COS_NODE_NUM;
169 nodes->node_index = i;
170 nodes->level = IPN3KE_TM_NODE_LEVEL_COS;
171 nodes->tm_id = RTE_TM_NODE_ID_NULL;
172 nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
173 nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
174 nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
176 nodes->parent_node = NULL;
177 nodes->shaper_profile.valid = 0;
178 nodes->tdrop_profile = NULL;
179 nodes->n_children = 0;
180 TAILQ_INIT(&nodes->children_node_list);
/* Tail-drop profiles start out unused and invalid. */
183 for (i = 0, tdrop_profile = hw->tdrop_profile;
184 i < IPN3KE_TM_TDROP_PROFILE_NUM;
185 i++, tdrop_profile++) {
186 tdrop_profile->tdrop_profile_id = i;
187 tdrop_profile->n_users = 0;
188 tdrop_profile->valid = 0;
/* Per-representor TM state init: bind this port's pre-allocated port node
 * (from hw->port_nodes, indexed by port_id), zero all hierarchy counters,
 * empty the commit lists, and use the port id as the tm id.
 */
195 ipn3ke_tm_init(struct ipn3ke_rpst *rpst)
197 struct ipn3ke_tm_internals *tm;
198 struct ipn3ke_tm_node *port_node;
202 port_node = &rpst->hw->port_nodes[rpst->port_id];
203 tm->h.port_node = port_node;
205 tm->h.n_shaper_profiles = 0;
206 tm->h.n_tdrop_profiles = 0;
207 tm->h.n_vt_nodes = 0;
208 tm->h.n_cos_nodes = 0;
210 tm->h.port_commit_node = NULL;
211 TAILQ_INIT(&tm->h.vt_commit_node_list);
212 TAILQ_INIT(&tm->h.cos_commit_node_list);
214 tm->hierarchy_frozen = 0;
216 tm->tm_id = rpst->port_id;
/* Resolve a shaper profile id to the shaper_profile embedded in the node it
 * encodes.  Profile ids reuse the node-id encoding:
 *   level = id / IPN3KE_TM_NODE_LEVEL_MOD, index = id % IPN3KE_TM_NODE_LEVEL_MOD.
 * On a bad id, fills *error and (per the visible paths) leaves sp NULL.
 * NOTE(review): the out-of-range branches report rte_strerror(EEXIST) where
 * EINVAL would read more naturally — confirm whether that is intentional.
 */
219 static struct ipn3ke_tm_shaper_profile *
220 ipn3ke_hw_tm_shaper_profile_search(struct ipn3ke_hw *hw,
221 uint32_t shaper_profile_id, struct rte_tm_error *error)
223 struct ipn3ke_tm_shaper_profile *sp = NULL;
224 uint32_t level_of_node_id;
227 /* Shaper profile ID must not be NONE. */
228 if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE) {
229 rte_tm_error_set(error,
231 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
233 rte_strerror(EINVAL));
/* Split the id into hierarchy level and per-level index. */
238 level_of_node_id = shaper_profile_id / IPN3KE_TM_NODE_LEVEL_MOD;
239 node_index = shaper_profile_id % IPN3KE_TM_NODE_LEVEL_MOD;
241 switch (level_of_node_id) {
242 case IPN3KE_TM_NODE_LEVEL_PORT:
243 if (node_index >= hw->port_num)
244 rte_tm_error_set(error,
246 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
248 rte_strerror(EEXIST));
250 sp = &hw->port_nodes[node_index].shaper_profile;
254 case IPN3KE_TM_NODE_LEVEL_VT:
255 if (node_index >= IPN3KE_TM_VT_NODE_NUM)
256 rte_tm_error_set(error,
258 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
260 rte_strerror(EEXIST));
262 sp = &hw->vt_nodes[node_index].shaper_profile;
266 case IPN3KE_TM_NODE_LEVEL_COS:
267 if (node_index >= IPN3KE_TM_COS_NODE_NUM)
268 rte_tm_error_set(error,
270 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
272 rte_strerror(EEXIST));
274 sp = &hw->cos_nodes[node_index].shaper_profile;
/* Unknown level id. */
278 rte_tm_error_set(error,
280 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
282 rte_strerror(EEXIST));
/* Look up a tail-drop profile by id.
 * Returns the profile only when the id is in range AND the profile is
 * marked valid; otherwise the (elided) fallthrough paths return NULL.
 */
288 static struct ipn3ke_tm_tdrop_profile *
289 ipn3ke_hw_tm_tdrop_profile_search(struct ipn3ke_hw *hw,
290 uint32_t tdrop_profile_id)
292 struct ipn3ke_tm_tdrop_profile *tdrop_profile;
294 if (tdrop_profile_id >= hw->tdrop_profile_num)
297 tdrop_profile = &hw->tdrop_profile[tdrop_profile_id];
298 if (tdrop_profile->valid)
299 return tdrop_profile;
/* Locate a TM node by its encoded node id
 * (level = id / LEVEL_MOD, index = id % LEVEL_MOD), run consistency checks
 * on its bookkeeping fields, and return it only if its current state is set
 * in state_mask (an IDLE node matches unconditionally; a non-idle node must
 * also belong to tm_id).  Out-of-range indices return early (elided paths).
 */
304 static struct ipn3ke_tm_node *
305 ipn3ke_hw_tm_node_search(struct ipn3ke_hw *hw, uint32_t tm_id,
306 uint32_t node_id, uint32_t state_mask)
308 uint32_t level_of_node_id;
310 struct ipn3ke_tm_node *n;
312 level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
313 node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
315 switch (level_of_node_id) {
316 case IPN3KE_TM_NODE_LEVEL_PORT:
317 if (node_index >= hw->port_num)
319 n = &hw->port_nodes[node_index];
322 case IPN3KE_TM_NODE_LEVEL_VT:
323 if (node_index >= IPN3KE_TM_VT_NODE_NUM)
325 n = &hw->vt_nodes[node_index];
328 case IPN3KE_TM_NODE_LEVEL_COS:
329 if (node_index >= IPN3KE_TM_COS_NODE_NUM)
331 n = &hw->cos_nodes[node_index];
338 /* Check tm node status */
/* An IDLE node must be fully detached (no tm, no parent). */
339 if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE) {
340 if (n->tm_id != RTE_TM_NODE_ID_NULL ||
341 n->parent_node_id != RTE_TM_NODE_ID_NULL ||
342 n->parent_node != NULL ||
344 IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
/* Any other valid state must have a tm id and (below port level) a parent. */
346 } else if (n->node_state < IPN3KE_TM_NODE_STATE_MAX) {
347 if (n->tm_id == RTE_TM_NODE_ID_NULL ||
348 (level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
349 n->parent_node_id == RTE_TM_NODE_ID_NULL) ||
350 (level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
351 n->parent_node == NULL)) {
352 IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
355 IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
/* State filter: return the node only when its state is requested. */
358 if (IPN3KE_BIT_ISSET(state_mask, n->node_state)) {
359 if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE)
361 else if (n->tm_id == tm_id)
370 /* Traffic manager node type get */
/* rte_tm node_type_get op: report whether node_id is a leaf.
 * Only COMMITTED nodes are eligible; a COS-level node is the leaf level.
 */
372 ipn3ke_pmd_tm_node_type_get(struct rte_eth_dev *dev,
373 uint32_t node_id, int *is_leaf, struct rte_tm_error *error)
375 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
376 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
378 struct ipn3ke_tm_node *node;
/* is_leaf must be a valid output pointer (elided check). */
382 return -rte_tm_error_set(error,
384 RTE_TM_ERROR_TYPE_UNSPECIFIED,
386 rte_strerror(EINVAL));
391 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
392 node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
393 if (node_id == RTE_TM_NODE_ID_NULL ||
395 return -rte_tm_error_set(error,
397 RTE_TM_ERROR_TYPE_NODE_ID,
399 rte_strerror(EINVAL));
401 *is_leaf = (node->level == IPN3KE_TM_NODE_LEVEL_COS) ? 1 : 0;
/* WRED is not supported by this AFU; only tail drop is. */
406 #define WRED_SUPPORTED 0
/* Stats exposed on every node. */
408 #define STATS_MASK_DEFAULT \
409 (RTE_TM_STATS_N_PKTS | \
410 RTE_TM_STATS_N_BYTES | \
411 RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
412 RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
/* Leaf (queue) nodes additionally expose the queued-packet count. */
414 #define STATS_MASK_QUEUE \
415 (STATS_MASK_DEFAULT | RTE_TM_STATS_N_PKTS_QUEUED)
417 /* Traffic manager capabilities get */
/* rte_tm capabilities_get op: fill *cap with the static, device-wide
 * capabilities of this TM implementation (cap must be non-NULL, elided check).
 */
419 ipn3ke_tm_capabilities_get(__rte_unused struct rte_eth_dev *dev,
420 struct rte_tm_capabilities *cap, struct rte_tm_error *error)
423 return -rte_tm_error_set(error,
425 RTE_TM_ERROR_TYPE_CAPABILITIES,
427 rte_strerror(EINVAL));
429 /* set all the parameters to 0 first. */
430 memset(cap, 0, sizeof(*cap));
432 cap->n_nodes_max = 1 + IPN3KE_TM_COS_NODE_NUM + IPN3KE_TM_VT_NODE_NUM;
433 cap->n_levels_max = IPN3KE_TM_NODE_LEVEL_MAX;
435 cap->non_leaf_nodes_identical = 0;
436 cap->leaf_nodes_identical = 1;
438 cap->shaper_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
439 cap->shaper_private_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
440 cap->shaper_private_dual_rate_n_max = 0;
441 cap->shaper_private_rate_min = 1;
442 cap->shaper_private_rate_max = 1 + IPN3KE_TM_VT_NODE_NUM;
/* No shared shapers at all. */
444 cap->shaper_shared_n_max = 0;
445 cap->shaper_shared_n_nodes_per_shaper_max = 0;
446 cap->shaper_shared_n_shapers_per_node_max = 0;
447 cap->shaper_shared_dual_rate_n_max = 0;
448 cap->shaper_shared_rate_min = 0;
449 cap->shaper_shared_rate_max = 0;
451 cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
452 cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
454 cap->sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
455 cap->sched_sp_n_priorities_max = 3;
456 cap->sched_wfq_n_children_per_group_max = UINT32_MAX;
457 cap->sched_wfq_n_groups_max = 1;
458 cap->sched_wfq_weight_max = UINT32_MAX;
/* Congestion management: neither WRED nor head drop. */
460 cap->cman_wred_packet_mode_supported = 0;
461 cap->cman_wred_byte_mode_supported = 0;
462 cap->cman_head_drop_supported = 0;
463 cap->cman_wred_context_n_max = 0;
464 cap->cman_wred_context_private_n_max = 0;
465 cap->cman_wred_context_shared_n_max = 0;
466 cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
467 cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
470 * cap->mark_vlan_dei_supported = {0, 0, 0};
471 * cap->mark_ip_ecn_tcp_supported = {0, 0, 0};
472 * cap->mark_ip_ecn_sctp_supported = {0, 0, 0};
473 * cap->mark_ip_dscp_supported = {0, 0, 0};
476 cap->dynamic_update_mask = 0;
483 /* Traffic manager level capabilities get */
/* rte_tm level_capabilities_get op: fill *cap for one hierarchy level.
 * PORT and VT are non-leaf levels; COS is the leaf level.  cap must be
 * non-NULL and level_id < IPN3KE_TM_NODE_LEVEL_MAX.
 */
485 ipn3ke_tm_level_capabilities_get(struct rte_eth_dev *dev,
486 uint32_t level_id, struct rte_tm_level_capabilities *cap,
487 struct rte_tm_error *error)
489 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
492 return -rte_tm_error_set(error,
494 RTE_TM_ERROR_TYPE_CAPABILITIES,
496 rte_strerror(EINVAL));
498 if (level_id >= IPN3KE_TM_NODE_LEVEL_MAX)
499 return -rte_tm_error_set(error,
501 RTE_TM_ERROR_TYPE_LEVEL_ID,
503 rte_strerror(EINVAL));
505 /* set all the parameters to 0 first. */
506 memset(cap, 0, sizeof(*cap));
509 case IPN3KE_TM_NODE_LEVEL_PORT:
510 cap->n_nodes_max = hw->port_num;
511 cap->n_nodes_nonleaf_max = IPN3KE_TM_VT_NODE_NUM;
512 cap->n_nodes_leaf_max = 0;
513 cap->non_leaf_nodes_identical = 0;
514 cap->leaf_nodes_identical = 0;
516 cap->nonleaf.shaper_private_supported = 0;
517 cap->nonleaf.shaper_private_dual_rate_supported = 0;
518 cap->nonleaf.shaper_private_rate_min = 1;
519 cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
520 cap->nonleaf.shaper_shared_n_max = 0;
522 cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
523 cap->nonleaf.sched_sp_n_priorities_max = 1;
524 cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
525 cap->nonleaf.sched_wfq_n_groups_max = 0;
526 cap->nonleaf.sched_wfq_weight_max = 0;
528 cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;
531 case IPN3KE_TM_NODE_LEVEL_VT:
532 cap->n_nodes_max = IPN3KE_TM_VT_NODE_NUM;
533 cap->n_nodes_nonleaf_max = IPN3KE_TM_COS_NODE_NUM;
534 cap->n_nodes_leaf_max = 0;
535 cap->non_leaf_nodes_identical = 0;
536 cap->leaf_nodes_identical = 0;
538 cap->nonleaf.shaper_private_supported = 0;
539 cap->nonleaf.shaper_private_dual_rate_supported = 0;
540 cap->nonleaf.shaper_private_rate_min = 1;
541 cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
542 cap->nonleaf.shaper_shared_n_max = 0;
544 cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
545 cap->nonleaf.sched_sp_n_priorities_max = 1;
546 cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
547 cap->nonleaf.sched_wfq_n_groups_max = 0;
548 cap->nonleaf.sched_wfq_weight_max = 0;
550 cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;
/* Leaf level: queues with tail-drop support only. */
553 case IPN3KE_TM_NODE_LEVEL_COS:
554 cap->n_nodes_max = IPN3KE_TM_COS_NODE_NUM;
555 cap->n_nodes_nonleaf_max = 0;
556 cap->n_nodes_leaf_max = IPN3KE_TM_COS_NODE_NUM;
557 cap->non_leaf_nodes_identical = 0;
558 cap->leaf_nodes_identical = 0;
560 cap->leaf.shaper_private_supported = 0;
561 cap->leaf.shaper_private_dual_rate_supported = 0;
562 cap->leaf.shaper_private_rate_min = 0;
563 cap->leaf.shaper_private_rate_max = 0;
564 cap->leaf.shaper_shared_n_max = 0;
566 cap->leaf.cman_head_drop_supported = 0;
567 cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
568 cap->leaf.cman_wred_byte_mode_supported = 0;
569 cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
570 cap->leaf.cman_wred_context_shared_n_max = 0;
572 cap->leaf.stats_mask = STATS_MASK_QUEUE;
576 return -rte_tm_error_set(error,
578 RTE_TM_ERROR_TYPE_LEVEL_ID,
580 rte_strerror(EINVAL));
587 /* Traffic manager node capabilities get */
/* rte_tm node_capabilities_get op: fill *cap for one COMMITTED node,
 * keyed by the node's level.  The node must belong to the representor's
 * port (tm_id check); cap must be non-NULL (elided check).
 */
589 ipn3ke_tm_node_capabilities_get(struct rte_eth_dev *dev,
590 uint32_t node_id, struct rte_tm_node_capabilities *cap,
591 struct rte_tm_error *error)
593 struct ipn3ke_rpst *representor = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
594 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
595 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
597 struct ipn3ke_tm_node *tm_node;
601 return -rte_tm_error_set(error,
603 RTE_TM_ERROR_TYPE_CAPABILITIES,
605 rte_strerror(EINVAL));
/* Only committed nodes can be queried. */
610 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
611 tm_node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
613 return -rte_tm_error_set(error,
615 RTE_TM_ERROR_TYPE_NODE_ID,
617 rte_strerror(EINVAL));
619 if (tm_node->tm_id != representor->port_id)
620 return -rte_tm_error_set(error,
622 RTE_TM_ERROR_TYPE_NODE_ID,
624 rte_strerror(EINVAL));
626 /* set all the parameters to 0 first. */
627 memset(cap, 0, sizeof(*cap));
629 switch (tm_node->level) {
630 case IPN3KE_TM_NODE_LEVEL_PORT:
631 cap->shaper_private_supported = 1;
632 cap->shaper_private_dual_rate_supported = 0;
633 cap->shaper_private_rate_min = 1;
634 cap->shaper_private_rate_max = UINT32_MAX;
635 cap->shaper_shared_n_max = 0;
637 cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
638 cap->nonleaf.sched_sp_n_priorities_max = 1;
639 cap->nonleaf.sched_wfq_n_children_per_group_max =
640 IPN3KE_TM_VT_NODE_NUM;
641 cap->nonleaf.sched_wfq_n_groups_max = 1;
642 cap->nonleaf.sched_wfq_weight_max = 1;
644 cap->stats_mask = STATS_MASK_DEFAULT;
647 case IPN3KE_TM_NODE_LEVEL_VT:
648 cap->shaper_private_supported = 1;
649 cap->shaper_private_dual_rate_supported = 0;
650 cap->shaper_private_rate_min = 1;
651 cap->shaper_private_rate_max = UINT32_MAX;
652 cap->shaper_shared_n_max = 0;
654 cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
655 cap->nonleaf.sched_sp_n_priorities_max = 1;
656 cap->nonleaf.sched_wfq_n_children_per_group_max =
657 IPN3KE_TM_COS_NODE_NUM;
658 cap->nonleaf.sched_wfq_n_groups_max = 1;
659 cap->nonleaf.sched_wfq_weight_max = 1;
661 cap->stats_mask = STATS_MASK_DEFAULT;
/* Leaf (COS) nodes: no private shaper, tail drop only. */
664 case IPN3KE_TM_NODE_LEVEL_COS:
665 cap->shaper_private_supported = 0;
666 cap->shaper_private_dual_rate_supported = 0;
667 cap->shaper_private_rate_min = 0;
668 cap->shaper_private_rate_max = 0;
669 cap->shaper_shared_n_max = 0;
671 cap->leaf.cman_head_drop_supported = 0;
672 cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
673 cap->leaf.cman_wred_byte_mode_supported = 0;
674 cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
675 cap->leaf.cman_wred_context_shared_n_max = 0;
677 cap->stats_mask = STATS_MASK_QUEUE;
/* Translate a profile's peak rate into the HW shaper encoding: find the
 * range-table row whose [low, high] interval contains the rate, then derive
 * mantissa m = (rate / 4) / exp2 and exponent e = exp from that row.
 * The elided tail presumably returns non-zero when no row matches
 * (caller in ipn3ke_tm_shaper_profile_add() treats non-zero as failure).
 */
687 ipn3ke_tm_shaper_parame_trans(struct rte_tm_shaper_params *profile,
688 struct ipn3ke_tm_shaper_profile *local_profile,
689 const struct ipn3ke_tm_shaper_params_range_type *ref_data)
692 const struct ipn3ke_tm_shaper_params_range_type *r;
695 rate = profile->peak.rate;
696 for (i = 0, r = ref_data; i < IPN3KE_TM_SHAPER_RANGE_NUM; i++, r++) {
697 if (rate >= r->low &&
699 local_profile->m = (rate / 4) / r->exp2;
700 local_profile->e = r->exp;
701 local_profile->rate = rate;
/* rte_tm shaper_profile_add op: validate the profile (peak rate within the
 * range table, zero peak size, no committed rate beyond the max, zero packet
 * length adjust), translate it to the HW (m, e) encoding, and store it in the
 * per-node slot addressed by shaper_profile_id.
 */
711 ipn3ke_tm_shaper_profile_add(struct rte_eth_dev *dev,
712 uint32_t shaper_profile_id, struct rte_tm_shaper_params *profile,
713 struct rte_tm_error *error)
715 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
716 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
717 struct ipn3ke_tm_shaper_profile *sp;
719 /* Shaper profile must not exist. */
720 sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
721 if (!sp || (sp && sp->valid))
722 return -rte_tm_error_set(error,
724 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
726 rte_strerror(EEXIST));
728 /* Profile must not be NULL. */
730 return -rte_tm_error_set(error,
732 RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
734 rte_strerror(EINVAL));
736 /* Peak rate: non-zero, 32-bit */
737 if (profile->peak.rate == 0 ||
738 profile->peak.rate > IPN3KE_TM_SHAPER_PEAK_RATE_MAX)
739 return -rte_tm_error_set(error,
741 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
743 rte_strerror(EINVAL));
745 /* Peak size: non-zero, 32-bit */
746 if (profile->peak.size != 0)
747 return -rte_tm_error_set(error,
749 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
751 rte_strerror(EINVAL));
753 /* Dual-rate profiles are not supported. */
754 if (profile->committed.rate > IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX)
755 return -rte_tm_error_set(error,
757 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
759 rte_strerror(EINVAL));
761 /* Packet length adjust: 24 bytes */
762 if (profile->pkt_length_adjust != 0)
763 return -rte_tm_error_set(error,
765 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
767 rte_strerror(EINVAL));
/* Convert the rate to the HW mantissa/exponent form; non-zero = no fit. */
769 if (ipn3ke_tm_shaper_parame_trans(profile,
771 ipn3ke_tm_shaper_params_rang)) {
772 return -rte_tm_error_set(error,
774 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
776 rte_strerror(EINVAL));
779 rte_memcpy(&sp->params, profile, sizeof(sp->params));
782 tm->h.n_shaper_profiles++;
787 /* Traffic manager shaper profile delete */
/* rte_tm shaper_profile_delete op: the profile must exist and be valid;
 * invalidate it (elided) and decrement the profile count.
 */
789 ipn3ke_tm_shaper_profile_delete(struct rte_eth_dev *dev,
790 uint32_t shaper_profile_id, struct rte_tm_error *error)
792 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
793 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
794 struct ipn3ke_tm_shaper_profile *sp;
797 sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
798 if (!sp || (sp && !sp->valid))
799 return -rte_tm_error_set(error,
801 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
803 rte_strerror(EINVAL));
806 tm->h.n_shaper_profiles--;
/* Validate a tail-drop (WRED-params) profile: id must not be NONE, profile
 * must be non-NULL (elided), must be in byte mode (packet_mode == 0), and the
 * green-color thresholds must fit the HW threshold fields.
 */
812 ipn3ke_tm_tdrop_profile_check(__rte_unused struct rte_eth_dev *dev,
813 uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
814 struct rte_tm_error *error)
816 enum rte_color color;
818 /* TDROP profile ID must not be NONE. */
819 if (tdrop_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
820 return -rte_tm_error_set(error,
822 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
824 rte_strerror(EINVAL));
826 /* Profile must not be NULL. */
828 return -rte_tm_error_set(error,
830 RTE_TM_ERROR_TYPE_WRED_PROFILE,
832 rte_strerror(EINVAL));
834 /* TDROP profile should be in packet mode */
835 if (profile->packet_mode != 0)
836 return -rte_tm_error_set(error,
838 RTE_TM_ERROR_TYPE_WRED_PROFILE,
840 rte_strerror(ENOTSUP));
842 /* min_th <= max_th, max_th > 0 */
/* Only the GREEN color is checked; the loop runs exactly once. */
843 for (color = RTE_COLOR_GREEN; color <= RTE_COLOR_GREEN; color++) {
844 uint64_t min_th = profile->red_params[color].min_th;
845 uint64_t max_th = profile->red_params[color].max_th;
/* NOTE(review): min_th is shifted right by IPN3KE_TDROP_TH1_SHIFT twice,
 * i.e. rejected unless it fits in 2 * TH1_SHIFT bits (TH1 + TH2 fields).
 * If TH2 has a different width, the second shift should likely be
 * IPN3KE_TDROP_TH2_SHIFT — confirm against the register layout.
 */
847 if (((min_th >> IPN3KE_TDROP_TH1_SHIFT) >>
848 IPN3KE_TDROP_TH1_SHIFT) ||
850 return -rte_tm_error_set(error,
852 RTE_TM_ERROR_TYPE_WRED_PROFILE,
854 rte_strerror(EINVAL));
/* Push one tail-drop profile to the AFU: two MS/P register-pair writes to
 * the CCB profile registers, addressed by tp->tdrop_profile_id.  The elided
 * arguments presumably carry the threshold payload (th1/th2) — confirm
 * against the full source.
 */
861 ipn3ke_hw_tm_tdrop_wr(struct ipn3ke_hw *hw,
862 struct ipn3ke_tm_tdrop_profile *tp)
865 IPN3KE_MASK_WRITE_REG(hw,
866 IPN3KE_CCB_PROFILE_MS,
869 IPN3KE_CCB_PROFILE_MS_MASK);
871 IPN3KE_MASK_WRITE_REG(hw,
872 IPN3KE_CCB_PROFILE_P,
873 tp->tdrop_profile_id,
875 IPN3KE_CCB_PROFILE_MASK);
877 IPN3KE_MASK_WRITE_REG(hw,
878 IPN3KE_CCB_PROFILE_MS,
881 IPN3KE_CCB_PROFILE_MS_MASK);
883 IPN3KE_MASK_WRITE_REG(hw,
884 IPN3KE_CCB_PROFILE_P,
885 tp->tdrop_profile_id,
887 IPN3KE_CCB_PROFILE_MASK);
893 /* Traffic manager TDROP profile add */
/* rte_tm wred_profile_add op (tail drop): validate params, split the green
 * min threshold into the HW th1/th2 fields, cache the profile in
 * hw->tdrop_profile[id], bump the count, and program the AFU registers.
 */
895 ipn3ke_tm_tdrop_profile_add(struct rte_eth_dev *dev,
896 uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
897 struct rte_tm_error *error)
899 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
900 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
901 struct ipn3ke_tm_tdrop_profile *tp;
906 /* Check input params */
907 status = ipn3ke_tm_tdrop_profile_check(dev,
914 /* Memory allocation */
915 tp = &hw->tdrop_profile[tdrop_profile_id];
/* Split the 64-bit min threshold into the low (th1) and high (th2) fields. */
919 min_th = profile->red_params[RTE_COLOR_GREEN].min_th;
920 th1 = (uint32_t)(min_th & IPN3KE_TDROP_TH1_MASK);
921 th2 = (uint32_t)((min_th >> IPN3KE_TDROP_TH1_SHIFT) &
922 IPN3KE_TDROP_TH2_MASK);
925 rte_memcpy(&tp->params, profile, sizeof(tp->params));
928 tm->h.n_tdrop_profiles++;
/* Commit the profile to hardware. */
931 ipn3ke_hw_tm_tdrop_wr(hw, tp);
936 /* Traffic manager TDROP profile delete */
/* rte_tm wred_profile_delete op (tail drop): the profile must exist and have
 * no remaining users; invalidate it (elided), decrement the count, and write
 * the cleared profile back to hardware.
 */
938 ipn3ke_tm_tdrop_profile_delete(struct rte_eth_dev *dev,
939 uint32_t tdrop_profile_id, struct rte_tm_error *error)
941 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
942 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
943 struct ipn3ke_tm_tdrop_profile *tp;
946 tp = ipn3ke_hw_tm_tdrop_profile_search(hw, tdrop_profile_id);
948 return -rte_tm_error_set(error,
950 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
952 rte_strerror(EINVAL));
/* Still referenced by at least one node (elided n_users check). */
956 return -rte_tm_error_set(error,
958 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
960 rte_strerror(EBUSY));
964 tm->h.n_tdrop_profiles--;
967 ipn3ke_hw_tm_tdrop_wr(hw, tp);
/* Validate node_add() arguments before touching any state:
 *  - node_id not NULL, priority and weight within range;
 *  - level encoded in node_id must equal the caller-supplied level_id;
 *  - per-level: port index == tm id with no parent; VT/COS index in range
 *    with a parent exactly one level up;
 *  - params non-NULL (elided) and no shared shapers.
 * Returns 0 on success, negative with *error filled otherwise.
 */
973 ipn3ke_tm_node_add_check_parameter(uint32_t tm_id,
974 uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
975 uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
976 struct rte_tm_error *error)
978 uint32_t level_of_node_id;
980 uint32_t parent_level_id;
982 if (node_id == RTE_TM_NODE_ID_NULL)
983 return -rte_tm_error_set(error,
985 RTE_TM_ERROR_TYPE_NODE_ID,
987 rte_strerror(EINVAL));
989 /* priority: must be 0, 1, 2, 3 */
990 if (priority > IPN3KE_TM_NODE_PRIORITY_HIGHEST)
991 return -rte_tm_error_set(error,
993 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
995 rte_strerror(EINVAL));
997 /* weight: must be 1 .. 255 */
998 if (weight > IPN3KE_TM_NODE_WEIGHT_MAX)
999 return -rte_tm_error_set(error,
1001 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1003 rte_strerror(EINVAL));
1005 /* check node id and parent id*/
1006 level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
1007 if (level_of_node_id != level_id)
1008 return -rte_tm_error_set(error,
1010 RTE_TM_ERROR_TYPE_NODE_ID,
1012 rte_strerror(EINVAL));
1013 node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
1014 parent_level_id = parent_node_id / IPN3KE_TM_NODE_LEVEL_MOD;
/* Root (port) node: index must match the tm id and have no parent. */
1016 case IPN3KE_TM_NODE_LEVEL_PORT:
1017 if (node_index != tm_id)
1018 return -rte_tm_error_set(error,
1020 RTE_TM_ERROR_TYPE_NODE_ID,
1022 rte_strerror(EINVAL));
1023 if (parent_node_id != RTE_TM_NODE_ID_NULL)
1024 return -rte_tm_error_set(error,
1026 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1028 rte_strerror(EINVAL));
/* VT node: parent must be a port-level node. */
1031 case IPN3KE_TM_NODE_LEVEL_VT:
1032 if (node_index >= IPN3KE_TM_VT_NODE_NUM)
1033 return -rte_tm_error_set(error,
1035 RTE_TM_ERROR_TYPE_NODE_ID,
1037 rte_strerror(EINVAL));
1038 if (parent_level_id != IPN3KE_TM_NODE_LEVEL_PORT)
1039 return -rte_tm_error_set(error,
1041 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1043 rte_strerror(EINVAL));
/* COS (leaf) node: parent must be a VT-level node. */
1046 case IPN3KE_TM_NODE_LEVEL_COS:
1047 if (node_index >= IPN3KE_TM_COS_NODE_NUM)
1048 return -rte_tm_error_set(error,
1050 RTE_TM_ERROR_TYPE_NODE_ID,
1052 rte_strerror(EINVAL));
1053 if (parent_level_id != IPN3KE_TM_NODE_LEVEL_VT)
1054 return -rte_tm_error_set(error,
1056 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1058 rte_strerror(EINVAL));
1061 return -rte_tm_error_set(error,
1063 RTE_TM_ERROR_TYPE_LEVEL_ID,
1065 rte_strerror(EINVAL));
1068 /* params: must not be NULL */
1070 return -rte_tm_error_set(error,
1072 RTE_TM_ERROR_TYPE_NODE_PARAMS,
1074 rte_strerror(EINVAL));
1075 /* No shared shapers */
1076 if (params->n_shared_shapers != 0)
1077 return -rte_tm_error_set(error,
1079 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1081 rte_strerror(EINVAL));
/* Check that the requested parent is the one the fixed HW topology allows:
 * a VT node may only mount under its own tm's port node, and a COS node may
 * only mount under the VT node given by node_index / IPN3KE_TM_NODE_MOUNT_MAX
 * (COS queues are statically grouped under VTs).  Port nodes have no parent.
 */
1086 ipn3ke_tm_node_add_check_mount(uint32_t tm_id,
1087 uint32_t node_id, uint32_t parent_node_id, uint32_t level_id,
1088 struct rte_tm_error *error)
1090 uint32_t node_index;
1091 uint32_t parent_index;
1092 uint32_t parent_index1;
1094 node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
1095 parent_index = parent_node_id % IPN3KE_TM_NODE_LEVEL_MOD;
/* The only legal VT parent for this COS index. */
1096 parent_index1 = node_index / IPN3KE_TM_NODE_MOUNT_MAX;
1098 case IPN3KE_TM_NODE_LEVEL_PORT:
1101 case IPN3KE_TM_NODE_LEVEL_VT:
1102 if (parent_index != tm_id)
1103 return -rte_tm_error_set(error,
1105 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1107 rte_strerror(EINVAL));
1110 case IPN3KE_TM_NODE_LEVEL_COS:
1111 if (parent_index != parent_index1)
1112 return -rte_tm_error_set(error,
1114 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1116 rte_strerror(EINVAL));
1119 return -rte_tm_error_set(error,
1121 RTE_TM_ERROR_TYPE_LEVEL_ID,
1123 rte_strerror(EINVAL));
1129 /* Traffic manager node add */
/* rte_tm node_add op: stage a node addition (not committed to HW here).
 * Flow: reject if hierarchy is frozen -> validate parameters and mount
 * point -> require shaper_profile_id == node_id (per-node profiles) ->
 * the node itself must be IDLE or pending-delete -> the parent must be
 * pending-add or committed -> mark the node CONFIGURED_ADD, link it to its
 * parent, queue it on the per-level commit list, and record priority/weight
 * plus an optional tail-drop profile for leaves.
 */
1131 ipn3ke_tm_node_add(struct rte_eth_dev *dev,
1132 uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
1133 uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
1134 struct rte_tm_error *error)
1136 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1137 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1139 struct ipn3ke_tm_node *n, *parent_node;
1140 uint32_t node_state, state_mask;
1144 if (tm->hierarchy_frozen)
1145 return -rte_tm_error_set(error,
1147 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1149 rte_strerror(EBUSY));
1153 status = ipn3ke_tm_node_add_check_parameter(tm_id,
1164 status = ipn3ke_tm_node_add_check_mount(tm_id,
1172 /* Shaper profile ID must not be NONE. */
/* Profiles live inside nodes, so the id must equal the node id (or NONE). */
1173 if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
1174 params->shaper_profile_id != node_id)
1175 return -rte_tm_error_set(error,
1177 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1179 rte_strerror(EINVAL));
1181 /* Memory allocation */
1183 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_IDLE);
1184 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_DEL);
1185 n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
1187 return -rte_tm_error_set(error,
1189 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1191 rte_strerror(EINVAL));
1192 node_state = n->node_state;
1194 /* Check parent node */
1196 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
1197 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
1198 if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1199 parent_node = ipn3ke_hw_tm_node_search(hw,
1204 return -rte_tm_error_set(error,
1206 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1208 rte_strerror(EINVAL));
/* Stage the node per level; adding a pending-delete node just cancels it. */
1214 case IPN3KE_TM_NODE_LEVEL_PORT:
1215 n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
1217 tm->h.port_commit_node = n;
1220 case IPN3KE_TM_NODE_LEVEL_VT:
1221 if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
1222 TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);
1224 parent_node->n_children++;
1226 } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1228 parent_node->n_children++;
1231 n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
1232 n->parent_node_id = parent_node_id;
1234 n->parent_node = parent_node;
1238 case IPN3KE_TM_NODE_LEVEL_COS:
1239 if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
1240 TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
1243 parent_node->n_children++;
1244 tm->h.n_cos_nodes++;
1245 } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1247 parent_node->n_children++;
1248 tm->h.n_cos_nodes++;
1250 n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
1251 n->parent_node_id = parent_node_id;
1253 n->parent_node = parent_node;
1257 return -rte_tm_error_set(error,
1259 RTE_TM_ERROR_TYPE_LEVEL_ID,
1261 rte_strerror(EINVAL));
1265 n->priority = priority;
/* Leaves may reference a tail-drop profile via the WRED profile id. */
1268 if (n->level == IPN3KE_TM_NODE_LEVEL_COS &&
1269 params->leaf.cman == RTE_TM_CMAN_TAIL_DROP)
1270 n->tdrop_profile = ipn3ke_hw_tm_tdrop_profile_search(hw,
1271 params->leaf.wred.wred_profile_id);
1273 rte_memcpy(&n->params, params, sizeof(n->params));
/* Validate node_delete() arguments: node_id not NULL, its encoded level
 * known, and its index in range for that level (the port index must match
 * the tm id).  Returns 0 on success, negative with *error filled otherwise.
 */
1279 ipn3ke_tm_node_del_check_parameter(uint32_t tm_id,
1280 uint32_t node_id, struct rte_tm_error *error)
1282 uint32_t level_of_node_id;
1283 uint32_t node_index;
1285 if (node_id == RTE_TM_NODE_ID_NULL)
1286 return -rte_tm_error_set(error,
1288 RTE_TM_ERROR_TYPE_NODE_ID,
1290 rte_strerror(EINVAL));
1292 /* check node id and parent id*/
1293 level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
1294 node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
1295 switch (level_of_node_id) {
1296 case IPN3KE_TM_NODE_LEVEL_PORT:
1297 if (node_index != tm_id)
1298 return -rte_tm_error_set(error,
1300 RTE_TM_ERROR_TYPE_NODE_ID,
1302 rte_strerror(EINVAL));
1305 case IPN3KE_TM_NODE_LEVEL_VT:
1306 if (node_index >= IPN3KE_TM_VT_NODE_NUM)
1307 return -rte_tm_error_set(error,
1309 RTE_TM_ERROR_TYPE_NODE_ID,
1311 rte_strerror(EINVAL));
1314 case IPN3KE_TM_NODE_LEVEL_COS:
1315 if (node_index >= IPN3KE_TM_COS_NODE_NUM)
1316 return -rte_tm_error_set(error,
1318 RTE_TM_ERROR_TYPE_NODE_ID,
1320 rte_strerror(EINVAL));
1323 return -rte_tm_error_set(error,
1325 RTE_TM_ERROR_TYPE_LEVEL_ID,
1327 rte_strerror(EINVAL));
1333 /* Traffic manager node delete */
/* rte_tm node_delete op: stage a node deletion (not committed to HW here).
 * Flow: reject if hierarchy is frozen -> validate the id -> node must be
 * pending-add or committed and must have no children -> verify parent
 * linkage -> mark CONFIGURED_DEL, unlink from the parent's child list,
 * queue committed nodes on the per-level commit list, and decrement the
 * parent/level counters.  Deleting a pending-add node just cancels it.
 */
1335 ipn3ke_pmd_tm_node_delete(struct rte_eth_dev *dev,
1336 uint32_t node_id, struct rte_tm_error *error)
1338 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1339 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1340 struct ipn3ke_tm_node *n, *parent_node;
1343 uint32_t level_of_node_id;
1344 uint32_t node_state;
1345 uint32_t state_mask;
1347 /* Check hierarchy changes are currently allowed */
1348 if (tm->hierarchy_frozen)
1349 return -rte_tm_error_set(error,
1351 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1353 rte_strerror(EBUSY));
1357 status = ipn3ke_tm_node_del_check_parameter(tm_id,
1363 /* Check existing */
1365 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
1366 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
1367 n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
1369 return -rte_tm_error_set(error,
1371 RTE_TM_ERROR_TYPE_NODE_ID,
1373 rte_strerror(EINVAL));
/* Children must be deleted first. */
1375 if (n->n_children > 0)
1376 return -rte_tm_error_set(error,
1378 RTE_TM_ERROR_TYPE_NODE_ID,
1380 rte_strerror(EINVAL));
1382 node_state = n->node_state;
1384 level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
1386 /* Check parent node */
1387 if (n->parent_node_id != RTE_TM_NODE_ID_NULL) {
1389 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
1390 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
1391 parent_node = ipn3ke_hw_tm_node_search(hw,
1396 return -rte_tm_error_set(error,
1398 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1400 rte_strerror(EINVAL));
/* Cached parent pointer must agree with the id-based lookup. */
1401 if (n->parent_node != parent_node)
1402 return -rte_tm_error_set(error,
1404 RTE_TM_ERROR_TYPE_NODE_ID,
1406 rte_strerror(EINVAL));
1411 switch (level_of_node_id) {
1412 case IPN3KE_TM_NODE_LEVEL_PORT:
1413 if (tm->h.port_node != n)
1414 return -rte_tm_error_set(error,
1416 RTE_TM_ERROR_TYPE_NODE_ID,
1418 rte_strerror(EINVAL));
1419 n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
1420 tm->h.port_commit_node = n;
1424 case IPN3KE_TM_NODE_LEVEL_VT:
1425 if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
1427 TAILQ_REMOVE(&parent_node->children_node_list,
1429 TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);
1431 parent_node->n_children--;
1433 } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1435 parent_node->n_children--;
1438 n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
1442 case IPN3KE_TM_NODE_LEVEL_COS:
1443 if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
1445 TAILQ_REMOVE(&parent_node->children_node_list,
1447 TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
1450 parent_node->n_children--;
1451 tm->h.n_cos_nodes--;
1452 } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1454 parent_node->n_children--;
1455 tm->h.n_cos_nodes--;
1457 n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
1461 return -rte_tm_error_set(error,
1463 RTE_TM_ERROR_TYPE_LEVEL_ID,
1465 rte_strerror(EINVAL));
/*
 * Validate every staged node on the commit lists before any hardware write.
 * Three passes: COS nodes, VT nodes, then the staged port (root) node.
 * Returns 0 when all staged changes are consistent, otherwise -EINVAL via
 * rte_tm_error_set().
 *
 * NOTE(review): elided listing — some condition sub-expressions and braces
 * between the visible lines are missing from this view.
 */
1472 ipn3ke_tm_hierarchy_commit_check(struct rte_eth_dev *dev,
1473 struct rte_tm_error *error)
1475 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1477 struct ipn3ke_tm_node_list *nl;
1478 struct ipn3ke_tm_node *n, *parent_node;
/* Pass 1: staged COS-level nodes. */
1482 nl = &tm->h.cos_commit_node_list;
1483 TAILQ_FOREACH(n, nl, node) {
1484 parent_node = n->parent_node;
1485 if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
/* An added COS node needs a live (non-deleted, non-idle) parent, the
 * correct level and tm id, and a valid shaper profile. */
1486 if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
1487 n->level != IPN3KE_TM_NODE_LEVEL_COS ||
1488 n->tm_id != tm_id ||
1489 parent_node == NULL ||
1491 parent_node->node_state ==
1492 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
1494 parent_node->node_state ==
1495 IPN3KE_TM_NODE_STATE_IDLE) ||
1496 n->shaper_profile.valid == 0) {
1497 return -rte_tm_error_set(error,
1499 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1501 rte_strerror(EINVAL));
1503 } else if (n->node_state ==
1504 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
/* A COS node staged for delete must be childless. */
1505 if (n->level != IPN3KE_TM_NODE_LEVEL_COS ||
1506 n->n_children != 0) {
1507 return -rte_tm_error_set(error,
1509 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1511 rte_strerror(EINVAL));
/* Pass 2: staged VT-level nodes — same add rules as COS; a VT node in
 * CONFIGURED_DEL state is rejected outright here. */
1516 nl = &tm->h.vt_commit_node_list;
1517 TAILQ_FOREACH(n, nl, node) {
1518 parent_node = n->parent_node;
1519 if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1520 if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
1521 n->level != IPN3KE_TM_NODE_LEVEL_VT ||
1522 n->tm_id != tm_id ||
1523 parent_node == NULL ||
1525 parent_node->node_state ==
1526 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
1528 parent_node->node_state ==
1529 IPN3KE_TM_NODE_STATE_IDLE) ||
1530 n->shaper_profile.valid == 0) {
1531 return -rte_tm_error_set(error,
1533 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1535 rte_strerror(EINVAL));
1537 } else if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL)
1538 return -rte_tm_error_set(error,
1540 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1542 rte_strerror(EINVAL));
/* Pass 3: the staged port node — it is the hierarchy root, so it must
 * have no parent and a valid shaper profile.
 * NOTE(review): the guard on line 1546 (presumably a NULL check on 'n')
 * is elided from this listing — confirm against the full file. */
1545 n = tm->h.port_commit_node;
1547 (n->parent_node_id != RTE_TM_NODE_ID_NULL ||
1548 n->level != IPN3KE_TM_NODE_LEVEL_PORT ||
1549 n->tm_id != tm_id ||
1550 n->parent_node != NULL ||
1551 n->shaper_profile.valid == 0)) {
1552 return -rte_tm_error_set(error,
1554 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1556 rte_strerror(EINVAL));
/*
 * Program one TM node into the FPGA QoS registers.  Dispatches on the
 * node's level: L3 registers for the port node, L2 for VT, L1 for COS.
 * For each level it writes the scheduling type, scheduling weight and —
 * when the node carries a valid shaper profile — the shaper rate encoded
 * as an exponent/mantissa pair packed into a single register.
 *
 * NOTE(review): elided listing — the switch header, register offset/data
 * argument lines and 'break;'s between the visible lines are missing here.
 */
1563 ipn3ke_hw_tm_node_wr(struct ipn3ke_hw *hw,
1564 struct ipn3ke_tm_node *n,
1565 struct ipn3ke_tm_node *parent_node)
1572 case IPN3KE_TM_NODE_LEVEL_PORT:
/* Port (L3) scheduling type. */
1576 IPN3KE_MASK_WRITE_REG(hw,
1577 IPN3KE_QOS_TYPE_L3_X,
1580 IPN3KE_QOS_TYPE_MASK);
/* Port (L3) scheduling weight. */
1585 IPN3KE_MASK_WRITE_REG(hw,
1586 IPN3KE_QOS_SCH_WT_L3_X,
1589 IPN3KE_QOS_SCH_WT_MASK);
/* Port (L3) shaper: exponent in bits [>=10], mantissa in low bits. */
1594 if (n->shaper_profile.valid)
1595 IPN3KE_MASK_WRITE_REG(hw,
1596 IPN3KE_QOS_SHAP_WT_L3_X,
1598 ((n->shaper_profile.e << 10) |
1599 n->shaper_profile.m),
1600 IPN3KE_QOS_SHAP_WT_MASK);
1603 case IPN3KE_TM_NODE_LEVEL_VT:
/* VT (L2) scheduling type. */
1607 IPN3KE_MASK_WRITE_REG(hw,
1608 IPN3KE_QOS_TYPE_L2_X,
1611 IPN3KE_QOS_TYPE_MASK);
/* VT (L2) scheduling weight. */
1616 IPN3KE_MASK_WRITE_REG(hw,
1617 IPN3KE_QOS_SCH_WT_L2_X,
1620 IPN3KE_QOS_SCH_WT_MASK);
/* VT (L2) shaper, same e/m packing as the port level. */
1625 if (n->shaper_profile.valid)
1626 IPN3KE_MASK_WRITE_REG(hw,
1627 IPN3KE_QOS_SHAP_WT_L2_X,
1629 ((n->shaper_profile.e << 10) |
1630 n->shaper_profile.m),
1631 IPN3KE_QOS_SHAP_WT_MASK);
/* Map this VT node onto its parent port node. */
1637 IPN3KE_MASK_WRITE_REG(hw,
1638 IPN3KE_QOS_MAP_L2_X,
1640 parent_node->node_index,
1641 IPN3KE_QOS_MAP_L2_MASK);
1644 case IPN3KE_TM_NODE_LEVEL_COS:
1646 * Configure Tail Drop mapping
/* Bind the queue to its tail-drop profile only when one is attached
 * and valid. */
1648 if (n->tdrop_profile && n->tdrop_profile->valid) {
1649 IPN3KE_MASK_WRITE_REG(hw,
1650 IPN3KE_CCB_QPROFILE_Q,
1652 n->tdrop_profile->tdrop_profile_id,
1653 IPN3KE_CCB_QPROFILE_MASK);
/* COS (L1) scheduling type. */
1659 IPN3KE_MASK_WRITE_REG(hw,
1660 IPN3KE_QOS_TYPE_L1_X,
1663 IPN3KE_QOS_TYPE_MASK);
/* COS (L1) scheduling weight. */
1668 IPN3KE_MASK_WRITE_REG(hw,
1669 IPN3KE_QOS_SCH_WT_L1_X,
1672 IPN3KE_QOS_SCH_WT_MASK);
/* COS (L1) shaper, same e/m packing as the other levels. */
1677 if (n->shaper_profile.valid)
1678 IPN3KE_MASK_WRITE_REG(hw,
1679 IPN3KE_QOS_SHAP_WT_L1_X,
1681 ((n->shaper_profile.e << 10) |
1682 n->shaper_profile.m),
1683 IPN3KE_QOS_SHAP_WT_MASK);
1686 * Configure COS queue to port
/* Busy-wait until the QM UID config interface is idle, write the
 * queue-to-port mapping (grandparent = port node index, bit 8 appears
 * to be an enable/valid flag — confirm against hardware spec), then
 * poll again until the write completes.
 * NOTE(review): unbounded polling loops — no timeout visible here. */
1688 while (IPN3KE_MASK_READ_REG(hw,
1689 IPN3KE_QM_UID_CONFIG_CTRL,
1694 if (parent_node && parent_node->parent_node)
1695 IPN3KE_MASK_WRITE_REG(hw,
1696 IPN3KE_QM_UID_CONFIG_DATA,
1698 (1 << 8 | parent_node->parent_node->node_index),
1701 IPN3KE_MASK_WRITE_REG(hw,
1702 IPN3KE_QM_UID_CONFIG_CTRL,
1707 while (IPN3KE_MASK_READ_REG(hw,
1708 IPN3KE_QM_UID_CONFIG_CTRL,
/* Map this COS node onto its parent VT node. */
1717 IPN3KE_MASK_WRITE_REG(hw,
1718 IPN3KE_QOS_MAP_L1_X,
1720 parent_node->node_index,
1721 IPN3KE_QOS_MAP_L1_MASK);
/*
 * Apply all staged hierarchy changes to hardware.  Processes the staged
 * port node, then the VT commit list, then the COS commit list: adds are
 * moved to COMMITTED state and relinked under their parents, deletes are
 * reset to IDLE/defaults, and each touched node is written to the FPGA
 * via ipn3ke_hw_tm_node_wr().  Returns -EINVAL if a staged node is in an
 * unexpected state.
 *
 * NOTE(review): elided listing — the guard around 'n' (line 1741),
 * closing braces and some statements between visible lines are missing.
 */
1732 ipn3ke_tm_hierarchy_hw_commit(struct rte_eth_dev *dev,
1733 struct rte_tm_error *error)
1735 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1736 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1737 struct ipn3ke_tm_node_list *nl;
1738 struct ipn3ke_tm_node *n, *nn, *parent_node;
/* Stage 1: the port (root) node, if one is staged. */
1740 n = tm->h.port_commit_node;
1742 if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1743 tm->h.port_commit_node = NULL;
1745 n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
1746 } else if (n->node_state ==
1747 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
/* Delete: clear the staged pointer and reset the node to defaults. */
1748 tm->h.port_commit_node = NULL;
1750 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1751 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1753 n->tm_id = RTE_TM_NODE_ID_NULL;
1755 return -rte_tm_error_set(error,
1757 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1759 rte_strerror(EINVAL));
1761 parent_node = n->parent_node;
1762 ipn3ke_hw_tm_node_wr(hw, n, parent_node);
/* Stage 2: VT-level staged nodes.  TAILQ_FOREACH is unsafe here because
 * nodes are removed while iterating, hence the explicit nn lookahead. */
1765 nl = &tm->h.vt_commit_node_list;
1766 for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
1767 nn = TAILQ_NEXT(n, node);
1768 if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
/* Add: commit and relink the node under its parent. */
1769 n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
1770 parent_node = n->parent_node;
1771 TAILQ_REMOVE(nl, n, node);
1772 TAILQ_INSERT_TAIL(&parent_node->children_node_list,
1774 } else if (n->node_state ==
1775 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
/* Delete: detach and reset the node to defaults. */
1776 parent_node = n->parent_node;
1777 TAILQ_REMOVE(nl, n, node);
1779 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1780 n->parent_node_id = RTE_TM_NODE_ID_NULL;
1781 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1783 n->tm_id = RTE_TM_NODE_ID_NULL;
1784 n->parent_node = NULL;
1786 return -rte_tm_error_set(error,
1788 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1790 rte_strerror(EINVAL));
1792 ipn3ke_hw_tm_node_wr(hw, n, parent_node);
/* Stage 3: COS-level staged nodes, same pattern as the VT pass, plus
 * releasing the tail-drop profile reference on delete. */
1795 nl = &tm->h.cos_commit_node_list;
1796 for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
1797 nn = TAILQ_NEXT(n, node);
1798 if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1799 n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
1800 parent_node = n->parent_node;
1801 TAILQ_REMOVE(nl, n, node);
1802 TAILQ_INSERT_TAIL(&parent_node->children_node_list,
1804 } else if (n->node_state ==
1805 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
/* NOTE(review): node_state is assigned IDLE twice in this branch
 * (lines 1806 and 1810) — redundant but harmless. */
1806 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1807 parent_node = n->parent_node;
1808 TAILQ_REMOVE(nl, n, node);
1810 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1811 n->parent_node_id = RTE_TM_NODE_ID_NULL;
1812 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1814 n->tm_id = RTE_TM_NODE_ID_NULL;
1815 n->parent_node = NULL;
/* Drop this queue's reference on its tail-drop profile. */
1817 if (n->tdrop_profile)
1818 n->tdrop_profile->n_users--;
1820 return -rte_tm_error_set(error,
1822 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1824 rte_strerror(EINVAL));
1826 ipn3ke_hw_tm_node_wr(hw, n, parent_node);
/*
 * Discard all staged (uncommitted) hierarchy changes: reset the staged
 * port node and every node on the VT and COS commit lists back to IDLE
 * with default attributes, and empty the commit lists.  Used to roll back
 * after a failed commit check (clear-on-fail path).
 *
 * NOTE(review): elided listing — guards and closing braces between the
 * visible lines are missing from this view.
 */
1833 ipn3ke_tm_hierarchy_commit_clear(struct rte_eth_dev *dev)
1835 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1836 struct ipn3ke_tm_node_list *nl;
1837 struct ipn3ke_tm_node *n;
1838 struct ipn3ke_tm_node *nn;
/* Reset the staged port node and forget it. */
1840 n = tm->h.port_commit_node;
1842 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1843 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1845 n->tm_id = RTE_TM_NODE_ID_NULL;
1848 tm->h.port_commit_node = NULL;
/* Drain the VT commit list; nn lookahead because nodes are removed
 * while iterating. */
1851 nl = &tm->h.vt_commit_node_list;
1852 for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
1853 nn = TAILQ_NEXT(n, node);
1855 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1856 n->parent_node_id = RTE_TM_NODE_ID_NULL;
1857 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1859 n->tm_id = RTE_TM_NODE_ID_NULL;
1860 n->parent_node = NULL;
1864 TAILQ_REMOVE(nl, n, node);
/* Drain the COS commit list, also undoing the COS node count. */
1867 nl = &tm->h.cos_commit_node_list;
1868 for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
1869 nn = TAILQ_NEXT(n, node);
1871 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1872 n->parent_node_id = RTE_TM_NODE_ID_NULL;
1873 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1875 n->tm_id = RTE_TM_NODE_ID_NULL;
1876 n->parent_node = NULL;
1877 tm->h.n_cos_nodes--;
1879 TAILQ_REMOVE(nl, n, node);
/*
 * Debug helper: dump the committed HQoS tree (port -> VT -> COS) to the
 * PMD debug log, printing each node's index and state name.
 */
1886 ipn3ke_tm_show(struct rte_eth_dev *dev)
1888 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1890 struct ipn3ke_tm_node_list *vt_nl, *cos_nl;
1891 struct ipn3ke_tm_node *port_n, *vt_n, *cos_n;
/* Human-readable names indexed by node_state (first entry: Idle). */
1892 const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",
1899 IPN3KE_AFU_PMD_DEBUG("***HQoS Tree(%d)***\n", tm_id);
1901 port_n = tm->h.port_node;
1902 IPN3KE_AFU_PMD_DEBUG("Port: (%d|%s)\n", port_n->node_index,
1903 str_state[port_n->node_state]);
/* Walk each VT node and its COS children. */
1905 vt_nl = &tm->h.port_node->children_node_list;
1906 TAILQ_FOREACH(vt_n, vt_nl, node) {
1907 cos_nl = &vt_n->children_node_list;
1908 IPN3KE_AFU_PMD_DEBUG(" VT%d: ", vt_n->node_index);
1909 TAILQ_FOREACH(cos_n, cos_nl, node) {
/* NOTE(review): the mismatch check and its log level (ERR for a
 * parent-id inconsistency) look intentional, but the expected-id
 * formula (index + LEVEL_MOD) should be confirmed against the node
 * id encoding used elsewhere in this file. */
1910 if (cos_n->parent_node_id !=
1911 (vt_n->node_index + IPN3KE_TM_NODE_LEVEL_MOD))
1912 IPN3KE_AFU_PMD_ERR("(%d|%s), ",
1914 str_state[cos_n->node_state]);
1916 IPN3KE_AFU_PMD_DEBUG("\n");
/*
 * Debug helper: dump the pending commit state — the staged port node and
 * the VT/COS commit lists — printing each node's index and state name.
 * NOTE(review): "commmit" (sic) in the function name; kept as-is because
 * callers reference this spelling.
 */
1921 ipn3ke_tm_show_commmit(struct rte_eth_dev *dev)
1923 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1925 struct ipn3ke_tm_node_list *nl;
1926 struct ipn3ke_tm_node *n;
/* Human-readable names indexed by node_state (first entry: Idle). */
1927 const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",
1934 IPN3KE_AFU_PMD_DEBUG("***Commit Tree(%d)***\n", tm_id);
1935 n = tm->h.port_commit_node;
1936 IPN3KE_AFU_PMD_DEBUG("Port: ");
1938 IPN3KE_AFU_PMD_DEBUG("(%d|%s)",
1940 str_state[n->node_state]);
1941 IPN3KE_AFU_PMD_DEBUG("\n");
/* Staged VT-level nodes. */
1943 nl = &tm->h.vt_commit_node_list;
1944 IPN3KE_AFU_PMD_DEBUG("VT : ");
1945 TAILQ_FOREACH(n, nl, node) {
1946 IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
1948 str_state[n->node_state]);
1950 IPN3KE_AFU_PMD_DEBUG("\n");
/* Staged COS-level nodes. */
1952 nl = &tm->h.cos_commit_node_list;
1953 IPN3KE_AFU_PMD_DEBUG("COS : ");
1954 TAILQ_FOREACH(n, nl, node) {
1955 IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
1957 str_state[n->node_state]);
1959 IPN3KE_AFU_PMD_DEBUG("\n");
1962 /* Traffic manager hierarchy commit */
/*
 * rte_tm hierarchy_commit callback: validate all staged changes, then
 * push them to hardware.  On validation failure the staged state is
 * cleared (presumably gated on 'clear_on_fail' in an elided line — TODO
 * confirm) and the error from the check is propagated.  Returns -EBUSY
 * if the hierarchy is already frozen.
 */
1964 ipn3ke_tm_hierarchy_commit(struct rte_eth_dev *dev,
1965 int clear_on_fail, struct rte_tm_error *error)
1967 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1971 if (tm->hierarchy_frozen)
1972 return -rte_tm_error_set(error,
1974 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1976 rte_strerror(EBUSY));
/* Debug dump of the pending commit lists before validation. */
1978 ipn3ke_tm_show_commmit(dev);
1980 status = ipn3ke_tm_hierarchy_commit_check(dev, error);
/* On check failure: roll back the staged changes. */
1983 ipn3ke_tm_hierarchy_commit_clear(dev);
/* Apply staged changes to hardware and dump the resulting tree. */
1987 ipn3ke_tm_hierarchy_hw_commit(dev, error);
1988 ipn3ke_tm_show(dev);
/*
 * rte_tm driver-ops table for the ipn3ke PMD.  WRED hooks are backed by
 * the tail-drop profile implementation; entries left NULL are features
 * this PMD does not support.
 */
1993 const struct rte_tm_ops ipn3ke_tm_ops = {
1994 .node_type_get = ipn3ke_pmd_tm_node_type_get,
1995 .capabilities_get = ipn3ke_tm_capabilities_get,
1996 .level_capabilities_get = ipn3ke_tm_level_capabilities_get,
1997 .node_capabilities_get = ipn3ke_tm_node_capabilities_get,
/* Congestion management: WRED API mapped onto tail-drop profiles. */
1999 .wred_profile_add = ipn3ke_tm_tdrop_profile_add,
2000 .wred_profile_delete = ipn3ke_tm_tdrop_profile_delete,
2001 .shared_wred_context_add_update = NULL,
2002 .shared_wred_context_delete = NULL,
/* Shaping. */
2004 .shaper_profile_add = ipn3ke_tm_shaper_profile_add,
2005 .shaper_profile_delete = ipn3ke_tm_shaper_profile_delete,
2006 .shared_shaper_add_update = NULL,
2007 .shared_shaper_delete = NULL,
/* Hierarchy construction and commit. */
2009 .node_add = ipn3ke_tm_node_add,
2010 .node_delete = ipn3ke_pmd_tm_node_delete,
2011 .node_suspend = NULL,
2012 .node_resume = NULL,
2013 .hierarchy_commit = ipn3ke_tm_hierarchy_commit,
/* Runtime node updates: not supported. */
2015 .node_parent_update = NULL,
2016 .node_shaper_update = NULL,
2017 .node_shared_shaper_update = NULL,
2018 .node_stats_update = NULL,
2019 .node_wfq_weight_mode_update = NULL,
2020 .node_cman_update = NULL,
2021 .node_wred_context_update = NULL,
2022 .node_shared_wred_context_update = NULL,
/* Statistics: not supported. */
2024 .node_stats_read = NULL,
2028 ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev,
2031 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
2032 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
2033 struct rte_eth_dev *i40e_pf_eth;
2034 const struct rte_tm_ops *ops;
2040 *(const void **)arg = &ipn3ke_tm_ops;
2041 } else if (rpst->i40e_pf_eth) {
2042 i40e_pf_eth = rpst->i40e_pf_eth;
2043 if (i40e_pf_eth->dev_ops->tm_ops_get == NULL ||
2044 i40e_pf_eth->dev_ops->tm_ops_get(i40e_pf_eth,
2049 *(const void **)arg = ops;