/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_tm_driver.h>
#include <rte_sched.h>
#include <rte_ethdev_driver.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_bus_ifpga.h>
#include <ifpga_logs.h>

#include "ipn3ke_rawdev_api.h"
#include "ipn3ke_flow.h"
#include "ipn3ke_logs.h"
#include "ipn3ke_ethdev.h"

#define BYTES_IN_MBPS (1000 * 1000 / 8)
#define SUBPORT_TC_PERIOD 10
#define PIPE_TC_PERIOD 40
struct ipn3ke_tm_shaper_params_range_type {
	uint32_t m1;
	uint32_t m2;
	uint32_t exp;
	uint32_t exp2;
	uint32_t low;
	uint32_t high;
};

struct ipn3ke_tm_shaper_params_range_type ipn3ke_tm_shaper_params_range[] = {
	{  4,    7,  0,     1,       16,        28},
	{  8,   15,  0,     1,       32,        60},
	{ 16,   31,  0,     1,       64,       124},
	{ 32,   63,  0,     1,      128,       252},
	{ 64,  127,  0,     1,      256,       508},
	{128,  255,  0,     1,      512,      1020},
	{256,  511,  0,     1,     1024,      2044},
	{512, 1023,  0,     1,     2048,      4092},
	{512, 1023,  1,     2,     4096,      8184},
	{512, 1023,  2,     4,     8192,     16368},
	{512, 1023,  3,     8,    16384,     32736},
	{512, 1023,  4,    16,    32768,     65472},
	{512, 1023,  5,    32,    65536,    130944},
	{512, 1023,  6,    64,   131072,    261888},
	{512, 1023,  7,   128,   262144,    523776},
	{512, 1023,  8,   256,   524288,   1047552},
	{512, 1023,  9,   512,  1048576,   2095104},
	{512, 1023, 10,  1024,  2097152,   4190208},
	{512, 1023, 11,  2048,  4194304,   8380416},
	{512, 1023, 12,  4096,  8388608,  16760832},
	{512, 1023, 13,  8192, 16777216,  33521664},
	{512, 1023, 14, 16384, 33554432,  67043328},
	{512, 1023, 15, 32768, 67108864, 134086656},
};

#define IPN3KE_TM_SHAPER_RANGE_NUM (sizeof(ipn3ke_tm_shaper_params_range) / \
	sizeof(struct ipn3ke_tm_shaper_params_range_type))

#define IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX \
	(ipn3ke_tm_shaper_params_range[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)

#define IPN3KE_TM_SHAPER_PEAK_RATE_MAX \
	(ipn3ke_tm_shaper_params_range[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)
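/*
 * Rate encoding (assuming the field layout {m1, m2, exp, 2^exp, low,
 * high} above): each row gives a mantissa range [m1, m2] and an
 * exponent such that rate = 4 * m * 2^exp for rates in [low, high].
 * Worked example: a peak rate of 1000 falls into the row
 * {128, 255, 0, 1, 512, 1020} and is encoded by
 * ipn3ke_tm_shaper_param_trans() as m = (1000 / 4) / 1 = 250, e = 0,
 * and indeed 4 * 250 * 2^0 = 1000.
 */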
ipn3ke_hw_tm_init(struct ipn3ke_hw *hw)
#define SCRATCH_DATA 0xABCDEF
	struct ipn3ke_tm_node *nodes;
	struct ipn3ke_tm_tdrop_profile *tdrop_profile;
#if IPN3KE_TM_SCRATCH_RW
	uint32_t scratch_data;
	IPN3KE_MASK_WRITE_REG(hw,
	scratch_data = IPN3KE_MASK_READ_REG(hw,
	if (scratch_data != SCRATCH_DATA)

	/* alloc memory for all hierarchy nodes */
	node_num = hw->port_num +
			IPN3KE_TM_VT_NODE_NUM +
			IPN3KE_TM_COS_NODE_NUM;
	nodes = rte_zmalloc("ipn3ke_tm_nodes",
			sizeof(struct ipn3ke_tm_node) * node_num,

	/* alloc memory for Tail Drop Profile */
	tdrop_profile = rte_zmalloc("ipn3ke_tm_tdrop_profile",
			sizeof(struct ipn3ke_tm_tdrop_profile) *
			IPN3KE_TM_TDROP_PROFILE_NUM,
	if (!tdrop_profile) {

	hw->port_nodes = nodes;
	hw->vt_nodes = hw->port_nodes + hw->port_num;
	hw->cos_nodes = hw->vt_nodes + IPN3KE_TM_VT_NODE_NUM;
	hw->tdrop_profile = tdrop_profile;
	hw->tdrop_profile_num = IPN3KE_TM_TDROP_PROFILE_NUM;
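	/*
	 * All hierarchy nodes live in one zero-initialized array,
	 * partitioned as [0, port_num) port nodes, then
	 * IPN3KE_TM_VT_NODE_NUM VT nodes, then IPN3KE_TM_COS_NODE_NUM
	 * COS nodes, per the pointers set up above.
	 */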
	for (i = 0, nodes = hw->port_nodes;
		nodes->node_index = i;
		nodes->level = IPN3KE_TM_NODE_LEVEL_PORT;
		nodes->tm_id = RTE_TM_NODE_ID_NULL;
		nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
		nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		nodes->parent_node = NULL;
		nodes->shaper_profile.valid = 0;
		nodes->tdrop_profile = NULL;
		nodes->n_children = 0;
		TAILQ_INIT(&nodes->children_node_list);

	for (i = 0, nodes = hw->vt_nodes;
		i < IPN3KE_TM_VT_NODE_NUM;
		nodes->node_index = i;
		nodes->level = IPN3KE_TM_NODE_LEVEL_VT;
		nodes->tm_id = RTE_TM_NODE_ID_NULL;
		nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
		nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		nodes->parent_node = NULL;
		nodes->shaper_profile.valid = 0;
		nodes->tdrop_profile = NULL;
		nodes->n_children = 0;
		TAILQ_INIT(&nodes->children_node_list);

	for (i = 0, nodes = hw->cos_nodes;
		i < IPN3KE_TM_COS_NODE_NUM;
		nodes->node_index = i;
		nodes->level = IPN3KE_TM_NODE_LEVEL_COS;
		nodes->tm_id = RTE_TM_NODE_ID_NULL;
		nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
		nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		nodes->parent_node = NULL;
		nodes->shaper_profile.valid = 0;
		nodes->tdrop_profile = NULL;
		nodes->n_children = 0;
		TAILQ_INIT(&nodes->children_node_list);

	for (i = 0, tdrop_profile = hw->tdrop_profile;
		i < IPN3KE_TM_TDROP_PROFILE_NUM;
		i++, tdrop_profile++) {
		tdrop_profile->tdrop_profile_id = i;
		tdrop_profile->n_users = 0;
		tdrop_profile->valid = 0;

ipn3ke_tm_init(struct ipn3ke_rpst *rpst)
	struct ipn3ke_tm_internals *tm;
	struct ipn3ke_tm_node *port_node;

	port_node = &rpst->hw->port_nodes[rpst->port_id];
	tm->h.port_node = port_node;

	tm->h.n_shaper_profiles = 0;
	tm->h.n_tdrop_profiles = 0;
	tm->h.n_vt_nodes = 0;
	tm->h.n_cos_nodes = 0;

	tm->h.port_commit_node = NULL;
	TAILQ_INIT(&tm->h.vt_commit_node_list);
	TAILQ_INIT(&tm->h.cos_commit_node_list);

	tm->hierarchy_frozen = 0;
	tm->tm_id = rpst->port_id;
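	/*
	 * Per-port TM state: the hierarchy is a fixed three-level tree
	 * (port -> VT -> COS), and the TM instance ID is simply the
	 * representor port ID.
	 */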
static struct ipn3ke_tm_shaper_profile *
ipn3ke_hw_tm_shaper_profile_search(struct ipn3ke_hw *hw,
	uint32_t shaper_profile_id, struct rte_tm_error *error)
	struct ipn3ke_tm_shaper_profile *sp = NULL;
	uint32_t level_of_node_id;

	/* Shaper profile ID must not be NONE. */
	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE) {
		rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			rte_strerror(EINVAL));

	level_of_node_id = shaper_profile_id / IPN3KE_TM_NODE_LEVEL_MOD;
	node_index = shaper_profile_id % IPN3KE_TM_NODE_LEVEL_MOD;
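	/*
	 * ID encoding: shaper profile IDs reuse the node ID scheme,
	 * id = level * IPN3KE_TM_NODE_LEVEL_MOD + index. For example,
	 * assuming IPN3KE_TM_NODE_LEVEL_MOD is 100000, ID 100005
	 * decodes to level 1 (VT) and index 5.
	 */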
	switch (level_of_node_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		if (node_index >= hw->port_num)
			rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
				rte_strerror(EEXIST));
		sp = &hw->port_nodes[node_index].shaper_profile;

	case IPN3KE_TM_NODE_LEVEL_VT:
		if (node_index >= IPN3KE_TM_VT_NODE_NUM)
			rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
				rte_strerror(EEXIST));
		sp = &hw->vt_nodes[node_index].shaper_profile;

	case IPN3KE_TM_NODE_LEVEL_COS:
		if (node_index >= IPN3KE_TM_COS_NODE_NUM)
			rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
				rte_strerror(EEXIST));
		sp = &hw->cos_nodes[node_index].shaper_profile;

		rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			rte_strerror(EEXIST));

static struct ipn3ke_tm_tdrop_profile *
ipn3ke_hw_tm_tdrop_profile_search(struct ipn3ke_hw *hw,
	uint32_t tdrop_profile_id)
	struct ipn3ke_tm_tdrop_profile *tdrop_profile;

	if (tdrop_profile_id >= hw->tdrop_profile_num)

	tdrop_profile = &hw->tdrop_profile[tdrop_profile_id];
	if (tdrop_profile->valid)
		return tdrop_profile;

static struct ipn3ke_tm_node *
ipn3ke_hw_tm_node_search(struct ipn3ke_hw *hw, uint32_t tm_id,
	uint32_t node_id, uint32_t state_mask)
	uint32_t level_of_node_id;
	struct ipn3ke_tm_node *n;

	level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
	node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;

	switch (level_of_node_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		if (node_index >= hw->port_num)
		n = &hw->port_nodes[node_index];

	case IPN3KE_TM_NODE_LEVEL_VT:
		if (node_index >= IPN3KE_TM_VT_NODE_NUM)
		n = &hw->vt_nodes[node_index];

	case IPN3KE_TM_NODE_LEVEL_COS:
		if (node_index >= IPN3KE_TM_COS_NODE_NUM)
		n = &hw->cos_nodes[node_index];
	/* Check tm node status */
	if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE) {
		if (n->tm_id != RTE_TM_NODE_ID_NULL ||
			n->parent_node_id != RTE_TM_NODE_ID_NULL ||
			n->parent_node != NULL ||
			IPN3KE_AFU_PMD_ERR("TM node %u: inconsistent idle state", node_index);
	} else if (n->node_state < IPN3KE_TM_NODE_STATE_MAX) {
		if (n->tm_id == RTE_TM_NODE_ID_NULL ||
			(level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
			n->parent_node_id == RTE_TM_NODE_ID_NULL) ||
			(level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
			n->parent_node == NULL)) {
			IPN3KE_AFU_PMD_ERR("TM node %u: inconsistent configured state", node_index);
		IPN3KE_AFU_PMD_ERR("TM node %u: invalid state", node_index);

	if (IPN3KE_BIT_ISSET(state_mask, n->node_state)) {
		if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE)
		else if (n->tm_id == tm_id)
/* Traffic manager node type get */
ipn3ke_pmd_tm_node_type_get(struct rte_eth_dev *dev,
	uint32_t node_id, int *is_leaf, struct rte_tm_error *error)
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_node *node;

		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			rte_strerror(EINVAL));

	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
	node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
	if (node_id == RTE_TM_NODE_ID_NULL ||
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_NODE_ID,
			rte_strerror(EINVAL));

	*is_leaf = (node->level == IPN3KE_TM_NODE_LEVEL_COS) ? 1 : 0;

#define WRED_SUPPORTED 0

#define STATS_MASK_DEFAULT \
	(RTE_TM_STATS_N_PKTS | \
	RTE_TM_STATS_N_BYTES | \
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE \
	(STATS_MASK_DEFAULT | RTE_TM_STATS_N_PKTS_QUEUED)
/* Traffic manager capabilities get */
ipn3ke_tm_capabilities_get(__rte_unused struct rte_eth_dev *dev,
	struct rte_tm_capabilities *cap, struct rte_tm_error *error)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			rte_strerror(EINVAL));

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(*cap));

	cap->n_nodes_max = 1 + IPN3KE_TM_COS_NODE_NUM + IPN3KE_TM_VT_NODE_NUM;
	cap->n_levels_max = IPN3KE_TM_NODE_LEVEL_MAX;

	cap->non_leaf_nodes_identical = 0;
	cap->leaf_nodes_identical = 1;

	cap->shaper_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
	cap->shaper_private_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 1;
	cap->shaper_private_rate_max = 1 + IPN3KE_TM_VT_NODE_NUM;

	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;

	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;

	cap->sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
	cap->sched_sp_n_priorities_max = 3;
	cap->sched_wfq_n_children_per_group_max = UINT32_MAX;
	cap->sched_wfq_n_groups_max = 1;
	cap->sched_wfq_weight_max = UINT32_MAX;

	cap->cman_wred_packet_mode_supported = 0;
	cap->cman_wred_byte_mode_supported = 0;
	cap->cman_head_drop_supported = 0;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;

	 * cap->mark_vlan_dei_supported = {0, 0, 0};
	 * cap->mark_ip_ecn_tcp_supported = {0, 0, 0};
	 * cap->mark_ip_ecn_sctp_supported = {0, 0, 0};
	 * cap->mark_ip_dscp_supported = {0, 0, 0};

	cap->dynamic_update_mask = 0;
/* Traffic manager level capabilities get */
ipn3ke_tm_level_capabilities_get(struct rte_eth_dev *dev,
	uint32_t level_id, struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);

		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			rte_strerror(EINVAL));

	if (level_id >= IPN3KE_TM_NODE_LEVEL_MAX)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			rte_strerror(EINVAL));

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(*cap));

	case IPN3KE_TM_NODE_LEVEL_PORT:
		cap->n_nodes_max = hw->port_num;
		cap->n_nodes_nonleaf_max = IPN3KE_TM_VT_NODE_NUM;
		cap->n_nodes_leaf_max = 0;
		cap->non_leaf_nodes_identical = 0;
		cap->leaf_nodes_identical = 0;

		cap->nonleaf.shaper_private_supported = 0;
		cap->nonleaf.shaper_private_dual_rate_supported = 0;
		cap->nonleaf.shaper_private_rate_min = 1;
		cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
		cap->nonleaf.shaper_shared_n_max = 0;

		cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 0;

		cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;

	case IPN3KE_TM_NODE_LEVEL_VT:
		cap->n_nodes_max = IPN3KE_TM_VT_NODE_NUM;
		cap->n_nodes_nonleaf_max = IPN3KE_TM_COS_NODE_NUM;
		cap->n_nodes_leaf_max = 0;
		cap->non_leaf_nodes_identical = 0;
		cap->leaf_nodes_identical = 0;

		cap->nonleaf.shaper_private_supported = 0;
		cap->nonleaf.shaper_private_dual_rate_supported = 0;
		cap->nonleaf.shaper_private_rate_min = 1;
		cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
		cap->nonleaf.shaper_shared_n_max = 0;

		cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 0;

		cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;

	case IPN3KE_TM_NODE_LEVEL_COS:
		cap->n_nodes_max = IPN3KE_TM_COS_NODE_NUM;
		cap->n_nodes_nonleaf_max = 0;
		cap->n_nodes_leaf_max = IPN3KE_TM_COS_NODE_NUM;
		cap->non_leaf_nodes_identical = 0;
		cap->leaf_nodes_identical = 0;

		cap->leaf.shaper_private_supported = 0;
		cap->leaf.shaper_private_dual_rate_supported = 0;
		cap->leaf.shaper_private_rate_min = 0;
		cap->leaf.shaper_private_rate_max = 0;
		cap->leaf.shaper_shared_n_max = 0;

		cap->leaf.cman_head_drop_supported = 0;
		cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
		cap->leaf.cman_wred_byte_mode_supported = 0;
		cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
		cap->leaf.cman_wred_context_shared_n_max = 0;

		cap->leaf.stats_mask = STATS_MASK_QUEUE;

		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			rte_strerror(EINVAL));
/* Traffic manager node capabilities get */
ipn3ke_tm_node_capabilities_get(struct rte_eth_dev *dev,
	uint32_t node_id, struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
	struct ipn3ke_rpst *representor = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_node *tm_node;

		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			rte_strerror(EINVAL));

	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
	tm_node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_NODE_ID,
			rte_strerror(EINVAL));

	if (tm_node->tm_id != representor->port_id)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_NODE_ID,
			rte_strerror(EINVAL));

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(*cap));

	switch (tm_node->level) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		cap->shaper_private_supported = 1;
		cap->shaper_private_dual_rate_supported = 0;
		cap->shaper_private_rate_min = 1;
		cap->shaper_private_rate_max = UINT32_MAX;
		cap->shaper_shared_n_max = 0;

		cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			IPN3KE_TM_VT_NODE_NUM;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max = 1;

		cap->stats_mask = STATS_MASK_DEFAULT;

	case IPN3KE_TM_NODE_LEVEL_VT:
		cap->shaper_private_supported = 1;
		cap->shaper_private_dual_rate_supported = 0;
		cap->shaper_private_rate_min = 1;
		cap->shaper_private_rate_max = UINT32_MAX;
		cap->shaper_shared_n_max = 0;

		cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			IPN3KE_TM_COS_NODE_NUM;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max = 1;

		cap->stats_mask = STATS_MASK_DEFAULT;

	case IPN3KE_TM_NODE_LEVEL_COS:
		cap->shaper_private_supported = 0;
		cap->shaper_private_dual_rate_supported = 0;
		cap->shaper_private_rate_min = 0;
		cap->shaper_private_rate_max = 0;
		cap->shaper_shared_n_max = 0;

		cap->leaf.cman_head_drop_supported = 0;
		cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
		cap->leaf.cman_wred_byte_mode_supported = 0;
		cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
		cap->leaf.cman_wred_context_shared_n_max = 0;

		cap->stats_mask = STATS_MASK_QUEUE;
ipn3ke_tm_shaper_param_trans(struct rte_tm_shaper_params *profile,
	struct ipn3ke_tm_shaper_profile *local_profile,
	const struct ipn3ke_tm_shaper_params_range_type *ref_data)
	const struct ipn3ke_tm_shaper_params_range_type *r;

	rate = profile->peak.rate;
	for (i = 0, r = ref_data; i < IPN3KE_TM_SHAPER_RANGE_NUM; i++, r++) {
		if (rate >= r->low &&
			local_profile->m = (rate / 4) / r->exp2;
			local_profile->e = r->exp;
			local_profile->rate = rate;
ipn3ke_tm_shaper_profile_add(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id, struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_shaper_profile *sp;

	/* Shaper profile must not exist. */
	sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
	if (!sp || sp->valid)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			rte_strerror(EINVAL));

	/* Peak rate: non-zero, within the supported shaper range */
	if (profile->peak.rate == 0 ||
		profile->peak.rate > IPN3KE_TM_SHAPER_PEAK_RATE_MAX)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			rte_strerror(EINVAL));

	/* Peak size: must be zero */
	if (profile->peak.size != 0)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
			rte_strerror(EINVAL));

	/* Committed rate: bounded; dual-rate profiles are not supported. */
	if (profile->committed.rate > IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
			rte_strerror(EINVAL));

	/* Packet length adjust: must be zero */
	if (profile->pkt_length_adjust != 0)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
			rte_strerror(EINVAL));

	if (ipn3ke_tm_shaper_param_trans(profile,
		ipn3ke_tm_shaper_params_range)) {
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			rte_strerror(EINVAL));

	rte_memcpy(&sp->params, profile, sizeof(sp->params));
	tm->h.n_shaper_profiles++;

/* Traffic manager shaper profile delete */
ipn3ke_tm_shaper_profile_delete(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id, struct rte_tm_error *error)
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_shaper_profile *sp;

	sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
	if (!sp || !sp->valid)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			rte_strerror(EINVAL));

	tm->h.n_shaper_profiles--;
ipn3ke_tm_tdrop_profile_check(__rte_unused struct rte_eth_dev *dev,
	uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
	enum rte_color color;

	/* TDROP profile ID must not be NONE. */
	if (tdrop_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			rte_strerror(EINVAL));

	/* Profile must not be NULL. */
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			rte_strerror(EINVAL));

	/* TDROP profile must be in byte mode; packet mode is not supported */
	if (profile->packet_mode != 0)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			rte_strerror(ENOTSUP));

	/* Thresholds: only GREEN is used; min_th must fit the hardware fields */
	for (color = RTE_COLOR_GREEN; color <= RTE_COLOR_GREEN; color++) {
		uint64_t min_th = profile->red_params[color].min_th;
		uint64_t max_th = profile->red_params[color].max_th;

		if (((min_th >> IPN3KE_TDROP_TH1_SHIFT) >>
				IPN3KE_TDROP_TH1_SHIFT) ||
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_WRED_PROFILE,
				rte_strerror(EINVAL));
ipn3ke_hw_tm_tdrop_wr(struct ipn3ke_hw *hw,
	struct ipn3ke_tm_tdrop_profile *tp)
	IPN3KE_MASK_WRITE_REG(hw,
		IPN3KE_CCB_PROFILE_MS,
		IPN3KE_CCB_PROFILE_MS_MASK);

	IPN3KE_MASK_WRITE_REG(hw,
		IPN3KE_CCB_PROFILE_P,
		tp->tdrop_profile_id,
		IPN3KE_CCB_PROFILE_MASK);

	IPN3KE_MASK_WRITE_REG(hw,
		IPN3KE_CCB_PROFILE_MS,
		IPN3KE_CCB_PROFILE_MS_MASK);

	IPN3KE_MASK_WRITE_REG(hw,
		IPN3KE_CCB_PROFILE_P,
		tp->tdrop_profile_id,
		IPN3KE_CCB_PROFILE_MASK);
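	/*
	 * A sketch of the intent, inferred from the register names:
	 * the CCB_PROFILE_MS register appears to select which word of
	 * the CCB profile memory is accessed, and each write through
	 * CCB_PROFILE_P at offset tdrop_profile_id then carries one
	 * threshold word, hence the pair of select/write sequences.
	 */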
/* Traffic manager TDROP profile add */
ipn3ke_tm_tdrop_profile_add(struct rte_eth_dev *dev,
	uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_tdrop_profile *tp;

	/* Check input params */
	status = ipn3ke_tm_tdrop_profile_check(dev,

	/* Profiles are statically allocated; pick the slot by ID */
	tp = &hw->tdrop_profile[tdrop_profile_id];

	min_th = profile->red_params[RTE_COLOR_GREEN].min_th;
	th1 = (uint32_t)(min_th & IPN3KE_TDROP_TH1_MASK);
	th2 = (uint32_t)((min_th >> IPN3KE_TDROP_TH1_SHIFT) &
		IPN3KE_TDROP_TH2_MASK);
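	/*
	 * Threshold split: min_th is cut into two hardware fields,
	 * TH1 (low bits) and TH2 (high bits). Purely as an
	 * illustration, if IPN3KE_TDROP_TH1_SHIFT were 16 with a
	 * matching 16-bit TH1 mask, min_th = 0x12345 would give
	 * th1 = 0x2345 and th2 = 0x1.
	 */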
	rte_memcpy(&tp->params, profile, sizeof(tp->params));
	tm->h.n_tdrop_profiles++;
	ipn3ke_hw_tm_tdrop_wr(hw, tp);

/* Traffic manager TDROP profile delete */
ipn3ke_tm_tdrop_profile_delete(struct rte_eth_dev *dev,
	uint32_t tdrop_profile_id, struct rte_tm_error *error)
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_tdrop_profile *tp;

	tp = ipn3ke_hw_tm_tdrop_profile_search(hw, tdrop_profile_id);
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			rte_strerror(EINVAL));

		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			rte_strerror(EBUSY));

	tm->h.n_tdrop_profiles--;
	ipn3ke_hw_tm_tdrop_wr(hw, tp);
ipn3ke_tm_node_add_check_parameter(uint32_t tm_id,
	uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
	uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
	struct rte_tm_error *error)
	uint32_t level_of_node_id;
	uint32_t parent_level_id;

	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_NODE_ID,
			rte_strerror(EINVAL));

	/* priority: must be 0, 1, 2, 3 */
	if (priority > IPN3KE_TM_NODE_PRIORITY_HIGHEST)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			rte_strerror(EINVAL));

	/* weight: must be 1 .. 255 (only the upper bound is checked here) */
	if (weight > IPN3KE_TM_NODE_WEIGHT_MAX)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			rte_strerror(EINVAL));

	/* Check node ID and parent ID */
	level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
	if (level_of_node_id != level_id)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_NODE_ID,
			rte_strerror(EINVAL));
	node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
	parent_level_id = parent_node_id / IPN3KE_TM_NODE_LEVEL_MOD;
	case IPN3KE_TM_NODE_LEVEL_PORT:
		if (node_index != tm_id)
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_ID,
				rte_strerror(EINVAL));
		if (parent_node_id != RTE_TM_NODE_ID_NULL)
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				rte_strerror(EINVAL));

	case IPN3KE_TM_NODE_LEVEL_VT:
		if (node_index >= IPN3KE_TM_VT_NODE_NUM)
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_ID,
				rte_strerror(EINVAL));
		if (parent_level_id != IPN3KE_TM_NODE_LEVEL_PORT)
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				rte_strerror(EINVAL));

	case IPN3KE_TM_NODE_LEVEL_COS:
		if (node_index >= IPN3KE_TM_COS_NODE_NUM)
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_ID,
				rte_strerror(EINVAL));
		if (parent_level_id != IPN3KE_TM_NODE_LEVEL_VT)
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				rte_strerror(EINVAL));

		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			rte_strerror(EINVAL));

	/* params: must not be NULL */
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_NODE_PARAMS,
			rte_strerror(EINVAL));
	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			rte_strerror(EINVAL));
ipn3ke_tm_node_add_check_mount(uint32_t tm_id,
	uint32_t node_id, uint32_t parent_node_id, uint32_t level_id,
	struct rte_tm_error *error)
	/*struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);*/
	uint32_t node_index;
	uint32_t parent_index;
	uint32_t parent_index1;

	node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
	parent_index = parent_node_id % IPN3KE_TM_NODE_LEVEL_MOD;
	parent_index1 = node_index / IPN3KE_TM_NODE_MOUNT_MAX;
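	/*
	 * Mount rule: COS queues are statically grouped under VT nodes
	 * in blocks of IPN3KE_TM_NODE_MOUNT_MAX, so the only legal
	 * parent of COS index i is VT index i / IPN3KE_TM_NODE_MOUNT_MAX.
	 * For example, with a mount size of 8, COS index 13 can only
	 * attach to VT index 1.
	 */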
	case IPN3KE_TM_NODE_LEVEL_PORT:

	case IPN3KE_TM_NODE_LEVEL_VT:
		if (parent_index != tm_id)
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				rte_strerror(EINVAL));

	case IPN3KE_TM_NODE_LEVEL_COS:
		if (parent_index != parent_index1)
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				rte_strerror(EINVAL));

		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			rte_strerror(EINVAL));
/* Traffic manager node add */
ipn3ke_tm_node_add(struct rte_eth_dev *dev,
	uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
	uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
	struct rte_tm_error *error)
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_node *n, *parent_node;
	uint32_t node_state, state_mask;

	if (tm->hierarchy_frozen)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			rte_strerror(EBUSY));

	status = ipn3ke_tm_node_add_check_parameter(tm_id,

	status = ipn3ke_tm_node_add_check_mount(tm_id,

	/* Shaper profile ID: either NONE or equal to the node ID */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
		params->shaper_profile_id != node_id)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			rte_strerror(EINVAL));

	/* Look up a free node slot (nodes are preallocated) */
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_IDLE);
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_DEL);
	n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			rte_strerror(EINVAL));
	node_state = n->node_state;

	/* Check parent node */
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
	if (parent_node_id != RTE_TM_NODE_ID_NULL) {
		parent_node = ipn3ke_hw_tm_node_search(hw,
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				rte_strerror(EINVAL));

	case IPN3KE_TM_NODE_LEVEL_PORT:
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
		tm->h.port_commit_node = n;

	case IPN3KE_TM_NODE_LEVEL_VT:
		if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
			TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);
			parent_node->n_children++;
		} else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			parent_node->n_children++;
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
		n->parent_node_id = parent_node_id;
		n->parent_node = parent_node;

	case IPN3KE_TM_NODE_LEVEL_COS:
		if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
			TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
			parent_node->n_children++;
			tm->h.n_cos_nodes++;
		} else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			parent_node->n_children++;
			tm->h.n_cos_nodes++;
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
		n->parent_node_id = parent_node_id;
		n->parent_node = parent_node;

		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			rte_strerror(EINVAL));

	n->priority = priority;

	if (n->level == IPN3KE_TM_NODE_LEVEL_COS &&
		params->leaf.cman == RTE_TM_CMAN_TAIL_DROP)
		n->tdrop_profile = ipn3ke_hw_tm_tdrop_profile_search(hw,
			params->leaf.wred.wred_profile_id);

	rte_memcpy(&n->params, params, sizeof(n->params));
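	/*
	 * node_add only stages the change: the node is parked on the
	 * per-level commit list in CONFIGURED_ADD state and is written
	 * to hardware later, when hierarchy_commit runs.
	 */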
ipn3ke_tm_node_del_check_parameter(uint32_t tm_id,
	uint32_t node_id, struct rte_tm_error *error)
	uint32_t level_of_node_id;
	uint32_t node_index;

	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_NODE_ID,
			rte_strerror(EINVAL));

	/* Check node ID and parent ID */
	level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
	node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
	switch (level_of_node_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		if (node_index != tm_id)
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_ID,
				rte_strerror(EINVAL));

	case IPN3KE_TM_NODE_LEVEL_VT:
		if (node_index >= IPN3KE_TM_VT_NODE_NUM)
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_ID,
				rte_strerror(EINVAL));

	case IPN3KE_TM_NODE_LEVEL_COS:
		if (node_index >= IPN3KE_TM_COS_NODE_NUM)
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_ID,
				rte_strerror(EINVAL));

		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			rte_strerror(EINVAL));
/* Traffic manager node delete */
ipn3ke_pmd_tm_node_delete(struct rte_eth_dev *dev,
	uint32_t node_id, struct rte_tm_error *error)
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_node *n, *parent_node;
	uint32_t level_of_node_id;
	uint32_t node_state;
	uint32_t state_mask;

	/* Check hierarchy changes are currently allowed */
	if (tm->hierarchy_frozen)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			rte_strerror(EBUSY));

	status = ipn3ke_tm_node_del_check_parameter(tm_id,

	/* Check existing */
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
	n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_NODE_ID,
			rte_strerror(EINVAL));

	if (n->n_children > 0)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_NODE_ID,
			rte_strerror(EINVAL));

	node_state = n->node_state;
	level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;

	/* Check parent node */
	if (n->parent_node_id != RTE_TM_NODE_ID_NULL) {
		IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
		IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
		parent_node = ipn3ke_hw_tm_node_search(hw,
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				rte_strerror(EINVAL));
		if (n->parent_node != parent_node)
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_ID,
				rte_strerror(EINVAL));

	switch (level_of_node_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		if (tm->h.port_node != n)
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_NODE_ID,
				rte_strerror(EINVAL));
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
		tm->h.port_commit_node = n;

	case IPN3KE_TM_NODE_LEVEL_VT:
		if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
			TAILQ_REMOVE(&parent_node->children_node_list,
			TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);
			parent_node->n_children--;
		} else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			parent_node->n_children--;
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;

	case IPN3KE_TM_NODE_LEVEL_COS:
		if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
			TAILQ_REMOVE(&parent_node->children_node_list,
			TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
			parent_node->n_children--;
			tm->h.n_cos_nodes--;
		} else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			parent_node->n_children--;
			tm->h.n_cos_nodes--;
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;

		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			rte_strerror(EINVAL));
ipn3ke_tm_hierarchy_commit_check(struct rte_eth_dev *dev,
	struct rte_tm_error *error)
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_node_list *nl;
	struct ipn3ke_tm_node *n, *parent_node;

	nl = &tm->h.cos_commit_node_list;
	TAILQ_FOREACH(n, nl, node) {
		parent_node = n->parent_node;
		if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
				n->level != IPN3KE_TM_NODE_LEVEL_COS ||
				n->tm_id != tm_id ||
				parent_node == NULL ||
				parent_node->node_state ==
					IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
				parent_node->node_state ==
					IPN3KE_TM_NODE_STATE_IDLE) ||
				n->shaper_profile.valid == 0) {
				return -rte_tm_error_set(error,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					rte_strerror(EINVAL));
		} else if (n->node_state ==
				IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			if (n->level != IPN3KE_TM_NODE_LEVEL_COS ||
				n->n_children != 0) {
				return -rte_tm_error_set(error,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					rte_strerror(EINVAL));
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				rte_strerror(EINVAL));

	nl = &tm->h.vt_commit_node_list;
	TAILQ_FOREACH(n, nl, node) {
		parent_node = n->parent_node;
		if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
				n->level != IPN3KE_TM_NODE_LEVEL_VT ||
				n->tm_id != tm_id ||
				parent_node == NULL ||
				parent_node->node_state ==
					IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
				parent_node->node_state ==
					IPN3KE_TM_NODE_STATE_IDLE) ||
				n->shaper_profile.valid == 0) {
				return -rte_tm_error_set(error,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					rte_strerror(EINVAL));
		} else if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL)
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				rte_strerror(EINVAL));

	n = tm->h.port_commit_node;
		(n->parent_node_id != RTE_TM_NODE_ID_NULL ||
		n->level != IPN3KE_TM_NODE_LEVEL_PORT ||
		n->tm_id != tm_id ||
		n->parent_node != NULL ||
		n->shaper_profile.valid == 0)) {
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			rte_strerror(EINVAL));
ipn3ke_hw_tm_node_wr(struct ipn3ke_hw *hw,
	struct ipn3ke_tm_node *n,
	struct ipn3ke_tm_node *parent_node)

	case IPN3KE_TM_NODE_LEVEL_PORT:
		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_TYPE_L3_X,
			IPN3KE_QOS_TYPE_MASK);

		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_SCH_WT_L3_X,
			IPN3KE_QOS_SCH_WT_MASK);

		if (n->shaper_profile.valid)
			IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QOS_SHAP_WT_L3_X,
				((n->shaper_profile.e << 10) |
					n->shaper_profile.m),
				IPN3KE_QOS_SHAP_WT_MASK);

	case IPN3KE_TM_NODE_LEVEL_VT:
		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_TYPE_L2_X,
			IPN3KE_QOS_TYPE_MASK);

		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_SCH_WT_L2_X,
			IPN3KE_QOS_SCH_WT_MASK);

		if (n->shaper_profile.valid)
			IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QOS_SHAP_WT_L2_X,
				((n->shaper_profile.e << 10) |
					n->shaper_profile.m),
				IPN3KE_QOS_SHAP_WT_MASK);

		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_MAP_L2_X,
			parent_node->node_index,
			IPN3KE_QOS_MAP_L2_MASK);

	case IPN3KE_TM_NODE_LEVEL_COS:
		/*
		 * Configure Tail Drop mapping
		 */
		if (n->tdrop_profile && n->tdrop_profile->valid) {
			IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_CCB_QPROFILE_Q,
				n->tdrop_profile->tdrop_profile_id,
				IPN3KE_CCB_QPROFILE_MASK);

		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_TYPE_L1_X,
			IPN3KE_QOS_TYPE_MASK);

		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_SCH_WT_L1_X,
			IPN3KE_QOS_SCH_WT_MASK);

		if (n->shaper_profile.valid)
			IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QOS_SHAP_WT_L1_X,
				((n->shaper_profile.e << 10) |
					n->shaper_profile.m),
				IPN3KE_QOS_SHAP_WT_MASK);

		/*
		 * Configure COS queue to port
		 */
		while (IPN3KE_MASK_READ_REG(hw,
			IPN3KE_QM_UID_CONFIG_CTRL,

		if (parent_node && parent_node->parent_node)
			IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QM_UID_CONFIG_DATA,
				(1 << 8 | parent_node->parent_node->node_index),

		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QM_UID_CONFIG_CTRL,

		while (IPN3KE_MASK_READ_REG(hw,
			IPN3KE_QM_UID_CONFIG_CTRL,

		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_QOS_MAP_L1_X,
			parent_node->node_index,
			IPN3KE_QOS_MAP_L1_MASK);
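		/*
		 * COS programming order used above: map the queue to
		 * its tail-drop profile, set the scheduler type and
		 * weight, optionally program the shaper, bind the
		 * queue to its port (the grandparent node) through the
		 * QM UID config registers while polling the control
		 * register around the write, and finally map the queue
		 * to its parent VT in the L1 map.
		 */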
ipn3ke_tm_hierarchy_hw_commit(struct rte_eth_dev *dev,
	struct rte_tm_error *error)
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_node_list *nl;
	struct ipn3ke_tm_node *n, *nn, *parent_node;

	n = tm->h.port_commit_node;
		if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			tm->h.port_commit_node = NULL;
			n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
		} else if (n->node_state ==
				IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			tm->h.port_commit_node = NULL;
			n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
			n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
			n->tm_id = RTE_TM_NODE_ID_NULL;
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				rte_strerror(EINVAL));
		parent_node = n->parent_node;
		ipn3ke_hw_tm_node_wr(hw, n, parent_node);

	nl = &tm->h.vt_commit_node_list;
	for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
		nn = TAILQ_NEXT(n, node);
		if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
			parent_node = n->parent_node;
			TAILQ_REMOVE(nl, n, node);
			TAILQ_INSERT_TAIL(&parent_node->children_node_list,
		} else if (n->node_state ==
				IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			parent_node = n->parent_node;
			TAILQ_REMOVE(nl, n, node);
			n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
			n->parent_node_id = RTE_TM_NODE_ID_NULL;
			n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
			n->tm_id = RTE_TM_NODE_ID_NULL;
			n->parent_node = NULL;
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				rte_strerror(EINVAL));
		ipn3ke_hw_tm_node_wr(hw, n, parent_node);

	nl = &tm->h.cos_commit_node_list;
	for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
		nn = TAILQ_NEXT(n, node);
		if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
			parent_node = n->parent_node;
			TAILQ_REMOVE(nl, n, node);
			TAILQ_INSERT_TAIL(&parent_node->children_node_list,
		} else if (n->node_state ==
				IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			parent_node = n->parent_node;
			TAILQ_REMOVE(nl, n, node);
			n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
			n->parent_node_id = RTE_TM_NODE_ID_NULL;
			n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
			n->tm_id = RTE_TM_NODE_ID_NULL;
			n->parent_node = NULL;
			if (n->tdrop_profile)
				n->tdrop_profile->n_users--;
			return -rte_tm_error_set(error,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				rte_strerror(EINVAL));
		ipn3ke_hw_tm_node_wr(hw, n, parent_node);
ipn3ke_tm_hierarchy_commit_clear(struct rte_eth_dev *dev)
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_node_list *nl;
	struct ipn3ke_tm_node *n;
	struct ipn3ke_tm_node *nn;

	n = tm->h.port_commit_node;
		n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		n->tm_id = RTE_TM_NODE_ID_NULL;

		tm->h.port_commit_node = NULL;

	nl = &tm->h.vt_commit_node_list;
	for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
		nn = TAILQ_NEXT(n, node);

		n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		n->parent_node_id = RTE_TM_NODE_ID_NULL;
		n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		n->tm_id = RTE_TM_NODE_ID_NULL;
		n->parent_node = NULL;

		TAILQ_REMOVE(nl, n, node);

	nl = &tm->h.cos_commit_node_list;
	for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
		nn = TAILQ_NEXT(n, node);

		n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		n->parent_node_id = RTE_TM_NODE_ID_NULL;
		n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		n->tm_id = RTE_TM_NODE_ID_NULL;
		n->parent_node = NULL;
		tm->h.n_cos_nodes--;

		TAILQ_REMOVE(nl, n, node);
ipn3ke_tm_show(struct rte_eth_dev *dev)
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_node_list *vt_nl, *cos_nl;
	struct ipn3ke_tm_node *port_n, *vt_n, *cos_n;
	const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",

	IPN3KE_AFU_PMD_DEBUG("***HQoS Tree(%d)***\n", tm_id);

	port_n = tm->h.port_node;
	IPN3KE_AFU_PMD_DEBUG("Port: (%d|%s)\n", port_n->node_index,
		str_state[port_n->node_state]);

	vt_nl = &tm->h.port_node->children_node_list;
	TAILQ_FOREACH(vt_n, vt_nl, node) {
		cos_nl = &vt_n->children_node_list;
		IPN3KE_AFU_PMD_DEBUG(" VT%d: ", vt_n->node_index);
		TAILQ_FOREACH(cos_n, cos_nl, node) {
			if (cos_n->parent_node_id !=
				(vt_n->node_index + IPN3KE_TM_NODE_LEVEL_MOD))
				IPN3KE_AFU_PMD_ERR("(%d|%s), ",
					str_state[cos_n->node_state]);
		IPN3KE_AFU_PMD_DEBUG("\n");
ipn3ke_tm_show_commit(struct rte_eth_dev *dev)
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_node_list *nl;
	struct ipn3ke_tm_node *n;
	const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",

	IPN3KE_AFU_PMD_DEBUG("***Commit Tree(%d)***\n", tm_id);
	n = tm->h.port_commit_node;
	IPN3KE_AFU_PMD_DEBUG("Port: ");
		IPN3KE_AFU_PMD_DEBUG("(%d|%s)",
			str_state[n->node_state]);
	IPN3KE_AFU_PMD_DEBUG("\n");

	nl = &tm->h.vt_commit_node_list;
	IPN3KE_AFU_PMD_DEBUG("VT : ");
	TAILQ_FOREACH(n, nl, node) {
		IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
			str_state[n->node_state]);
	IPN3KE_AFU_PMD_DEBUG("\n");

	nl = &tm->h.cos_commit_node_list;
	IPN3KE_AFU_PMD_DEBUG("COS : ");
	TAILQ_FOREACH(n, nl, node) {
		IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
			str_state[n->node_state]);
	IPN3KE_AFU_PMD_DEBUG("\n");
/* Traffic manager hierarchy commit */
ipn3ke_tm_hierarchy_commit(struct rte_eth_dev *dev,
	int clear_on_fail, struct rte_tm_error *error)
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);

	if (tm->hierarchy_frozen)
		return -rte_tm_error_set(error,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			rte_strerror(EBUSY));

	ipn3ke_tm_show_commit(dev);

	status = ipn3ke_tm_hierarchy_commit_check(dev, error);
		ipn3ke_tm_hierarchy_commit_clear(dev);

	ipn3ke_tm_hierarchy_hw_commit(dev, error);
	ipn3ke_tm_show(dev);
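/*
 * Typical application-side flow (a minimal sketch against the generic
 * rte_tm API; "port_node_id" is illustrative and follows the
 * level * IPN3KE_TM_NODE_LEVEL_MOD + index convention and the
 * profile-ID-equals-node-ID rule enforced in node_add above):
 *
 *	struct rte_tm_error err;
 *	struct rte_tm_shaper_params sp = { .peak = { .rate = 1000 } };
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = port_node_id,
 *	};
 *
 *	rte_tm_shaper_profile_add(port_id, port_node_id, &sp, &err);
 *	rte_tm_node_add(port_id, port_node_id, RTE_TM_NODE_ID_NULL,
 *			0, 1, IPN3KE_TM_NODE_LEVEL_PORT, &np, &err);
 *	(VT and COS nodes are added the same way, then)
 *	rte_tm_hierarchy_commit(port_id, 1, &err);
 */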
const struct rte_tm_ops ipn3ke_tm_ops = {
	.node_type_get = ipn3ke_pmd_tm_node_type_get,
	.capabilities_get = ipn3ke_tm_capabilities_get,
	.level_capabilities_get = ipn3ke_tm_level_capabilities_get,
	.node_capabilities_get = ipn3ke_tm_node_capabilities_get,

	.wred_profile_add = ipn3ke_tm_tdrop_profile_add,
	.wred_profile_delete = ipn3ke_tm_tdrop_profile_delete,
	.shared_wred_context_add_update = NULL,
	.shared_wred_context_delete = NULL,

	.shaper_profile_add = ipn3ke_tm_shaper_profile_add,
	.shaper_profile_delete = ipn3ke_tm_shaper_profile_delete,
	.shared_shaper_add_update = NULL,
	.shared_shaper_delete = NULL,

	.node_add = ipn3ke_tm_node_add,
	.node_delete = ipn3ke_pmd_tm_node_delete,
	.node_suspend = NULL,
	.node_resume = NULL,
	.hierarchy_commit = ipn3ke_tm_hierarchy_commit,

	.node_parent_update = NULL,
	.node_shaper_update = NULL,
	.node_shared_shaper_update = NULL,
	.node_stats_update = NULL,
	.node_wfq_weight_mode_update = NULL,
	.node_cman_update = NULL,
	.node_wred_context_update = NULL,
	.node_shared_wred_context_update = NULL,

	.node_stats_read = NULL,
};
ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev,
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
	struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
	struct rte_eth_dev *i40e_pf_eth;
	const struct rte_tm_ops *ops;

		*(const void **)arg = &ipn3ke_tm_ops;
	} else if (rpst->i40e_pf_eth) {
		i40e_pf_eth = rpst->i40e_pf_eth;
		if (i40e_pf_eth->dev_ops->tm_ops_get == NULL ||
			i40e_pf_eth->dev_ops->tm_ops_get(i40e_pf_eth,
		*(const void **)arg = ops;
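	/*
	 * Dispatch note: the TM ops above are exposed for the AFU-side
	 * datapath; when the representor is instead backed by an i40e
	 * PF, the request is forwarded to the PF's own tm_ops_get, as
	 * seen in the else branch.
	 */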