1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
9 #include <rte_bus_pci.h>
10 #include <rte_ethdev.h>
12 #include <rte_malloc.h>
13 #include <rte_tm_driver.h>
16 #include <rte_sched.h>
17 #include <ethdev_driver.h>
20 #include <rte_rawdev.h>
21 #include <rte_rawdev_pmd.h>
22 #include <rte_bus_ifpga.h>
23 #include <ifpga_logs.h>
25 #include "ipn3ke_rawdev_api.h"
26 #include "ipn3ke_flow.h"
27 #include "ipn3ke_logs.h"
28 #include "ipn3ke_ethdev.h"
30 #define BYTES_IN_MBPS (1000 * 1000 / 8)
31 #define SUBPORT_TC_PERIOD 10
32 #define PIPE_TC_PERIOD 40
34 struct ipn3ke_tm_shaper_params_range_type {
/*
 * Shaper rate-encoding lookup table: each row covers one contiguous rate
 * range and supplies the exponent values used to encode a rate falling in
 * that range.  Per the usage in ipn3ke_tm_shaper_parame_trans(), the columns
 * referenced are .low/.high (range bounds), .exp and .exp2 (encoding
 * exponent and its power-of-two scale); the remaining two columns are the
 * derived min/max encodable rates for the row — TODO confirm field names
 * against the (elided) struct definition above.
 */
42 struct ipn3ke_tm_shaper_params_range_type ipn3ke_tm_shaper_params_rang[] = {
45 { 4, 7, 0, 1, 16, 28},
46 { 8, 15, 0, 1, 32, 60},
47 { 16, 31, 0, 1, 64, 124},
48 { 32, 63, 0, 1, 128, 252},
49 { 64, 127, 0, 1, 256, 508},
50 {128, 255, 0, 1, 512, 1020},
51 {256, 511, 0, 1, 1024, 2044},
52 {512, 1023, 0, 1, 2048, 4092},
53 {512, 1023, 1, 2, 4096, 8184},
54 {512, 1023, 2, 4, 8192, 16368},
55 {512, 1023, 3, 8, 16384, 32736},
56 {512, 1023, 4, 16, 32768, 65472},
57 {512, 1023, 5, 32, 65536, 130944},
58 {512, 1023, 6, 64, 131072, 261888},
59 {512, 1023, 7, 128, 262144, 523776},
60 {512, 1023, 8, 256, 524288, 1047552},
61 {512, 1023, 9, 512, 1048576, 2095104},
62 {512, 1023, 10, 1024, 2097152, 4190208},
63 {512, 1023, 11, 2048, 4194304, 8380416},
64 {512, 1023, 12, 4096, 8388608, 16760832},
65 {512, 1023, 13, 8192, 16777216, 33521664},
66 {512, 1023, 14, 16384, 33554432, 67043328},
67 {512, 1023, 15, 32768, 67108864, 134086656},
/* Number of rows in the range table above. */
70 #define IPN3KE_TM_SHAPER_RANGE_NUM (sizeof(ipn3ke_tm_shaper_params_rang) / \
71 sizeof(struct ipn3ke_tm_shaper_params_range_type))
/* Maximum encodable committed rate = .high of the last table row. */
73 #define IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX \
74 (ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)
/* Maximum encodable peak rate = .high of the last table row. */
76 #define IPN3KE_TM_SHAPER_PEAK_RATE_MAX \
77 (ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)
/*
 * One-time TM initialization for the FPGA HW instance: optionally sanity-
 * checks register access via a scratch write/read, allocates the node arrays
 * for all three hierarchy levels (port/VT/COS) plus the tail-drop profile
 * table, and resets every node/profile to the IDLE/unused state.
 */
80 ipn3ke_hw_tm_init(struct ipn3ke_hw *hw)
82 #define SCRATCH_DATA 0xABCDEF
83 struct ipn3ke_tm_node *nodes;
84 struct ipn3ke_tm_tdrop_profile *tdrop_profile;
/* Optional register-access self-test: write a known pattern, read it back. */
90 #if IPN3KE_TM_SCRATCH_RW
91 uint32_t scratch_data;
92 IPN3KE_MASK_WRITE_REG(hw,
97 scratch_data = IPN3KE_MASK_READ_REG(hw,
101 if (scratch_data != SCRATCH_DATA)
104 /* alloc memory for all hierarchy nodes */
105 node_num = hw->port_num +
106 IPN3KE_TM_VT_NODE_NUM +
107 IPN3KE_TM_COS_NODE_NUM;
/* Single zeroed allocation holding port, VT and COS nodes back-to-back. */
109 nodes = rte_zmalloc("ipn3ke_tm_nodes",
110 sizeof(struct ipn3ke_tm_node) * node_num,
115 /* alloc memory for Tail Drop Profile */
116 tdrop_profile = rte_zmalloc("ipn3ke_tm_tdrop_profile",
117 sizeof(struct ipn3ke_tm_tdrop_profile) *
118 IPN3KE_TM_TDROP_PROFILE_NUM,
120 if (!tdrop_profile) {
/* Partition the contiguous node array: [ports][VT nodes][COS nodes]. */
126 hw->port_nodes = nodes;
127 hw->vt_nodes = hw->port_nodes + hw->port_num;
128 hw->cos_nodes = hw->vt_nodes + IPN3KE_TM_VT_NODE_NUM;
129 hw->tdrop_profile = tdrop_profile;
130 hw->tdrop_profile_num = IPN3KE_TM_TDROP_PROFILE_NUM;
/* Reset every port-level node to IDLE with no parent/profile/children. */
132 for (i = 0, nodes = hw->port_nodes;
135 nodes->node_index = i;
136 nodes->level = IPN3KE_TM_NODE_LEVEL_PORT;
137 nodes->tm_id = RTE_TM_NODE_ID_NULL;
138 nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
139 nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
140 nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
142 nodes->parent_node = NULL;
143 nodes->shaper_profile.valid = 0;
144 nodes->tdrop_profile = NULL;
145 nodes->n_children = 0;
146 TAILQ_INIT(&nodes->children_node_list);
/* Same reset for the virtual-tunnel (VT) level nodes. */
149 for (i = 0, nodes = hw->vt_nodes;
150 i < IPN3KE_TM_VT_NODE_NUM;
152 nodes->node_index = i;
153 nodes->level = IPN3KE_TM_NODE_LEVEL_VT;
154 nodes->tm_id = RTE_TM_NODE_ID_NULL;
155 nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
156 nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
157 nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
159 nodes->parent_node = NULL;
160 nodes->shaper_profile.valid = 0;
161 nodes->tdrop_profile = NULL;
162 nodes->n_children = 0;
163 TAILQ_INIT(&nodes->children_node_list);
/* Same reset for the class-of-service (COS, leaf) level nodes. */
166 for (i = 0, nodes = hw->cos_nodes;
167 i < IPN3KE_TM_COS_NODE_NUM;
169 nodes->node_index = i;
170 nodes->level = IPN3KE_TM_NODE_LEVEL_COS;
171 nodes->tm_id = RTE_TM_NODE_ID_NULL;
172 nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
173 nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
174 nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
176 nodes->parent_node = NULL;
177 nodes->shaper_profile.valid = 0;
178 nodes->tdrop_profile = NULL;
179 nodes->n_children = 0;
180 TAILQ_INIT(&nodes->children_node_list);
/* Mark all tail-drop profiles unused (valid=0, no users). */
183 for (i = 0, tdrop_profile = hw->tdrop_profile;
184 i < IPN3KE_TM_TDROP_PROFILE_NUM;
185 i++, tdrop_profile++) {
186 tdrop_profile->tdrop_profile_id = i;
187 tdrop_profile->n_users = 0;
188 tdrop_profile->valid = 0;
/*
 * Per-representor-port TM state initialization: binds this port's TM
 * instance to its HW port node, zeroes the profile/node counters, empties
 * the commit lists, and unfreezes the hierarchy.  tm_id is the port id.
 */
195 ipn3ke_tm_init(struct ipn3ke_rpst *rpst)
197 struct ipn3ke_tm_internals *tm;
198 struct ipn3ke_tm_node *port_node;
202 port_node = &rpst->hw->port_nodes[rpst->port_id];
203 tm->h.port_node = port_node;
205 tm->h.n_shaper_profiles = 0;
206 tm->h.n_tdrop_profiles = 0;
207 tm->h.n_vt_nodes = 0;
208 tm->h.n_cos_nodes = 0;
/* No pending port commit; VT/COS pending-commit lists start empty. */
210 tm->h.port_commit_node = NULL;
211 TAILQ_INIT(&tm->h.vt_commit_node_list);
212 TAILQ_INIT(&tm->h.cos_commit_node_list);
214 tm->hierarchy_frozen = 0;
216 tm->tm_id = rpst->port_id;
/*
 * Look up the per-node shaper profile addressed by @shaper_profile_id.
 * The id encodes both the hierarchy level (id / IPN3KE_TM_NODE_LEVEL_MOD)
 * and the node index within that level (id % IPN3KE_TM_NODE_LEVEL_MOD);
 * shaper profiles live embedded inside the node structures rather than in
 * a separate table.  Sets @error and yields no profile on a bad id.
 */
219 static struct ipn3ke_tm_shaper_profile *
220 ipn3ke_hw_tm_shaper_profile_search(struct ipn3ke_hw *hw,
221 uint32_t shaper_profile_id, struct rte_tm_error *error)
223 struct ipn3ke_tm_shaper_profile *sp = NULL;
224 uint32_t level_of_node_id;
227 /* Shaper profile ID must not be NONE. */
228 if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE) {
229 rte_tm_error_set(error,
231 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
233 rte_strerror(EINVAL));
/* Decode level and per-level node index from the profile id. */
238 level_of_node_id = shaper_profile_id / IPN3KE_TM_NODE_LEVEL_MOD;
239 node_index = shaper_profile_id % IPN3KE_TM_NODE_LEVEL_MOD;
241 switch (level_of_node_id) {
242 case IPN3KE_TM_NODE_LEVEL_PORT:
243 if (node_index >= hw->port_num)
244 rte_tm_error_set(error,
246 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
248 rte_strerror(EEXIST));
250 sp = &hw->port_nodes[node_index].shaper_profile;
254 case IPN3KE_TM_NODE_LEVEL_VT:
255 if (node_index >= IPN3KE_TM_VT_NODE_NUM)
256 rte_tm_error_set(error,
258 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
260 rte_strerror(EEXIST));
262 sp = &hw->vt_nodes[node_index].shaper_profile;
266 case IPN3KE_TM_NODE_LEVEL_COS:
267 if (node_index >= IPN3KE_TM_COS_NODE_NUM)
268 rte_tm_error_set(error,
270 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
272 rte_strerror(EEXIST));
274 sp = &hw->cos_nodes[node_index].shaper_profile;
/* Unknown level: report the error on the profile id. */
278 rte_tm_error_set(error,
280 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
282 rte_strerror(EEXIST));
/*
 * Look up tail-drop profile @tdrop_profile_id in the HW profile table.
 * Returns the profile only when the id is in range and the entry has been
 * marked valid; otherwise the (elided) fall-through paths yield no profile.
 */
288 static struct ipn3ke_tm_tdrop_profile *
289 ipn3ke_hw_tm_tdrop_profile_search(struct ipn3ke_hw *hw,
290 uint32_t tdrop_profile_id)
292 struct ipn3ke_tm_tdrop_profile *tdrop_profile;
294 if (tdrop_profile_id >= hw->tdrop_profile_num)
297 tdrop_profile = &hw->tdrop_profile[tdrop_profile_id];
298 if (tdrop_profile->valid)
299 return tdrop_profile;
/*
 * Resolve @node_id (level encoded as id / IPN3KE_TM_NODE_LEVEL_MOD, index
 * as id % IPN3KE_TM_NODE_LEVEL_MOD) to its node structure, sanity-check the
 * node's bookkeeping for its current state, and accept it only when its
 * state is set in @state_mask and (for non-IDLE nodes) it belongs to @tm_id.
 */
304 static struct ipn3ke_tm_node *
305 ipn3ke_hw_tm_node_search(struct ipn3ke_hw *hw, uint32_t tm_id,
306 uint32_t node_id, uint32_t state_mask)
308 uint32_t level_of_node_id;
310 struct ipn3ke_tm_node *n;
312 level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
313 node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
315 switch (level_of_node_id) {
316 case IPN3KE_TM_NODE_LEVEL_PORT:
317 if (node_index >= hw->port_num)
319 n = &hw->port_nodes[node_index];
322 case IPN3KE_TM_NODE_LEVEL_VT:
323 if (node_index >= IPN3KE_TM_VT_NODE_NUM)
325 n = &hw->vt_nodes[node_index];
328 case IPN3KE_TM_NODE_LEVEL_COS:
329 if (node_index >= IPN3KE_TM_COS_NODE_NUM)
331 n = &hw->cos_nodes[node_index];
338 /* Check tm node status */
/* An IDLE node must have no tm_id, no parent, and no links set. */
339 if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE) {
340 if (n->tm_id != RTE_TM_NODE_ID_NULL ||
341 n->parent_node_id != RTE_TM_NODE_ID_NULL ||
342 n->parent_node != NULL ||
344 IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
/* A non-IDLE node must have a tm_id and (unless port-level) a parent. */
346 } else if (n->node_state < IPN3KE_TM_NODE_STATE_MAX) {
347 if (n->tm_id == RTE_TM_NODE_ID_NULL ||
348 (level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
349 n->parent_node_id == RTE_TM_NODE_ID_NULL) ||
350 (level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
351 n->parent_node == NULL)) {
352 IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
355 IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
/* Accept only when the node's state is requested via state_mask. */
358 if (IPN3KE_BIT_ISSET(state_mask, n->node_state)) {
359 if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE)
361 else if (n->tm_id == tm_id)
370 /* Traffic manager node type get */
/*
 * rte_tm node_type_get callback: reports through *is_leaf whether
 * @node_id is a leaf.  Only COS-level nodes are leaves in this hierarchy;
 * the node must exist in COMMITTED state for this port's tm_id.
 */
372 ipn3ke_pmd_tm_node_type_get(struct rte_eth_dev *dev,
373 uint32_t node_id, int *is_leaf, struct rte_tm_error *error)
375 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
376 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
378 struct ipn3ke_tm_node *node;
382 return -rte_tm_error_set(error,
384 RTE_TM_ERROR_TYPE_UNSPECIFIED,
386 rte_strerror(EINVAL));
/* Only committed nodes are visible to this query. */
391 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
392 node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
393 if (node_id == RTE_TM_NODE_ID_NULL ||
395 return -rte_tm_error_set(error,
397 RTE_TM_ERROR_TYPE_NODE_ID,
399 rte_strerror(EINVAL));
/* Leaves exist only at the COS (bottom) level. */
401 *is_leaf = (node->level == IPN3KE_TM_NODE_LEVEL_COS) ? 1 : 0;
/* WRED is not supported by this device; tail drop is used instead. */
406 #define WRED_SUPPORTED 0
/* Stats supported on non-leaf nodes: packet/byte counts and green drops. */
408 #define STATS_MASK_DEFAULT \
409 (RTE_TM_STATS_N_PKTS | \
410 RTE_TM_STATS_N_BYTES | \
411 RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
412 RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
/* Leaf (queue) stats additionally expose the queued-packet count. */
414 #define STATS_MASK_QUEUE \
415 (STATS_MASK_DEFAULT | RTE_TM_STATS_N_PKTS_QUEUED)
417 /* Traffic manager capabilities get */
/*
 * rte_tm capabilities_get callback: fills @cap with the device-wide TM
 * capabilities (node/level counts, private byte-mode shapers only, no
 * shared shapers, no WRED/head-drop, no dynamic updates).
 */
419 ipn3ke_tm_capabilities_get(__rte_unused struct rte_eth_dev *dev,
420 struct rte_tm_capabilities *cap, struct rte_tm_error *error)
423 return -rte_tm_error_set(error,
425 RTE_TM_ERROR_TYPE_CAPABILITIES,
427 rte_strerror(EINVAL));
429 /* set all the parameters to 0 first. */
430 memset(cap, 0, sizeof(*cap));
432 cap->n_nodes_max = 1 + IPN3KE_TM_COS_NODE_NUM + IPN3KE_TM_VT_NODE_NUM;
433 cap->n_levels_max = IPN3KE_TM_NODE_LEVEL_MAX;
435 cap->non_leaf_nodes_identical = 0;
436 cap->leaf_nodes_identical = 1;
/* Private single-rate byte-mode shapers only: one per port + VT node. */
438 cap->shaper_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
439 cap->shaper_private_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
440 cap->shaper_private_dual_rate_n_max = 0;
441 cap->shaper_private_rate_min = 1;
442 cap->shaper_private_rate_max = 1 + IPN3KE_TM_VT_NODE_NUM;
443 cap->shaper_private_packet_mode_supported = 0;
444 cap->shaper_private_byte_mode_supported = 1;
/* Shared shapers are not supported at all. */
446 cap->shaper_shared_n_max = 0;
447 cap->shaper_shared_n_nodes_per_shaper_max = 0;
448 cap->shaper_shared_n_shapers_per_node_max = 0;
449 cap->shaper_shared_dual_rate_n_max = 0;
450 cap->shaper_shared_rate_min = 0;
451 cap->shaper_shared_rate_max = 0;
452 cap->shaper_shared_packet_mode_supported = 0;
453 cap->shaper_shared_byte_mode_supported = 0;
455 cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
456 cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
458 cap->sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
459 cap->sched_sp_n_priorities_max = 3;
460 cap->sched_wfq_n_children_per_group_max = UINT32_MAX;
461 cap->sched_wfq_n_groups_max = 1;
462 cap->sched_wfq_weight_max = UINT32_MAX;
463 cap->sched_wfq_packet_mode_supported = 0;
464 cap->sched_wfq_byte_mode_supported = 1;
/* No WRED / head-drop congestion management. */
466 cap->cman_wred_packet_mode_supported = 0;
467 cap->cman_wred_byte_mode_supported = 0;
468 cap->cman_head_drop_supported = 0;
469 cap->cman_wred_context_n_max = 0;
470 cap->cman_wred_context_private_n_max = 0;
471 cap->cman_wred_context_shared_n_max = 0;
472 cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
473 cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
476 * cap->mark_vlan_dei_supported = {0, 0, 0};
477 * cap->mark_ip_ecn_tcp_supported = {0, 0, 0};
478 * cap->mark_ip_ecn_sctp_supported = {0, 0, 0};
479 * cap->mark_ip_dscp_supported = {0, 0, 0};
482 cap->dynamic_update_mask = 0;
489 /* Traffic manager level capabilities get */
/*
 * rte_tm level_capabilities_get callback: fills @cap for one hierarchy
 * level.  PORT and VT levels are non-leaf (byte-mode private shapers,
 * single SP priority, no WFQ); COS is the leaf level (stats only, WRED
 * gated by WRED_SUPPORTED which is 0 on this device).
 */
491 ipn3ke_tm_level_capabilities_get(struct rte_eth_dev *dev,
492 uint32_t level_id, struct rte_tm_level_capabilities *cap,
493 struct rte_tm_error *error)
495 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
498 return -rte_tm_error_set(error,
500 RTE_TM_ERROR_TYPE_CAPABILITIES,
502 rte_strerror(EINVAL));
504 if (level_id >= IPN3KE_TM_NODE_LEVEL_MAX)
505 return -rte_tm_error_set(error,
507 RTE_TM_ERROR_TYPE_LEVEL_ID,
509 rte_strerror(EINVAL));
511 /* set all the parameters to 0 first. */
512 memset(cap, 0, sizeof(*cap));
/* Top level: one node per FPGA port, children are VT nodes. */
515 case IPN3KE_TM_NODE_LEVEL_PORT:
516 cap->n_nodes_max = hw->port_num;
517 cap->n_nodes_nonleaf_max = IPN3KE_TM_VT_NODE_NUM;
518 cap->n_nodes_leaf_max = 0;
519 cap->non_leaf_nodes_identical = 0;
520 cap->leaf_nodes_identical = 0;
522 cap->nonleaf.shaper_private_supported = 0;
523 cap->nonleaf.shaper_private_dual_rate_supported = 0;
524 cap->nonleaf.shaper_private_rate_min = 1;
525 cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
526 cap->nonleaf.shaper_private_packet_mode_supported = 0;
527 cap->nonleaf.shaper_private_byte_mode_supported = 1;
528 cap->nonleaf.shaper_shared_n_max = 0;
529 cap->nonleaf.shaper_shared_packet_mode_supported = 0;
530 cap->nonleaf.shaper_shared_byte_mode_supported = 0;
532 cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
533 cap->nonleaf.sched_sp_n_priorities_max = 1;
534 cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
535 cap->nonleaf.sched_wfq_n_groups_max = 0;
536 cap->nonleaf.sched_wfq_weight_max = 0;
537 cap->nonleaf.sched_wfq_packet_mode_supported = 0;
538 cap->nonleaf.sched_wfq_byte_mode_supported = 0;
540 cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;
/* Middle level: virtual tunnels, children are COS nodes. */
543 case IPN3KE_TM_NODE_LEVEL_VT:
544 cap->n_nodes_max = IPN3KE_TM_VT_NODE_NUM;
545 cap->n_nodes_nonleaf_max = IPN3KE_TM_COS_NODE_NUM;
546 cap->n_nodes_leaf_max = 0;
547 cap->non_leaf_nodes_identical = 0;
548 cap->leaf_nodes_identical = 0;
550 cap->nonleaf.shaper_private_supported = 0;
551 cap->nonleaf.shaper_private_dual_rate_supported = 0;
552 cap->nonleaf.shaper_private_rate_min = 1;
553 cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
554 cap->nonleaf.shaper_private_packet_mode_supported = 0;
555 cap->nonleaf.shaper_private_byte_mode_supported = 1;
556 cap->nonleaf.shaper_shared_n_max = 0;
557 cap->nonleaf.shaper_shared_packet_mode_supported = 0;
558 cap->nonleaf.shaper_shared_byte_mode_supported = 0;
560 cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
561 cap->nonleaf.sched_sp_n_priorities_max = 1;
562 cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
563 cap->nonleaf.sched_wfq_n_groups_max = 0;
564 cap->nonleaf.sched_wfq_weight_max = 0;
565 cap->nonleaf.sched_wfq_packet_mode_supported = 0;
566 cap->nonleaf.sched_wfq_byte_mode_supported = 0;
568 cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;
/* Bottom level: COS leaf queues; no private shaping, tail drop only. */
571 case IPN3KE_TM_NODE_LEVEL_COS:
572 cap->n_nodes_max = IPN3KE_TM_COS_NODE_NUM;
573 cap->n_nodes_nonleaf_max = 0;
574 cap->n_nodes_leaf_max = IPN3KE_TM_COS_NODE_NUM;
575 cap->non_leaf_nodes_identical = 0;
576 cap->leaf_nodes_identical = 0;
578 cap->leaf.shaper_private_supported = 0;
579 cap->leaf.shaper_private_dual_rate_supported = 0;
580 cap->leaf.shaper_private_rate_min = 0;
581 cap->leaf.shaper_private_rate_max = 0;
582 cap->leaf.shaper_private_packet_mode_supported = 0;
583 cap->leaf.shaper_private_byte_mode_supported = 1;
584 cap->leaf.shaper_shared_n_max = 0;
585 cap->leaf.shaper_shared_packet_mode_supported = 0;
586 cap->leaf.shaper_shared_byte_mode_supported = 0;
588 cap->leaf.cman_head_drop_supported = 0;
589 cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
590 cap->leaf.cman_wred_byte_mode_supported = 0;
591 cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
592 cap->leaf.cman_wred_context_shared_n_max = 0;
594 cap->leaf.stats_mask = STATS_MASK_QUEUE;
/* Unknown level id: reject. */
598 return -rte_tm_error_set(error,
600 RTE_TM_ERROR_TYPE_LEVEL_ID,
602 rte_strerror(EINVAL));
609 /* Traffic manager node capabilities get */
/*
 * rte_tm node_capabilities_get callback: fills @cap for one committed node
 * of this port's hierarchy, keyed on the node's level (PORT/VT non-leaf
 * with a private byte-mode shaper; COS leaf with queue stats only).
 */
611 ipn3ke_tm_node_capabilities_get(struct rte_eth_dev *dev,
612 uint32_t node_id, struct rte_tm_node_capabilities *cap,
613 struct rte_tm_error *error)
615 struct ipn3ke_rpst *representor = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
616 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
617 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
619 struct ipn3ke_tm_node *tm_node;
623 return -rte_tm_error_set(error,
625 RTE_TM_ERROR_TYPE_CAPABILITIES,
627 rte_strerror(EINVAL));
/* Only committed nodes can be queried. */
632 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
633 tm_node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
635 return -rte_tm_error_set(error,
637 RTE_TM_ERROR_TYPE_NODE_ID,
639 rte_strerror(EINVAL));
/* The node must belong to this representor's port. */
641 if (tm_node->tm_id != representor->port_id)
642 return -rte_tm_error_set(error,
644 RTE_TM_ERROR_TYPE_NODE_ID,
646 rte_strerror(EINVAL));
648 /* set all the parameters to 0 first. */
649 memset(cap, 0, sizeof(*cap));
651 switch (tm_node->level) {
652 case IPN3KE_TM_NODE_LEVEL_PORT:
653 cap->shaper_private_supported = 1;
654 cap->shaper_private_dual_rate_supported = 0;
655 cap->shaper_private_rate_min = 1;
656 cap->shaper_private_rate_max = UINT32_MAX;
657 cap->shaper_private_packet_mode_supported = 0;
658 cap->shaper_private_byte_mode_supported = 1;
659 cap->shaper_shared_n_max = 0;
660 cap->shaper_shared_packet_mode_supported = 0;
661 cap->shaper_shared_byte_mode_supported = 0;
663 cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
664 cap->nonleaf.sched_sp_n_priorities_max = 1;
665 cap->nonleaf.sched_wfq_n_children_per_group_max =
666 IPN3KE_TM_VT_NODE_NUM;
667 cap->nonleaf.sched_wfq_n_groups_max = 1;
668 cap->nonleaf.sched_wfq_weight_max = 1;
669 cap->nonleaf.sched_wfq_packet_mode_supported = 0;
670 cap->nonleaf.sched_wfq_byte_mode_supported = 0;
672 cap->stats_mask = STATS_MASK_DEFAULT;
675 case IPN3KE_TM_NODE_LEVEL_VT:
676 cap->shaper_private_supported = 1;
677 cap->shaper_private_dual_rate_supported = 0;
678 cap->shaper_private_rate_min = 1;
679 cap->shaper_private_rate_max = UINT32_MAX;
680 cap->shaper_private_packet_mode_supported = 0;
681 cap->shaper_private_byte_mode_supported = 1;
682 cap->shaper_shared_n_max = 0;
683 cap->shaper_shared_packet_mode_supported = 0;
684 cap->shaper_shared_byte_mode_supported = 0;
686 cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
687 cap->nonleaf.sched_sp_n_priorities_max = 1;
688 cap->nonleaf.sched_wfq_n_children_per_group_max =
689 IPN3KE_TM_COS_NODE_NUM;
690 cap->nonleaf.sched_wfq_n_groups_max = 1;
691 cap->nonleaf.sched_wfq_weight_max = 1;
692 cap->nonleaf.sched_wfq_packet_mode_supported = 0;
693 cap->nonleaf.sched_wfq_byte_mode_supported = 0;
695 cap->stats_mask = STATS_MASK_DEFAULT;
/* COS leaf: no private shaper; queue stats; WRED disabled (== 0). */
698 case IPN3KE_TM_NODE_LEVEL_COS:
699 cap->shaper_private_supported = 0;
700 cap->shaper_private_dual_rate_supported = 0;
701 cap->shaper_private_rate_min = 0;
702 cap->shaper_private_rate_max = 0;
703 cap->shaper_private_packet_mode_supported = 0;
704 cap->shaper_private_byte_mode_supported = 0;
705 cap->shaper_shared_n_max = 0;
706 cap->shaper_shared_packet_mode_supported = 0;
707 cap->shaper_shared_byte_mode_supported = 0;
709 cap->leaf.cman_head_drop_supported = 0;
710 cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
711 cap->leaf.cman_wred_byte_mode_supported = 0;
712 cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
713 cap->leaf.cman_wred_context_shared_n_max = 0;
715 cap->stats_mask = STATS_MASK_QUEUE;
/*
 * Translate a generic rte_tm shaper peak rate into the device's
 * mantissa/exponent encoding: find the range-table row whose [low, high]
 * interval contains the rate, then derive mantissa m = (rate/4)/exp2 and
 * exponent e = exp from that row.  The (elided) tail presumably returns
 * non-zero when no row matches — TODO confirm against the full source.
 */
725 ipn3ke_tm_shaper_parame_trans(struct rte_tm_shaper_params *profile,
726 struct ipn3ke_tm_shaper_profile *local_profile,
727 const struct ipn3ke_tm_shaper_params_range_type *ref_data)
730 const struct ipn3ke_tm_shaper_params_range_type *r;
733 rate = profile->peak.rate;
734 for (i = 0, r = ref_data; i < IPN3KE_TM_SHAPER_RANGE_NUM; i++, r++) {
735 if (rate >= r->low &&
737 local_profile->m = (rate / 4) / r->exp2;
738 local_profile->e = r->exp;
739 local_profile->rate = rate;
/*
 * rte_tm shaper_profile_add callback: validates @profile (single-rate,
 * non-zero peak rate within the HW-encodable range, zero burst size and
 * packet-length adjust), encodes the rate into the per-node profile slot
 * addressed by @shaper_profile_id, and bumps the profile count.
 */
749 ipn3ke_tm_shaper_profile_add(struct rte_eth_dev *dev,
750 uint32_t shaper_profile_id, struct rte_tm_shaper_params *profile,
751 struct rte_tm_error *error)
753 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
754 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
755 struct ipn3ke_tm_shaper_profile *sp;
757 /* Shaper profile must not exist. */
758 sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
759 if (!sp || (sp && sp->valid))
760 return -rte_tm_error_set(error,
762 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
764 rte_strerror(EEXIST));
766 /* Profile must not be NULL. */
768 return -rte_tm_error_set(error,
770 RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
772 rte_strerror(EINVAL));
774 /* Peak rate: non-zero, 32-bit */
775 if (profile->peak.rate == 0 ||
776 profile->peak.rate > IPN3KE_TM_SHAPER_PEAK_RATE_MAX)
777 return -rte_tm_error_set(error,
779 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
781 rte_strerror(EINVAL));
783 /* Peak size: non-zero, 32-bit */
784 if (profile->peak.size != 0)
785 return -rte_tm_error_set(error,
787 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
789 rte_strerror(EINVAL));
791 /* Dual-rate profiles are not supported. */
792 if (profile->committed.rate > IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX)
793 return -rte_tm_error_set(error,
795 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
797 rte_strerror(EINVAL));
799 /* Packet length adjust: 24 bytes */
800 if (profile->pkt_length_adjust != 0)
801 return -rte_tm_error_set(error,
803 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
805 rte_strerror(EINVAL));
/* Encode peak rate into the HW mantissa/exponent form. */
807 if (ipn3ke_tm_shaper_parame_trans(profile,
809 ipn3ke_tm_shaper_params_rang)) {
810 return -rte_tm_error_set(error,
812 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
814 rte_strerror(EINVAL));
/* Store the caller's parameters and account for the new profile. */
817 rte_memcpy(&sp->params, profile, sizeof(sp->params));
820 tm->h.n_shaper_profiles++;
825 /* Traffic manager shaper profile delete */
/*
 * rte_tm shaper_profile_delete callback: the profile must exist and be
 * valid; the (elided) body then invalidates it and drops the count.
 */
827 ipn3ke_tm_shaper_profile_delete(struct rte_eth_dev *dev,
828 uint32_t shaper_profile_id, struct rte_tm_error *error)
830 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
831 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
832 struct ipn3ke_tm_shaper_profile *sp;
/* Check existing: the profile must currently be valid. */
835 sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
836 if (!sp || (sp && !sp->valid))
837 return -rte_tm_error_set(error,
839 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
841 rte_strerror(EINVAL));
844 tm->h.n_shaper_profiles--;
/*
 * Validate a tail-drop (WRED-shaped) profile request: the id must not be
 * NONE, the profile pointer non-NULL, byte mode only (packet_mode == 0),
 * and the GREEN thresholds must fit the HW threshold fields.  Returns 0 on
 * success, negative errno via rte_tm_error_set() otherwise.
 */
850 ipn3ke_tm_tdrop_profile_check(__rte_unused struct rte_eth_dev *dev,
851 uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
852 struct rte_tm_error *error)
854 enum rte_color color;
856 /* TDROP profile ID must not be NONE. */
857 if (tdrop_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
858 return -rte_tm_error_set(error,
860 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
862 rte_strerror(EINVAL));
864 /* Profile must not be NULL. */
866 return -rte_tm_error_set(error,
868 RTE_TM_ERROR_TYPE_WRED_PROFILE,
870 rte_strerror(EINVAL));
872 /* TDROP profile should be in packet mode */
873 if (profile->packet_mode != 0)
874 return -rte_tm_error_set(error,
876 RTE_TM_ERROR_TYPE_WRED_PROFILE,
878 rte_strerror(ENOTSUP));
880 /* min_th <= max_th, max_th > 0 */
/* Only the GREEN color is checked (loop bounds are GREEN..GREEN). */
881 for (color = RTE_COLOR_GREEN; color <= RTE_COLOR_GREEN; color++) {
882 uint64_t min_th = profile->red_params[color].min_th;
883 uint64_t max_th = profile->red_params[color].max_th;
/* Threshold must fit in the TH1/TH2 register fields. */
885 if (((min_th >> IPN3KE_TDROP_TH1_SHIFT) >>
886 IPN3KE_TDROP_TH1_SHIFT) ||
888 return -rte_tm_error_set(error,
890 RTE_TM_ERROR_TYPE_WRED_PROFILE,
892 rte_strerror(EINVAL));
/*
 * Program tail-drop profile @tp into the FPGA: writes the profile payload
 * via the CCB PROFILE_MS register and commits it to the profile table slot
 * tp->tdrop_profile_id via CCB PROFILE_P, in two MS/P register pairs
 * (the elided write values presumably carry th1/th2 — TODO confirm).
 */
899 ipn3ke_hw_tm_tdrop_wr(struct ipn3ke_hw *hw,
900 struct ipn3ke_tm_tdrop_profile *tp)
903 IPN3KE_MASK_WRITE_REG(hw,
904 IPN3KE_CCB_PROFILE_MS,
907 IPN3KE_CCB_PROFILE_MS_MASK);
909 IPN3KE_MASK_WRITE_REG(hw,
910 IPN3KE_CCB_PROFILE_P,
911 tp->tdrop_profile_id,
913 IPN3KE_CCB_PROFILE_MASK);
915 IPN3KE_MASK_WRITE_REG(hw,
916 IPN3KE_CCB_PROFILE_MS,
919 IPN3KE_CCB_PROFILE_MS_MASK);
921 IPN3KE_MASK_WRITE_REG(hw,
922 IPN3KE_CCB_PROFILE_P,
923 tp->tdrop_profile_id,
925 IPN3KE_CCB_PROFILE_MASK);
931 /* Traffic manager TDROP profile add */
/*
 * rte_tm WRED-profile-add callback (implemented as HW tail drop):
 * validates the request, splits the GREEN min threshold into the HW
 * th1/th2 fields, records the parameters, bumps the count, and programs
 * the profile into the FPGA.
 */
933 ipn3ke_tm_tdrop_profile_add(struct rte_eth_dev *dev,
934 uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
935 struct rte_tm_error *error)
937 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
938 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
939 struct ipn3ke_tm_tdrop_profile *tp;
944 /* Check input params */
945 status = ipn3ke_tm_tdrop_profile_check(dev,
952 /* Memory allocation */
953 tp = &hw->tdrop_profile[tdrop_profile_id];
/* Split the 64-bit GREEN min threshold into the two HW register fields. */
957 min_th = profile->red_params[RTE_COLOR_GREEN].min_th;
958 th1 = (uint32_t)(min_th & IPN3KE_TDROP_TH1_MASK);
959 th2 = (uint32_t)((min_th >> IPN3KE_TDROP_TH1_SHIFT) &
960 IPN3KE_TDROP_TH2_MASK);
963 rte_memcpy(&tp->params, profile, sizeof(tp->params));
966 tm->h.n_tdrop_profiles++;
/* Push the new profile to hardware. */
969 ipn3ke_hw_tm_tdrop_wr(hw, tp);
974 /* Traffic manager TDROP profile delete */
/*
 * rte_tm WRED-profile-delete callback: the profile must exist and have no
 * remaining users; the (elided) body invalidates it, decrements the count,
 * and rewrites the (now cleared) profile slot to hardware.
 */
976 ipn3ke_tm_tdrop_profile_delete(struct rte_eth_dev *dev,
977 uint32_t tdrop_profile_id, struct rte_tm_error *error)
979 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
980 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
981 struct ipn3ke_tm_tdrop_profile *tp;
/* Check existing */
984 tp = ipn3ke_hw_tm_tdrop_profile_search(hw, tdrop_profile_id);
986 return -rte_tm_error_set(error,
988 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
990 rte_strerror(EINVAL));
/* Profile must not be in use by any node. */
994 return -rte_tm_error_set(error,
996 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
998 rte_strerror(EBUSY));
1002 tm->h.n_tdrop_profiles--;
/* Sync the cleared slot back to hardware. */
1005 ipn3ke_hw_tm_tdrop_wr(hw, tp);
/*
 * Parameter validation for node_add: checks node id (non-NULL, level part
 * matching @level_id), priority and weight bounds, the level-appropriate
 * parent id (port nodes have no parent; VT under port; COS under VT), and
 * that @params is non-NULL with no shared shapers.  Returns 0 on success,
 * negative errno via rte_tm_error_set() otherwise.
 */
1011 ipn3ke_tm_node_add_check_parameter(uint32_t tm_id,
1012 uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
1013 uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
1014 struct rte_tm_error *error)
1016 uint32_t level_of_node_id;
1017 uint32_t node_index;
1018 uint32_t parent_level_id;
1020 if (node_id == RTE_TM_NODE_ID_NULL)
1021 return -rte_tm_error_set(error,
1023 RTE_TM_ERROR_TYPE_NODE_ID,
1025 rte_strerror(EINVAL));
1027 /* priority: must be 0, 1, 2, 3 */
1028 if (priority > IPN3KE_TM_NODE_PRIORITY_HIGHEST)
1029 return -rte_tm_error_set(error,
1031 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1033 rte_strerror(EINVAL));
1035 /* weight: must be 1 .. 255 */
1036 if (weight > IPN3KE_TM_NODE_WEIGHT_MAX)
1037 return -rte_tm_error_set(error,
1039 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1041 rte_strerror(EINVAL));
1043 /* check node id and parent id*/
/* The node id's encoded level must agree with the caller's level_id. */
1044 level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
1045 if (level_of_node_id != level_id)
1046 return -rte_tm_error_set(error,
1048 RTE_TM_ERROR_TYPE_NODE_ID,
1050 rte_strerror(EINVAL));
1051 node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
1052 parent_level_id = parent_node_id / IPN3KE_TM_NODE_LEVEL_MOD;
/* Port node: index must equal tm_id and it is the root (no parent). */
1054 case IPN3KE_TM_NODE_LEVEL_PORT:
1055 if (node_index != tm_id)
1056 return -rte_tm_error_set(error,
1058 RTE_TM_ERROR_TYPE_NODE_ID,
1060 rte_strerror(EINVAL));
1061 if (parent_node_id != RTE_TM_NODE_ID_NULL)
1062 return -rte_tm_error_set(error,
1064 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1066 rte_strerror(EINVAL));
/* VT node: index in range, parent must be a port-level node. */
1069 case IPN3KE_TM_NODE_LEVEL_VT:
1070 if (node_index >= IPN3KE_TM_VT_NODE_NUM)
1071 return -rte_tm_error_set(error,
1073 RTE_TM_ERROR_TYPE_NODE_ID,
1075 rte_strerror(EINVAL));
1076 if (parent_level_id != IPN3KE_TM_NODE_LEVEL_PORT)
1077 return -rte_tm_error_set(error,
1079 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1081 rte_strerror(EINVAL));
/* COS node: index in range, parent must be a VT-level node. */
1084 case IPN3KE_TM_NODE_LEVEL_COS:
1085 if (node_index >= IPN3KE_TM_COS_NODE_NUM)
1086 return -rte_tm_error_set(error,
1088 RTE_TM_ERROR_TYPE_NODE_ID,
1090 rte_strerror(EINVAL));
1091 if (parent_level_id != IPN3KE_TM_NODE_LEVEL_VT)
1092 return -rte_tm_error_set(error,
1094 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1096 rte_strerror(EINVAL));
1099 return -rte_tm_error_set(error,
1101 RTE_TM_ERROR_TYPE_LEVEL_ID,
1103 rte_strerror(EINVAL));
1106 /* params: must not be NULL */
1108 return -rte_tm_error_set(error,
1110 RTE_TM_ERROR_TYPE_NODE_PARAMS,
1112 rte_strerror(EINVAL));
1113 /* No shared shapers */
1114 if (params->n_shared_shapers != 0)
1115 return -rte_tm_error_set(error,
1117 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1119 rte_strerror(EINVAL));
/*
 * Mount-point validation for node_add: a VT node must attach to this
 * port's tm_id, and a COS node must attach to the VT slot implied by its
 * own index (node_index / IPN3KE_TM_NODE_MOUNT_MAX) — i.e. COS queues are
 * statically grouped onto VT nodes in fixed-size blocks.
 */
1124 ipn3ke_tm_node_add_check_mount(uint32_t tm_id,
1125 uint32_t node_id, uint32_t parent_node_id, uint32_t level_id,
1126 struct rte_tm_error *error)
1128 uint32_t node_index;
1129 uint32_t parent_index;
1130 uint32_t parent_index1;
1132 node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
1133 parent_index = parent_node_id % IPN3KE_TM_NODE_LEVEL_MOD;
1134 parent_index1 = node_index / IPN3KE_TM_NODE_MOUNT_MAX;
1136 case IPN3KE_TM_NODE_LEVEL_PORT:
/* VT: parent port index must be this tm's port id. */
1139 case IPN3KE_TM_NODE_LEVEL_VT:
1140 if (parent_index != tm_id)
1141 return -rte_tm_error_set(error,
1143 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1145 rte_strerror(EINVAL));
/* COS: parent VT index must match the index implied by the COS index. */
1148 case IPN3KE_TM_NODE_LEVEL_COS:
1149 if (parent_index != parent_index1)
1150 return -rte_tm_error_set(error,
1152 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1154 rte_strerror(EINVAL));
1157 return -rte_tm_error_set(error,
1159 RTE_TM_ERROR_TYPE_LEVEL_ID,
1161 rte_strerror(EINVAL));
1167 /* Traffic manager node add */
/*
 * rte_tm node_add callback: validates parameters and mount point, locates
 * an IDLE (or pending-delete) node slot for @node_id, resolves the parent,
 * then marks the node CONFIGURED_ADD — queueing it on the per-level commit
 * list and updating parent/child bookkeeping.  Changes are applied to HW
 * later, at hierarchy-commit time.
 */
1169 ipn3ke_tm_node_add(struct rte_eth_dev *dev,
1170 uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
1171 uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
1172 struct rte_tm_error *error)
1174 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1175 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1177 struct ipn3ke_tm_node *n, *parent_node;
1178 uint32_t node_state, state_mask;
/* No topology changes once the hierarchy has been committed/frozen. */
1182 if (tm->hierarchy_frozen)
1183 return -rte_tm_error_set(error,
1185 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1187 rte_strerror(EBUSY));
1191 status = ipn3ke_tm_node_add_check_parameter(tm_id,
1202 status = ipn3ke_tm_node_add_check_mount(tm_id,
1210 /* Shaper profile ID must not be NONE. */
/* Convention here: a node's shaper profile id equals its node id. */
1211 if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
1212 params->shaper_profile_id != node_id)
1213 return -rte_tm_error_set(error,
1215 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1217 rte_strerror(EINVAL));
1219 /* Memory allocation */
/* The target slot must be IDLE or pending deletion to be (re)added. */
1221 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_IDLE);
1222 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_DEL);
1223 n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
1225 return -rte_tm_error_set(error,
1227 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1229 rte_strerror(EINVAL));
1230 node_state = n->node_state;
1232 /* Check parent node */
/* Parent must already be added (pending or committed). */
1234 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
1235 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
1236 if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1237 parent_node = ipn3ke_hw_tm_node_search(hw,
1242 return -rte_tm_error_set(error,
1244 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1246 rte_strerror(EINVAL));
/* Port node: single pending commit slot, no commit list. */
1252 case IPN3KE_TM_NODE_LEVEL_PORT:
1253 n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
1255 tm->h.port_commit_node = n;
/* VT node: enqueue on the VT commit list only on IDLE -> ADD. */
1258 case IPN3KE_TM_NODE_LEVEL_VT:
1259 if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
1260 TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);
1262 parent_node->n_children++;
1264 } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1266 parent_node->n_children++;
1269 n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
1270 n->parent_node_id = parent_node_id;
1272 n->parent_node = parent_node;
/* COS node: same pattern on the COS commit list + COS counter. */
1276 case IPN3KE_TM_NODE_LEVEL_COS:
1277 if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
1278 TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
1281 parent_node->n_children++;
1282 tm->h.n_cos_nodes++;
1283 } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1285 parent_node->n_children++;
1286 tm->h.n_cos_nodes++;
1288 n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
1289 n->parent_node_id = parent_node_id;
1291 n->parent_node = parent_node;
1295 return -rte_tm_error_set(error,
1297 RTE_TM_ERROR_TYPE_LEVEL_ID,
1299 rte_strerror(EINVAL));
1303 n->priority = priority;
/* Leaf nodes with tail-drop cman get the referenced tdrop profile. */
1306 if (n->level == IPN3KE_TM_NODE_LEVEL_COS &&
1307 params->leaf.cman == RTE_TM_CMAN_TAIL_DROP)
1308 n->tdrop_profile = ipn3ke_hw_tm_tdrop_profile_search(hw,
1309 params->leaf.wred.wred_profile_id);
1311 rte_memcpy(&n->params, params, sizeof(n->params));
/*
 * Parameter validation for node_delete: the node id must not be NULL, its
 * encoded level must be a known one, and the per-level index must be in
 * range (port index must equal tm_id).  Returns 0 on success, negative
 * errno via rte_tm_error_set() otherwise.
 */
1317 ipn3ke_tm_node_del_check_parameter(uint32_t tm_id,
1318 uint32_t node_id, struct rte_tm_error *error)
1320 uint32_t level_of_node_id;
1321 uint32_t node_index;
1323 if (node_id == RTE_TM_NODE_ID_NULL)
1324 return -rte_tm_error_set(error,
1326 RTE_TM_ERROR_TYPE_NODE_ID,
1328 rte_strerror(EINVAL));
1330 /* check node id and parent id*/
1331 level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
1332 node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
1333 switch (level_of_node_id) {
1334 case IPN3KE_TM_NODE_LEVEL_PORT:
1335 if (node_index != tm_id)
1336 return -rte_tm_error_set(error,
1338 RTE_TM_ERROR_TYPE_NODE_ID,
1340 rte_strerror(EINVAL));
1343 case IPN3KE_TM_NODE_LEVEL_VT:
1344 if (node_index >= IPN3KE_TM_VT_NODE_NUM)
1345 return -rte_tm_error_set(error,
1347 RTE_TM_ERROR_TYPE_NODE_ID,
1349 rte_strerror(EINVAL));
1352 case IPN3KE_TM_NODE_LEVEL_COS:
1353 if (node_index >= IPN3KE_TM_COS_NODE_NUM)
1354 return -rte_tm_error_set(error,
1356 RTE_TM_ERROR_TYPE_NODE_ID,
1358 rte_strerror(EINVAL));
1361 return -rte_tm_error_set(error,
1363 RTE_TM_ERROR_TYPE_LEVEL_ID,
1365 rte_strerror(EINVAL));
1371 /* Traffic manager node delete */
/*
 * rte_tm_ops::node_delete implementation.
 *
 * Marks a node as CONFIGURED_DEL and moves it onto the appropriate
 * per-level commit list; the actual hardware update happens later in
 * hierarchy_commit. Fails with EBUSY while the hierarchy is frozen,
 * and with EINVAL when the node does not exist, still has children,
 * or its parent linkage is inconsistent.
 *
 * NOTE(review): extraction gaps hide several lines here (e.g. the
 * NULL-check after the node search and the `break;` statements);
 * code is kept byte-identical.
 */
1373 ipn3ke_pmd_tm_node_delete(struct rte_eth_dev *dev,
1374 uint32_t node_id, struct rte_tm_error *error)
1376 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1377 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1378 struct ipn3ke_tm_node *n, *parent_node;
1381 uint32_t level_of_node_id;
1382 uint32_t node_state;
1383 uint32_t state_mask;
1385 /* Check hierarchy changes are currently allowed */
1386 if (tm->hierarchy_frozen)
1387 return -rte_tm_error_set(error,
1389 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1391 rte_strerror(EBUSY));
/* Validate node id / level encoding before touching any state. */
1395 status = ipn3ke_tm_node_del_check_parameter(tm_id,
1401 /* Check existing */
/* Only nodes that are pending-add or committed can be deleted. */
1403 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
1404 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
1405 n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
1407 return -rte_tm_error_set(error,
1409 RTE_TM_ERROR_TYPE_NODE_ID,
1411 rte_strerror(EINVAL));
/* Refuse to delete an interior node that still has children. */
1413 if (n->n_children > 0)
1414 return -rte_tm_error_set(error,
1416 RTE_TM_ERROR_TYPE_NODE_ID,
1418 rte_strerror(EINVAL));
1420 node_state = n->node_state;
1422 level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
1424 /* Check parent node */
1425 if (n->parent_node_id != RTE_TM_NODE_ID_NULL) {
1427 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
1428 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
1429 parent_node = ipn3ke_hw_tm_node_search(hw,
1434 return -rte_tm_error_set(error,
1436 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1438 rte_strerror(EINVAL));
/* The cached parent pointer must agree with the looked-up parent. */
1439 if (n->parent_node != parent_node)
1440 return -rte_tm_error_set(error,
1442 RTE_TM_ERROR_TYPE_NODE_ID,
1444 rte_strerror(EINVAL));
/* Per-level bookkeeping: queue the node for deletion at commit time. */
1449 switch (level_of_node_id) {
1450 case IPN3KE_TM_NODE_LEVEL_PORT:
1451 if (tm->h.port_node != n)
1452 return -rte_tm_error_set(error,
1454 RTE_TM_ERROR_TYPE_NODE_ID,
1456 rte_strerror(EINVAL));
1457 n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
1458 tm->h.port_commit_node = n;
1462 case IPN3KE_TM_NODE_LEVEL_VT:
/* A committed VT node leaves its parent's child list and joins
 * the VT commit list; a pending-add node is simply unlinked. */
1463 if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
1465 TAILQ_REMOVE(&parent_node->children_node_list,
1467 TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);
1469 parent_node->n_children--;
1471 } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1473 parent_node->n_children--;
1476 n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
1480 case IPN3KE_TM_NODE_LEVEL_COS:
/* Same as the VT case, plus the COS node count is decremented. */
1481 if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
1483 TAILQ_REMOVE(&parent_node->children_node_list,
1485 TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
1488 parent_node->n_children--;
1489 tm->h.n_cos_nodes--;
1490 } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1492 parent_node->n_children--;
1493 tm->h.n_cos_nodes--;
1495 n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
/* Unknown level: reject. */
1499 return -rte_tm_error_set(error,
1501 RTE_TM_ERROR_TYPE_LEVEL_ID,
1503 rte_strerror(EINVAL));
/*
 * Pre-commit validation pass over all pending (commit-list) nodes.
 *
 * Walks the COS list, then the VT list, then the single pending port
 * node, and rejects the whole commit with EINVAL if any pending-add
 * node has a missing/invalid parent, wrong level, wrong tm_id, or no
 * valid shaper profile, or if a pending-delete COS node still has
 * children. Returns 0 when the pending hierarchy is consistent.
 *
 * NOTE(review): extraction gaps hide parts of the compound parent-state
 * conditions; code is kept byte-identical.
 */
1510 ipn3ke_tm_hierarchy_commit_check(struct rte_eth_dev *dev,
1511 struct rte_tm_error *error)
1513 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1515 struct ipn3ke_tm_node_list *nl;
1516 struct ipn3ke_tm_node *n, *parent_node;
/* 1) Pending COS (leaf) nodes. */
1520 nl = &tm->h.cos_commit_node_list;
1521 TAILQ_FOREACH(n, nl, node) {
1522 parent_node = n->parent_node;
1523 if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
/* An added leaf needs a live parent, the COS level, the right
 * tm_id and a valid shaper profile. */
1524 if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
1525 n->level != IPN3KE_TM_NODE_LEVEL_COS ||
1526 n->tm_id != tm_id ||
1527 parent_node == NULL ||
1529 parent_node->node_state ==
1530 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
1532 parent_node->node_state ==
1533 IPN3KE_TM_NODE_STATE_IDLE) ||
1534 n->shaper_profile.valid == 0) {
1535 return -rte_tm_error_set(error,
1537 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1539 rte_strerror(EINVAL));
1541 } else if (n->node_state ==
1542 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
/* A deleted leaf must really be a childless COS node. */
1543 if (n->level != IPN3KE_TM_NODE_LEVEL_COS ||
1544 n->n_children != 0) {
1545 return -rte_tm_error_set(error,
1547 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1549 rte_strerror(EINVAL));
/* 2) Pending VT nodes: same added-node checks as COS, but a VT
 *    node may never be pending delete at commit time. */
1554 nl = &tm->h.vt_commit_node_list;
1555 TAILQ_FOREACH(n, nl, node) {
1556 parent_node = n->parent_node;
1557 if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1558 if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
1559 n->level != IPN3KE_TM_NODE_LEVEL_VT ||
1560 n->tm_id != tm_id ||
1561 parent_node == NULL ||
1563 parent_node->node_state ==
1564 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
1566 parent_node->node_state ==
1567 IPN3KE_TM_NODE_STATE_IDLE) ||
1568 n->shaper_profile.valid == 0) {
1569 return -rte_tm_error_set(error,
1571 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1573 rte_strerror(EINVAL));
1575 } else if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL)
1576 return -rte_tm_error_set(error,
1578 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1580 rte_strerror(EINVAL));
/* 3) Pending port node: must be a rootless PORT-level node of this
 *    TM with a valid shaper profile. */
1583 n = tm->h.port_commit_node;
1585 (n->parent_node_id != RTE_TM_NODE_ID_NULL ||
1586 n->level != IPN3KE_TM_NODE_LEVEL_PORT ||
1587 n->tm_id != tm_id ||
1588 n->parent_node != NULL ||
1589 n->shaper_profile.valid == 0)) {
1590 return -rte_tm_error_set(error,
1592 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1594 rte_strerror(EINVAL));
/*
 * Program one TM node into the FPGA QoS registers.
 *
 * Per node level it writes (as visible below): the scheduler type
 * register, the scheduler weight register, the shaper weight register
 * (only when the node carries a valid shaper profile, encoded as
 * exponent<<10 | mantissa), and — for VT/COS — the mapping register
 * that attaches the node to its parent's index. For COS nodes it also
 * programs the tail-drop profile and the queue-to-port mapping through
 * the QM UID config registers, polling a CTRL register before and
 * after the write.
 *
 * NOTE(review): many register-address/shift arguments are hidden by
 * extraction gaps, and exact HW semantics of these registers cannot be
 * confirmed from this view; code is kept byte-identical.
 */
1601 ipn3ke_hw_tm_node_wr(struct ipn3ke_hw *hw,
1602 struct ipn3ke_tm_node *n,
1603 struct ipn3ke_tm_node *parent_node)
/* Port level: L3 scheduler type / weight / shaper registers. */
1610 case IPN3KE_TM_NODE_LEVEL_PORT:
1614 IPN3KE_MASK_WRITE_REG(hw,
1615 IPN3KE_QOS_TYPE_L3_X,
1618 IPN3KE_QOS_TYPE_MASK);
1623 IPN3KE_MASK_WRITE_REG(hw,
1624 IPN3KE_QOS_SCH_WT_L3_X,
1627 IPN3KE_QOS_SCH_WT_MASK);
/* Shaper is optional: only programmed with a valid profile. */
1632 if (n->shaper_profile.valid)
1633 IPN3KE_MASK_WRITE_REG(hw,
1634 IPN3KE_QOS_SHAP_WT_L3_X,
1636 ((n->shaper_profile.e << 10) |
1637 n->shaper_profile.m),
1638 IPN3KE_QOS_SHAP_WT_MASK);
/* VT level: L2 registers, plus the L2->parent mapping. */
1641 case IPN3KE_TM_NODE_LEVEL_VT:
1645 IPN3KE_MASK_WRITE_REG(hw,
1646 IPN3KE_QOS_TYPE_L2_X,
1649 IPN3KE_QOS_TYPE_MASK);
1654 IPN3KE_MASK_WRITE_REG(hw,
1655 IPN3KE_QOS_SCH_WT_L2_X,
1658 IPN3KE_QOS_SCH_WT_MASK);
1663 if (n->shaper_profile.valid)
1664 IPN3KE_MASK_WRITE_REG(hw,
1665 IPN3KE_QOS_SHAP_WT_L2_X,
1667 ((n->shaper_profile.e << 10) |
1668 n->shaper_profile.m),
1669 IPN3KE_QOS_SHAP_WT_MASK);
1675 IPN3KE_MASK_WRITE_REG(hw,
1676 IPN3KE_QOS_MAP_L2_X,
1678 parent_node->node_index,
1679 IPN3KE_QOS_MAP_L2_MASK);
/* COS (leaf) level: tail-drop profile, L1 registers, queue->port
 * mapping via QM UID config, then the L1->parent mapping. */
1682 case IPN3KE_TM_NODE_LEVEL_COS:
1684 * Configure Tail Drop mapping
1686 if (n->tdrop_profile && n->tdrop_profile->valid) {
1687 IPN3KE_MASK_WRITE_REG(hw,
1688 IPN3KE_CCB_QPROFILE_Q,
1690 n->tdrop_profile->tdrop_profile_id,
1691 IPN3KE_CCB_QPROFILE_MASK);
1697 IPN3KE_MASK_WRITE_REG(hw,
1698 IPN3KE_QOS_TYPE_L1_X,
1701 IPN3KE_QOS_TYPE_MASK);
1706 IPN3KE_MASK_WRITE_REG(hw,
1707 IPN3KE_QOS_SCH_WT_L1_X,
1710 IPN3KE_QOS_SCH_WT_MASK);
1715 if (n->shaper_profile.valid)
1716 IPN3KE_MASK_WRITE_REG(hw,
1717 IPN3KE_QOS_SHAP_WT_L1_X,
1719 ((n->shaper_profile.e << 10) |
1720 n->shaper_profile.m),
1721 IPN3KE_QOS_SHAP_WT_MASK);
1724 * Configure COS queue to port
/* Busy-wait until the QM UID config interface is ready.
 * NOTE(review): the loop body/exit condition is hidden by gaps;
 * presumably polls a busy bit — confirm against full source. */
1726 while (IPN3KE_MASK_READ_REG(hw,
1727 IPN3KE_QM_UID_CONFIG_CTRL,
/* Map this queue to the grandparent (port) node index. */
1732 if (parent_node && parent_node->parent_node)
1733 IPN3KE_MASK_WRITE_REG(hw,
1734 IPN3KE_QM_UID_CONFIG_DATA,
1736 (1 << 8 | parent_node->parent_node->node_index),
1739 IPN3KE_MASK_WRITE_REG(hw,
1740 IPN3KE_QM_UID_CONFIG_CTRL,
1745 while (IPN3KE_MASK_READ_REG(hw,
1746 IPN3KE_QM_UID_CONFIG_CTRL,
1755 IPN3KE_MASK_WRITE_REG(hw,
1756 IPN3KE_QOS_MAP_L1_X,
1758 parent_node->node_index,
1759 IPN3KE_QOS_MAP_L1_MASK);
/*
 * Apply all pending hierarchy changes to hardware.
 *
 * Processes the pending port node, then the VT commit list, then the
 * COS commit list. CONFIGURED_ADD nodes become COMMITTED and are
 * linked into their parent's children list; CONFIGURED_DEL nodes are
 * reset to IDLE and unlinked. Each processed node is written to HW via
 * ipn3ke_hw_tm_node_wr(). Any node found in an unexpected state aborts
 * with EINVAL.
 *
 * NOTE(review): extraction gaps hide some lines here; also, in the COS
 * delete branch `n->node_state = IPN3KE_TM_NODE_STATE_IDLE;` appears
 * twice (orig lines 1844 and 1848) — looks redundant but harmless;
 * confirm against the full source before removing.
 */
1770 ipn3ke_tm_hierarchy_hw_commit(struct rte_eth_dev *dev,
1771 struct rte_tm_error *error)
1773 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1774 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1775 struct ipn3ke_tm_node_list *nl;
1776 struct ipn3ke_tm_node *n, *nn, *parent_node;
/* 1) Port node: promote to COMMITTED or reset to IDLE. */
1778 n = tm->h.port_commit_node;
1780 if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1781 tm->h.port_commit_node = NULL;
1783 n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
1784 } else if (n->node_state ==
1785 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1786 tm->h.port_commit_node = NULL;
1788 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1789 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1791 n->tm_id = RTE_TM_NODE_ID_NULL;
1793 return -rte_tm_error_set(error,
1795 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1797 rte_strerror(EINVAL));
1799 parent_node = n->parent_node;
1800 ipn3ke_hw_tm_node_wr(hw, n, parent_node);
/* 2) VT nodes: safe-iterate (nn caches next) since nodes are
 *    removed from the list while walking it. */
1803 nl = &tm->h.vt_commit_node_list;
1804 for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
1805 nn = TAILQ_NEXT(n, node);
1806 if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1807 n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
1808 parent_node = n->parent_node;
1809 TAILQ_REMOVE(nl, n, node);
1810 TAILQ_INSERT_TAIL(&parent_node->children_node_list,
1812 } else if (n->node_state ==
1813 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1814 parent_node = n->parent_node;
1815 TAILQ_REMOVE(nl, n, node);
/* Deleted node returns to the free/idle pool state. */
1817 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1818 n->parent_node_id = RTE_TM_NODE_ID_NULL;
1819 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1821 n->tm_id = RTE_TM_NODE_ID_NULL;
1822 n->parent_node = NULL;
1824 return -rte_tm_error_set(error,
1826 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1828 rte_strerror(EINVAL));
1830 ipn3ke_hw_tm_node_wr(hw, n, parent_node);
/* 3) COS nodes: same pattern, plus tdrop-profile refcount drop. */
1833 nl = &tm->h.cos_commit_node_list;
1834 for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
1835 nn = TAILQ_NEXT(n, node);
1836 if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1837 n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
1838 parent_node = n->parent_node;
1839 TAILQ_REMOVE(nl, n, node);
1840 TAILQ_INSERT_TAIL(&parent_node->children_node_list,
1842 } else if (n->node_state ==
1843 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1844 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1845 parent_node = n->parent_node;
1846 TAILQ_REMOVE(nl, n, node);
/* See NOTE above: second IDLE assignment (orig 1848). */
1848 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1849 n->parent_node_id = RTE_TM_NODE_ID_NULL;
1850 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1852 n->tm_id = RTE_TM_NODE_ID_NULL;
1853 n->parent_node = NULL;
/* Release the node's reference on its tail-drop profile. */
1855 if (n->tdrop_profile)
1856 n->tdrop_profile->n_users--;
1858 return -rte_tm_error_set(error,
1860 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1862 rte_strerror(EINVAL));
1864 ipn3ke_hw_tm_node_wr(hw, n, parent_node);
/*
 * Discard all pending (uncommitted) hierarchy changes.
 *
 * Called when the pre-commit check fails (with clear_on_fail): resets
 * the pending port node and every node on the VT/COS commit lists back
 * to IDLE defaults and empties the lists, without touching hardware.
 *
 * NOTE(review): extraction gaps hide some field resets and braces;
 * code is kept byte-identical.
 */
1871 ipn3ke_tm_hierarchy_commit_clear(struct rte_eth_dev *dev)
1873 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1874 struct ipn3ke_tm_node_list *nl;
1875 struct ipn3ke_tm_node *n;
1876 struct ipn3ke_tm_node *nn;
/* Reset the pending port node, if any. */
1878 n = tm->h.port_commit_node;
1880 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1881 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1883 n->tm_id = RTE_TM_NODE_ID_NULL;
1886 tm->h.port_commit_node = NULL;
/* Drain the VT commit list (safe iteration: nn caches next). */
1889 nl = &tm->h.vt_commit_node_list;
1890 for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
1891 nn = TAILQ_NEXT(n, node);
1893 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1894 n->parent_node_id = RTE_TM_NODE_ID_NULL;
1895 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1897 n->tm_id = RTE_TM_NODE_ID_NULL;
1898 n->parent_node = NULL;
1902 TAILQ_REMOVE(nl, n, node);
/* Drain the COS commit list, restoring the COS node count. */
1905 nl = &tm->h.cos_commit_node_list;
1906 for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
1907 nn = TAILQ_NEXT(n, node);
1909 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1910 n->parent_node_id = RTE_TM_NODE_ID_NULL;
1911 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1913 n->tm_id = RTE_TM_NODE_ID_NULL;
1914 n->parent_node = NULL;
1915 tm->h.n_cos_nodes--;
1917 TAILQ_REMOVE(nl, n, node);
/*
 * Debug helper: log the committed HQoS tree (port -> VT -> COS) with
 * each node's index and state name. Output goes through the PMD debug
 * log macros only; no state is modified.
 */
1924 ipn3ke_tm_show(struct rte_eth_dev *dev)
1926 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1928 struct ipn3ke_tm_node_list *vt_nl, *cos_nl;
1929 struct ipn3ke_tm_node *port_n, *vt_n, *cos_n;
/* Printable names indexed by node_state. */
1930 const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",
1937 IPN3KE_AFU_PMD_DEBUG("***HQoS Tree(%d)***\n", tm_id);
1939 port_n = tm->h.port_node;
1940 IPN3KE_AFU_PMD_DEBUG("Port: (%d|%s)\n", port_n->node_index,
1941 str_state[port_n->node_state]);
/* Walk each VT node's children, flagging any COS node whose
 * parent_node_id does not match the VT it hangs under. */
1943 vt_nl = &tm->h.port_node->children_node_list;
1944 TAILQ_FOREACH(vt_n, vt_nl, node) {
1945 cos_nl = &vt_n->children_node_list;
1946 IPN3KE_AFU_PMD_DEBUG(" VT%d: ", vt_n->node_index);
1947 TAILQ_FOREACH(cos_n, cos_nl, node) {
1948 if (cos_n->parent_node_id !=
1949 (vt_n->node_index + IPN3KE_TM_NODE_LEVEL_MOD))
1950 IPN3KE_AFU_PMD_ERR("(%d|%s), ",
1952 str_state[cos_n->node_state]);
1954 IPN3KE_AFU_PMD_DEBUG("\n");
/*
 * Debug helper: log the pending commit state — the port commit node
 * and the VT/COS commit lists — as (index|state) pairs. Read-only.
 * NOTE(review): "commmit" (three m's) is a pre-existing typo in the
 * function name; callers in this file use it, so it is not renamed.
 */
1959 ipn3ke_tm_show_commmit(struct rte_eth_dev *dev)
1961 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1963 struct ipn3ke_tm_node_list *nl;
1964 struct ipn3ke_tm_node *n;
/* Printable names indexed by node_state. */
1965 const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",
1972 IPN3KE_AFU_PMD_DEBUG("***Commit Tree(%d)***\n", tm_id);
1973 n = tm->h.port_commit_node;
1974 IPN3KE_AFU_PMD_DEBUG("Port: ");
1976 IPN3KE_AFU_PMD_DEBUG("(%d|%s)",
1978 str_state[n->node_state]);
1979 IPN3KE_AFU_PMD_DEBUG("\n");
1981 nl = &tm->h.vt_commit_node_list;
1982 IPN3KE_AFU_PMD_DEBUG("VT : ");
1983 TAILQ_FOREACH(n, nl, node) {
1984 IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
1986 str_state[n->node_state]);
1988 IPN3KE_AFU_PMD_DEBUG("\n");
1990 nl = &tm->h.cos_commit_node_list;
1991 IPN3KE_AFU_PMD_DEBUG("COS : ");
1992 TAILQ_FOREACH(n, nl, node) {
1993 IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
1995 str_state[n->node_state]);
1997 IPN3KE_AFU_PMD_DEBUG("\n");
2000 /* Traffic manager hierarchy commit */
/*
 * rte_tm_ops::hierarchy_commit implementation.
 *
 * Fails with EBUSY while the hierarchy is frozen. Otherwise validates
 * the pending changes (commit_check); on failure, discards them when
 * clear_on_fail is requested. On success, pushes the changes to
 * hardware and dumps the resulting tree via the debug log.
 *
 * NOTE(review): the clear_on_fail test and the final return are hidden
 * by extraction gaps; code is kept byte-identical.
 */
2002 ipn3ke_tm_hierarchy_commit(struct rte_eth_dev *dev,
2003 int clear_on_fail, struct rte_tm_error *error)
2005 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
2009 if (tm->hierarchy_frozen)
2010 return -rte_tm_error_set(error,
2012 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2014 rte_strerror(EBUSY));
/* Debug dump of what is about to be committed. */
2016 ipn3ke_tm_show_commmit(dev);
2018 status = ipn3ke_tm_hierarchy_commit_check(dev, error);
2021 ipn3ke_tm_hierarchy_commit_clear(dev);
2025 ipn3ke_tm_hierarchy_hw_commit(dev, error);
2026 ipn3ke_tm_show(dev);
/*
 * rte_tm_ops vtable exported through tm_ops_get.
 *
 * Note: the rte_tm "WRED profile" hooks are backed by this driver's
 * tail-drop profile implementation (ipn3ke_tm_tdrop_profile_*), and
 * node suspend/resume plus all runtime update/stats hooks are not
 * supported (NULL).
 */
2031 const struct rte_tm_ops ipn3ke_tm_ops = {
2032 .node_type_get = ipn3ke_pmd_tm_node_type_get,
2033 .capabilities_get = ipn3ke_tm_capabilities_get,
2034 .level_capabilities_get = ipn3ke_tm_level_capabilities_get,
2035 .node_capabilities_get = ipn3ke_tm_node_capabilities_get,
2037 .wred_profile_add = ipn3ke_tm_tdrop_profile_add,
2038 .wred_profile_delete = ipn3ke_tm_tdrop_profile_delete,
2039 .shared_wred_context_add_update = NULL,
2040 .shared_wred_context_delete = NULL,
2042 .shaper_profile_add = ipn3ke_tm_shaper_profile_add,
2043 .shaper_profile_delete = ipn3ke_tm_shaper_profile_delete,
2044 .shared_shaper_add_update = NULL,
2045 .shared_shaper_delete = NULL,
2047 .node_add = ipn3ke_tm_node_add,
2048 .node_delete = ipn3ke_pmd_tm_node_delete,
2049 .node_suspend = NULL,
2050 .node_resume = NULL,
2051 .hierarchy_commit = ipn3ke_tm_hierarchy_commit,
2053 .node_parent_update = NULL,
2054 .node_shaper_update = NULL,
2055 .node_shared_shaper_update = NULL,
2056 .node_stats_update = NULL,
2057 .node_wfq_weight_mode_update = NULL,
2058 .node_cman_update = NULL,
2059 .node_wred_context_update = NULL,
2060 .node_shared_wred_context_update = NULL,
2062 .node_stats_read = NULL,
2066 ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev,
2069 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
2070 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
2071 struct rte_eth_dev *i40e_pf_eth;
2072 const struct rte_tm_ops *ops;
2078 *(const void **)arg = &ipn3ke_tm_ops;
2079 } else if (rpst->i40e_pf_eth) {
2080 i40e_pf_eth = rpst->i40e_pf_eth;
2081 if (i40e_pf_eth->dev_ops->tm_ops_get == NULL ||
2082 i40e_pf_eth->dev_ops->tm_ops_get(i40e_pf_eth,
2087 *(const void **)arg = ops;