/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2020 Hisilicon Limited.
 */

#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_dcb.h"
#include "hns3_logs.h"
#include "hns3_tm.h"
static inline uint32_t
hns3_tm_max_tx_queues_get(struct rte_eth_dev *dev)
{
	/*
	 * This API is called in the PCI device probe stage, where we cannot
	 * call rte_eth_dev_info_get() to get max_tx_queues (because
	 * rte_eth_devices has not been set up yet), so we call
	 * hns3_dev_infos_get() directly instead.
	 */
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	(void)hns3_dev_infos_get(dev, &dev_info);
	return RTE_MIN(dev_info.max_tx_queues, RTE_MAX_QUEUES_PER_PORT);
}
void
hns3_tm_conf_init(struct rte_eth_dev *dev)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
	pf->tm_conf.nb_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
	pf->tm_conf.nb_shaper_profile_max = 1 + HNS3_MAX_TC_NUM;

	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
	pf->tm_conf.nb_shaper_profile = 0;

	pf->tm_conf.root = NULL;
	TAILQ_INIT(&pf->tm_conf.tc_list);
	TAILQ_INIT(&pf->tm_conf.queue_list);
	pf->tm_conf.nb_tc_node = 0;
	pf->tm_conf.nb_queue_node = 0;

	pf->tm_conf.committed = false;
}
void
hns3_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;
	struct hns3_tm_node *tm_node;

	if (pf->tm_conf.nb_queue_node > 0) {
		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
			TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
			rte_free(tm_node);
		}
		pf->tm_conf.nb_queue_node = 0;
	}

	if (pf->tm_conf.nb_tc_node > 0) {
		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
			TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
			rte_free(tm_node);
		}
		pf->tm_conf.nb_tc_node = 0;
	}

	if (pf->tm_conf.root != NULL) {
		rte_free(pf->tm_conf.root);
		pf->tm_conf.root = NULL;
	}

	if (pf->tm_conf.nb_shaper_profile > 0) {
		while ((shaper_profile =
			TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
			TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
				     shaper_profile, node);
			rte_free(shaper_profile);
		}
		pf->tm_conf.nb_shaper_profile = 0;
	}

	pf->tm_conf.nb_leaf_nodes_max = 0;
	pf->tm_conf.nb_nodes_max = 0;
	pf->tm_conf.nb_shaper_profile_max = 0;
}
static inline uint64_t
hns3_tm_rate_convert_firmware2tm(uint32_t firmware_rate)
{
#define FIRMWARE_TO_TM_RATE_SCALE	125000
	/* tm rate unit is Bps, firmware rate is Mbps */
	return ((uint64_t)firmware_rate) * FIRMWARE_TO_TM_RATE_SCALE;
}

static inline uint32_t
hns3_tm_rate_convert_tm2firmware(uint64_t tm_rate)
{
#define TM_TO_FIRMWARE_RATE_SCALE	125000
	/* tm rate unit is Bps, firmware rate is Mbps */
	return (uint32_t)(tm_rate / TM_TO_FIRMWARE_RATE_SCALE);
}
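/*
 * Worked example of the scale factor above: the rte_tm API expresses rates
 * in bytes per second while the firmware uses Mbit/s, and 1 Mbit/s =
 * 1,000,000 / 8 = 125,000 Bps. So a firmware rate of 200 Mbps converts to
 * 200 * 125000 = 25,000,000 Bps, and 25,000,000 Bps converts back to 200.
 */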
static int
hns3_tm_capabilities_get(struct rte_eth_dev *dev,
			 struct rte_tm_capabilities *cap,
			 struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	if (cap == NULL || error == NULL)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	cap->n_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
	cap->n_levels_max = HNS3_TM_NODE_LEVEL_MAX;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = 1 + HNS3_MAX_TC_NUM;
	cap->shaper_private_n_max = 1 + HNS3_MAX_TC_NUM;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	cap->shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;

	cap->sched_n_children_max = max_tx_queues;
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	cap->sched_wfq_weight_max = 1;

	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;

	return 0;
}
static struct hns3_tm_shaper_profile *
hns3_tm_shaper_profile_search(struct rte_eth_dev *dev,
			      uint32_t shaper_profile_id)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_shaper_profile_list *shaper_profile_list =
		&pf->tm_conf.shaper_profile_list;
	struct hns3_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}
static int
hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
				   struct rte_tm_shaper_params *profile,
				   struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}

	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}

	if (profile->peak.rate >
	    hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate)) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
		error->message = "peak rate too large";
		return -EINVAL;
	}

	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}

	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	if (profile->packet_mode) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE;
		error->message = "packet mode not supported";
		return -EINVAL;
	}

	return 0;
}
static int
hns3_tm_shaper_profile_add(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id,
			   struct rte_tm_shaper_params *profile,
			   struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;
	int ret;

	if (profile == NULL || error == NULL)
		return -EINVAL;

	if (pf->tm_conf.nb_shaper_profile >=
	    pf->tm_conf.nb_shaper_profile_max) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "too many profiles";
		return -EINVAL;
	}

	ret = hns3_tm_shaper_profile_param_check(dev, profile, error);
	if (ret)
		return ret;

	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("hns3_tm_shaper_profile",
				     sizeof(struct hns3_tm_shaper_profile),
				     0);
	if (shaper_profile == NULL)
		return -ENOMEM;

	shaper_profile->shaper_profile_id = shaper_profile_id;
	memcpy(&shaper_profile->profile, profile,
	       sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
			  shaper_profile, node);
	pf->tm_conf.nb_shaper_profile++;

	return 0;
}
static int
hns3_tm_shaper_profile_del(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id,
			   struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;

	if (error == NULL)
		return -EINVAL;

	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
	if (shaper_profile == NULL) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID does not exist";
		return -EINVAL;
	}

	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);
	pf->tm_conf.nb_shaper_profile--;

	return 0;
}
static struct hns3_tm_node *
hns3_tm_node_search(struct rte_eth_dev *dev,
		    uint32_t node_id,
		    enum hns3_tm_node_type *node_type)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node_list *queue_list = &pf->tm_conf.queue_list;
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;

	if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
		*node_type = HNS3_TM_NODE_TYPE_PORT;
		return pf->tm_conf.root;
	}

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = HNS3_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = HNS3_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}
static int
hns3_tm_nonleaf_node_param_check(struct rte_eth_dev *dev,
				 struct rte_tm_node_params *params,
				 struct rte_tm_error *error)
{
	struct hns3_tm_shaper_profile *shaper_profile;

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		shaper_profile = hns3_tm_shaper_profile_search(dev,
					params->shaper_profile_id);
		if (shaper_profile == NULL) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
			error->message = "shaper profile does not exist";
			return -EINVAL;
		}
	}

	if (params->nonleaf.wfq_weight_mode) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
		error->message = "WFQ not supported";
		return -EINVAL;
	}

	if (params->nonleaf.n_sp_priorities != 1) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
		error->message = "SP priority not supported";
		return -EINVAL;
	}

	return 0;
}
static int
hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev __rte_unused,
			      struct rte_tm_node_params *params,
			      struct rte_tm_error *error)
{
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
		error->message = "shaper not supported";
		return -EINVAL;
	}

	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "congestion management not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.wred_profile_id != RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}
static int
hns3_tm_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
			 uint32_t priority, uint32_t weight,
			 struct rte_tm_node_params *params,
			 struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (hns3_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	if (node_id >= pf->tm_conf.nb_leaf_nodes_max)
		return hns3_tm_nonleaf_node_param_check(dev, params, error);

	return hns3_tm_leaf_node_param_check(dev, params, error);
}
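/*
 * Summary of the node ID layout enforced above: IDs in
 * [0, nb_leaf_nodes_max) denote queue (leaf) nodes, IDs in
 * [nb_leaf_nodes_max, nb_nodes_max - 1) denote TC nodes, and
 * nb_nodes_max - 1 is reserved for the port (root) node. For example,
 * assuming HNS3_MAX_TC_NUM is 8 and a device with 64 Tx queues: queue
 * nodes take IDs 0..63, TC nodes take 64..71, and the port node is 72.
 */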
static int
hns3_tm_port_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		      uint32_t level_id, struct rte_tm_node_params *params,
		      struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_PORT) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	if (node_id != pf->tm_conf.nb_nodes_max - 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid port node ID";
		return -EINVAL;
	}

	if (pf->tm_conf.root) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "already have a root";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = NULL;
	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
					params->shaper_profile_id);
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	pf->tm_conf.root = tm_node;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count++;

	return 0;
}
static int
hns3_tm_tc_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		    uint32_t level_id, struct hns3_tm_node *parent_node,
		    struct rte_tm_node_params *params,
		    struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	if (node_id >= pf->tm_conf.nb_nodes_max - 1 ||
	    node_id < pf->tm_conf.nb_leaf_nodes_max ||
	    hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id) >= hw->num_tc) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid tc node ID";
		return -EINVAL;
	}

	if (pf->tm_conf.nb_tc_node >= hw->num_tc) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "too many TCs";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
					params->shaper_profile_id);
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list, tm_node, node);
	pf->tm_conf.nb_tc_node++;
	tm_node->parent->reference_count++;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count++;

	return 0;
}
static int
hns3_tm_queue_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		       uint32_t level_id, struct hns3_tm_node *parent_node,
		       struct rte_tm_node_params *params,
		       struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_QUEUE) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	/* note: dev->data->nb_tx_queues <= max_tx_queues */
	if (node_id >= dev->data->nb_tx_queues) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid queue node ID";
		return -EINVAL;
	}

	if (hns3_txq_mapped_tc_get(hw, node_id) !=
	    hns3_tm_calc_node_tc_no(&pf->tm_conf, parent_node->id)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "queue's TC does not match parent's TC";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list, tm_node, node);
	pf->tm_conf.nb_queue_node++;
	tm_node->parent->reference_count++;

	return 0;
}
static int
hns3_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		 uint32_t parent_node_id, uint32_t priority,
		 uint32_t weight, uint32_t level_id,
		 struct rte_tm_node_params *params,
		 struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type parent_node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *parent_node;
	int ret;

	if (params == NULL || error == NULL)
		return -EINVAL;

	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = hns3_tm_node_param_check(dev, node_id, priority, weight,
				       params, error);
	if (ret)
		return ret;

	/* the root node has no parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL)
		return hns3_tm_port_node_add(dev, node_id, level_id,
					     params, error);

	parent_node = hns3_tm_node_search(dev, parent_node_id,
					  &parent_node_type);
	if (parent_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent does not exist";
		return -EINVAL;
	}

	if (parent_node_type != HNS3_TM_NODE_TYPE_PORT &&
	    parent_node_type != HNS3_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}

	if (parent_node_type == HNS3_TM_NODE_TYPE_PORT)
		return hns3_tm_tc_node_add(dev, node_id, level_id,
					   parent_node, params, error);

	return hns3_tm_queue_node_add(dev, node_id, level_id,
				      parent_node, params, error);
}
static void
hns3_tm_node_do_delete(struct hns3_pf *pf,
		       enum hns3_tm_node_type node_type,
		       struct hns3_tm_node *tm_node)
{
	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
		if (tm_node->shaper_profile)
			tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		pf->tm_conf.root = NULL;
		return;
	}

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == HNS3_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
		pf->tm_conf.nb_tc_node--;
	} else {
		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
		pf->tm_conf.nb_queue_node--;
	}
	rte_free(tm_node);
}
static int
hns3_tm_node_delete(struct rte_eth_dev *dev,
		    uint32_t node_id,
		    struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *tm_node;

	if (error == NULL)
		return -EINVAL;

	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "cannot delete a node which has children";
		return -EINVAL;
	}

	hns3_tm_node_do_delete(pf, node_type, tm_node);

	return 0;
}
static int
hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		      int *is_leaf, struct rte_tm_error *error)
{
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *tm_node;

	if (is_leaf == NULL || error == NULL)
		return -EINVAL;

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (node_type == HNS3_TM_NODE_TYPE_QUEUE)
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}
static void
hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev,
				       uint32_t level_id,
				       struct rte_tm_level_capabilities *cap)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	if (level_id == HNS3_TM_NODE_LEVEL_PORT) {
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->n_nodes_leaf_max = 0;
	} else {
		cap->n_nodes_max = HNS3_MAX_TC_NUM;
		cap->n_nodes_nonleaf_max = HNS3_MAX_TC_NUM;
		cap->n_nodes_leaf_max = 0;
	}

	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->nonleaf.shaper_private_supported = true;
	cap->nonleaf.shaper_private_dual_rate_supported = false;
	cap->nonleaf.shaper_private_rate_min = 0;
	cap->nonleaf.shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
	cap->nonleaf.shaper_shared_n_max = 0;
	if (level_id == HNS3_TM_NODE_LEVEL_PORT)
		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
	else
		cap->nonleaf.sched_n_children_max = max_tx_queues;
	cap->nonleaf.sched_sp_n_priorities_max = 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
	cap->nonleaf.sched_wfq_n_groups_max = 0;
	cap->nonleaf.sched_wfq_weight_max = 1;
	cap->nonleaf.stats_mask = 0;
}
static void
hns3_tm_leaf_level_capabilities_get(struct rte_eth_dev *dev,
				    struct rte_tm_level_capabilities *cap)
{
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	cap->n_nodes_max = max_tx_queues;
	cap->n_nodes_nonleaf_max = 0;
	cap->n_nodes_leaf_max = max_tx_queues;

	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->leaf.shaper_private_supported = false;
	cap->leaf.shaper_private_dual_rate_supported = false;
	cap->leaf.shaper_private_rate_min = 0;
	cap->leaf.shaper_private_rate_max = 0;
	cap->leaf.shaper_shared_n_max = 0;
	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = false;
	cap->leaf.cman_wred_context_shared_n_max = 0;
	cap->leaf.stats_mask = 0;
}
static int
hns3_tm_level_capabilities_get(struct rte_eth_dev *dev,
			       uint32_t level_id,
			       struct rte_tm_level_capabilities *cap,
			       struct rte_tm_error *error)
{
	if (cap == NULL || error == NULL)
		return -EINVAL;

	if (level_id >= HNS3_TM_NODE_LEVEL_MAX) {
		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
		error->message = "too deep level";
		return -EINVAL;
	}

	memset(cap, 0, sizeof(struct rte_tm_level_capabilities));

	if (level_id != HNS3_TM_NODE_LEVEL_QUEUE)
		hns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap);
	else
		hns3_tm_leaf_level_capabilities_get(dev, cap);

	return 0;
}
static void
hns3_tm_nonleaf_node_capabilities_get(struct rte_eth_dev *dev,
				      enum hns3_tm_node_type node_type,
				      struct rte_tm_node_capabilities *cap)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	cap->shaper_private_supported = true;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	cap->shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
	cap->shaper_shared_n_max = 0;

	if (node_type == HNS3_TM_NODE_TYPE_PORT)
		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
	else
		cap->nonleaf.sched_n_children_max = max_tx_queues;
	cap->nonleaf.sched_sp_n_priorities_max = 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
	cap->nonleaf.sched_wfq_n_groups_max = 0;
	cap->nonleaf.sched_wfq_weight_max = 1;

	cap->stats_mask = 0;
}
static void
hns3_tm_leaf_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
				   struct rte_tm_node_capabilities *cap)
{
	cap->shaper_private_supported = false;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	cap->shaper_private_rate_max = 0;
	cap->shaper_shared_n_max = 0;

	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = false;
	cap->leaf.cman_wred_context_shared_n_max = 0;

	cap->stats_mask = 0;
}
static int
hns3_tm_node_capabilities_get(struct rte_eth_dev *dev,
			      uint32_t node_id,
			      struct rte_tm_node_capabilities *cap,
			      struct rte_tm_error *error)
{
	enum hns3_tm_node_type node_type;
	struct hns3_tm_node *tm_node;

	if (cap == NULL || error == NULL)
		return -EINVAL;

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	memset(cap, 0, sizeof(struct rte_tm_node_capabilities));

	if (node_type != HNS3_TM_NODE_TYPE_QUEUE)
		hns3_tm_nonleaf_node_capabilities_get(dev, node_type, cap);
	else
		hns3_tm_leaf_node_capabilities_get(dev, cap);

	return 0;
}
static int
hns3_tm_config_port_rate(struct hns3_hw *hw,
			 struct hns3_tm_shaper_profile *shaper_profile)
{
	uint32_t firmware_rate;
	uint64_t rate;

	if (shaper_profile) {
		rate = shaper_profile->profile.peak.rate;
		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
	} else {
		firmware_rate = hw->dcb_info.pg_info[0].bw_limit;
	}

	/*
	 * The TM shaper topology after device init:
	 *
	 *     pri0 shaper  --->|
	 *     pri1 shaper  --->|
	 *     ...              |----> pg0 shaper ----> port shaper
	 *     pri7 shaper  --->|
	 *
	 * Because the port shaper rate may be changed by firmware, the driver
	 * uses the pg0 shaper to achieve the port rate limit and thereby
	 * avoids concurrent configuration.
	 *
	 * The final port rate = MIN(pg0 shaper rate, port shaper rate).
	 */
	return hns3_pg_shaper_rate_cfg(hw, 0, firmware_rate);
}
static int
hns3_tm_config_tc_rate(struct hns3_hw *hw,
		       uint8_t tc_no,
		       struct hns3_tm_shaper_profile *shaper_profile)
{
	uint32_t firmware_rate;
	uint64_t rate;

	if (shaper_profile) {
		rate = shaper_profile->profile.peak.rate;
		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
	} else {
		firmware_rate = hw->dcb_info.tc_info[tc_no].bw_limit;
	}

	return hns3_pri_shaper_rate_cfg(hw, tc_no, firmware_rate);
}
static bool
hns3_tm_configure_check(struct hns3_hw *hw, struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_conf *tm_conf = &pf->tm_conf;
	struct hns3_tm_node_list *tc_list = &tm_conf->tc_list;
	struct hns3_tm_node_list *queue_list = &tm_conf->queue_list;
	struct hns3_tm_node *tm_node;

	/* TC nodes */
	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (!tm_node->reference_count) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "TC without queue assigned";
			return false;
		}

		if (hns3_tm_calc_node_tc_no(tm_conf, tm_node->id) >=
		    hw->num_tc) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "node's TC does not exist";
			return false;
		}
	}

	/* queue nodes */
	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id >= hw->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "node's queue invalid";
			return false;
		}

		if (hns3_txq_mapped_tc_get(hw, tm_node->id) !=
		    hns3_tm_calc_node_tc_no(tm_conf, tm_node->parent->id)) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "queue's TC does not match parent's TC";
			return false;
		}
	}

	return true;
}
static int
hns3_tm_hierarchy_do_commit(struct hns3_hw *hw,
			    struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;
	uint8_t tc_no;
	int ret;

	/* port */
	tm_node = pf->tm_conf.root;
	if (tm_node->shaper_profile) {
		ret = hns3_tm_config_port_rate(hw, tm_node->shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "failed to set port peak rate";
			return -EIO;
		}
	}

	/* TC */
	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->shaper_profile == NULL)
			continue;

		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
		ret = hns3_tm_config_tc_rate(hw, tc_no,
					     tm_node->shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "failed to set TC peak rate";
			return -EIO;
		}
	}

	return 0;
}
static int
hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
			 int clear_on_fail,
			 struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	if (error == NULL)
		return -EINVAL;

	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "device is resetting";
		/* don't goto fail_clear, user may try again later */
		return -EBUSY;
	}

	if (pf->tm_conf.root == NULL)
		goto done;

	/* check the configuration before commit to make sure key constraints
	 * are not violated.
	 */
	if (!hns3_tm_configure_check(hw, error))
		goto fail_clear;

	ret = hns3_tm_hierarchy_do_commit(hw, error);
	if (ret)
		goto fail_clear;

done:
	pf->tm_conf.committed = true;
	return 0;

fail_clear:
	if (clear_on_fail) {
		hns3_tm_conf_uninit(dev);
		hns3_tm_conf_init(dev);
	}
	return -EINVAL;
}
static int
hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
			      int clear_on_fail,
			      struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}
static int
hns3_tm_node_shaper_do_update(struct hns3_hw *hw,
			      uint32_t node_id,
			      enum hns3_tm_node_type node_type,
			      struct hns3_tm_shaper_profile *shaper_profile,
			      struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	uint8_t tc_no;
	int ret;

	if (node_type == HNS3_TM_NODE_TYPE_QUEUE) {
		if (shaper_profile != NULL) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
			error->message = "queue node shaper not supported";
			return -EINVAL;
		}
		return 0;
	}

	if (!pf->tm_conf.committed)
		return 0;

	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
		ret = hns3_tm_config_port_rate(hw, shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "failed to update port peak rate";
		}

		return ret;
	}

	/* update the TC's shaper */
	tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id);
	ret = hns3_tm_config_tc_rate(hw, tc_no, shaper_profile);
	if (ret) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "failed to update TC peak rate";
	}

	return ret;
}
static int
hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
			   uint32_t node_id,
			   uint32_t shaper_profile_id,
			   struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_shaper_profile *profile = NULL;
	struct hns3_tm_node *tm_node;

	if (error == NULL)
		return -EINVAL;

	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "device is resetting";
		return -EBUSY;
	}

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (shaper_profile_id == tm_node->params.shaper_profile_id)
		return 0;

	if (shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
		if (profile == NULL) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
			error->message = "profile ID does not exist";
			return -EINVAL;
		}
	}

	if (hns3_tm_node_shaper_do_update(hw, node_id, node_type,
					  profile, error))
		return -EINVAL;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->shaper_profile = profile;
	tm_node->params.shaper_profile_id = shaper_profile_id;
	if (profile != NULL)
		profile->reference_count++;

	return 0;
}
static int
hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
				uint32_t node_id,
				uint32_t shaper_profile_id,
				struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_node_shaper_update(dev, node_id,
					 shaper_profile_id, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}
static const struct rte_tm_ops hns3_tm_ops = {
	.capabilities_get = hns3_tm_capabilities_get,
	.shaper_profile_add = hns3_tm_shaper_profile_add,
	.shaper_profile_delete = hns3_tm_shaper_profile_del,
	.node_add = hns3_tm_node_add,
	.node_delete = hns3_tm_node_delete,
	.node_type_get = hns3_tm_node_type_get,
	.level_capabilities_get = hns3_tm_level_capabilities_get,
	.node_capabilities_get = hns3_tm_node_capabilities_get,
	.hierarchy_commit = hns3_tm_hierarchy_commit_wrap,
	.node_shaper_update = hns3_tm_node_shaper_update_wrap,
};
int
hns3_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		void *arg)
{
	if (arg == NULL)
		return -EINVAL;

	*(const void **)arg = &hns3_tm_ops;

	return 0;
}
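/*
 * Usage sketch (illustrative only, not part of the driver): applications
 * reach the ops table above through the generic rte_tm API. The node IDs
 * below assume the layout described near hns3_tm_node_param_check() for a
 * port with 64 Tx queues (port node 72, first TC node 64), and a
 * hypothetical shaper profile 0 capping the port at 10 Gbit/s:
 *
 *	struct rte_tm_error err;
 *	struct rte_tm_shaper_params sp = {
 *		.peak = { .rate = 1250000000 },	// 10 Gbit/s in Bps
 *	};
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = 0,
 *		.nonleaf = { .n_sp_priorities = 1 },
 *	};
 *
 *	rte_tm_shaper_profile_add(port_id, 0, &sp, &err);
 *	rte_tm_node_add(port_id, 72, RTE_TM_NODE_ID_NULL, 0, 1,
 *			HNS3_TM_NODE_LEVEL_PORT, &np, &err);
 *	// ... add TC and queue nodes similarly, then:
 *	rte_tm_hierarchy_commit(port_id, 1, &err);
 */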
void
hns3_tm_dev_start_proc(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);

	if (pf->tm_conf.root && !pf->tm_conf.committed)
		hns3_warn(hw,
		    "please call hierarchy_commit() before starting the port.");
}
/*
 * We need to clear the tm_conf committed flag when the device stops so
 * that the user can modify the tm configuration (e.g. add or delete a
 * node).
 * If the user doesn't call hierarchy commit when the device is started
 * later, the port/TC shaper rates would remain as previously committed.
 * To avoid that, we restore the default port/TC shaper rates when the
 * device stops.
 */
void
hns3_tm_dev_stop_proc(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;
	uint8_t tc_no;

	if (!pf->tm_conf.committed)
		return;

	tm_node = pf->tm_conf.root;
	if (tm_node != NULL && tm_node->shaper_profile)
		(void)hns3_tm_config_port_rate(hw, NULL);

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->shaper_profile == NULL)
			continue;
		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
		(void)hns3_tm_config_tc_rate(hw, tc_no, NULL);
	}

	pf->tm_conf.committed = false;
}
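/*
 * hns3_tm_conf_update() replays a previously committed hierarchy into the
 * hardware. It is presumably invoked from the driver's reset-recovery path
 * once the hardware has been reinitialized, so that committed port/TC
 * shaper rates survive a device reset.
 */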
int
hns3_tm_conf_update(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct rte_tm_error error;

	if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)
		return 0;

	memset(&error, 0, sizeof(struct rte_tm_error));
	return hns3_tm_hierarchy_do_commit(hw, &error);
}