/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 HiSilicon Limited.
 */

#include <rte_malloc.h>

#include "hns3_common.h"
#include "hns3_dcb.h"
#include "hns3_logs.h"
#include "hns3_tm.h"

static inline uint32_t
hns3_tm_max_tx_queues_get(struct rte_eth_dev *dev)
{
	/*
	 * This API is called at the PCI device probe stage, where
	 * rte_eth_dev_info_get() cannot be used to obtain max_tx_queues
	 * (rte_eth_devices is not set up yet), so hns3_dev_infos_get()
	 * is called directly.
	 */
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	(void)hns3_dev_infos_get(dev, &dev_info);
	return RTE_MIN(dev_info.max_tx_queues, RTE_MAX_QUEUES_PER_PORT);
}

void
hns3_tm_conf_init(struct rte_eth_dev *dev)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	if (!hns3_dev_get_support(hw, TM))
		return;

	pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
	pf->tm_conf.nb_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
	pf->tm_conf.nb_shaper_profile_max = 1 + HNS3_MAX_TC_NUM;

	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
	pf->tm_conf.nb_shaper_profile = 0;

	pf->tm_conf.root = NULL;
	TAILQ_INIT(&pf->tm_conf.tc_list);
	TAILQ_INIT(&pf->tm_conf.queue_list);
	pf->tm_conf.nb_tc_node = 0;
	pf->tm_conf.nb_queue_node = 0;

	pf->tm_conf.committed = false;
}

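/*
 * Node ID layout implied by the limits above and enforced by the node-add
 * helpers below: queue (leaf) nodes use IDs [0, nb_leaf_nodes_max), TC
 * nodes use IDs [nb_leaf_nodes_max, nb_nodes_max - 1), and the single
 * port (root) node uses ID nb_nodes_max - 1.
 */
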
void
hns3_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;
	struct hns3_tm_node *tm_node;

	if (!hns3_dev_get_support(hw, TM))
		return;

	if (pf->tm_conf.nb_queue_node > 0) {
		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
			TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
			rte_free(tm_node);
		}
		pf->tm_conf.nb_queue_node = 0;
	}

	if (pf->tm_conf.nb_tc_node > 0) {
		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
			TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
			rte_free(tm_node);
		}
		pf->tm_conf.nb_tc_node = 0;
	}

	if (pf->tm_conf.root != NULL) {
		rte_free(pf->tm_conf.root);
		pf->tm_conf.root = NULL;
	}

	if (pf->tm_conf.nb_shaper_profile > 0) {
		while ((shaper_profile =
			TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
			TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
				     shaper_profile, node);
			rte_free(shaper_profile);
		}
		pf->tm_conf.nb_shaper_profile = 0;
	}

	pf->tm_conf.nb_leaf_nodes_max = 0;
	pf->tm_conf.nb_nodes_max = 0;
	pf->tm_conf.nb_shaper_profile_max = 0;
}

static inline uint64_t
hns3_tm_rate_convert_firmware2tm(uint32_t firmware_rate)
{
#define FIRMWARE_TO_TM_RATE_SCALE	125000
	/* tm rate unit is Bps, firmware rate is Mbps */
	return ((uint64_t)firmware_rate) * FIRMWARE_TO_TM_RATE_SCALE;
}

static inline uint32_t
hns3_tm_rate_convert_tm2firmware(uint64_t tm_rate)
{
#define TM_TO_FIRMWARE_RATE_SCALE	125000
	/* tm rate unit is Bps, firmware rate is Mbps */
	return (uint32_t)(tm_rate / TM_TO_FIRMWARE_RATE_SCALE);
}

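/*
 * Why 125000: rte_tm expresses rates in bytes per second while the firmware
 * works in Mbit/s, and 1 Mbit/s = 1000000 / 8 = 125000 bytes per second.
 * For example, a 200 Mbit/s firmware rate converts to 25000000 Bps.
 */
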
static int
hns3_tm_capabilities_get(struct rte_eth_dev *dev,
			 struct rte_tm_capabilities *cap,
			 struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	if (cap == NULL || error == NULL)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	cap->n_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
	cap->n_levels_max = HNS3_TM_NODE_LEVEL_MAX;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = 1 + HNS3_MAX_TC_NUM;
	cap->shaper_private_n_max = 1 + HNS3_MAX_TC_NUM;
	cap->shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);

	cap->sched_n_children_max = max_tx_queues;
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_weight_max = 1;

	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;

	return 0;
}

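/*
 * A minimal application-side sketch of reaching this callback through the
 * generic API (hypothetical port_id, error handling elided):
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error error;
 *	int ret = rte_tm_capabilities_get(port_id, &cap, &error);
 */
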
static struct hns3_tm_shaper_profile *
hns3_tm_shaper_profile_search(struct rte_eth_dev *dev,
			      uint32_t shaper_profile_id)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_shaper_profile_list *shaper_profile_list =
		&pf->tm_conf.shaper_profile_list;
	struct hns3_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}

static int
hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
				   struct rte_tm_shaper_params *profile,
				   struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}

	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}

	if (profile->peak.rate >
	    hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate)) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
		error->message = "peak rate too large";
		return -EINVAL;
	}

	if (profile->peak.rate < hns3_tm_rate_convert_firmware2tm(1)) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
		error->message = "peak rate must be at least 1 Mbps";
		return -EINVAL;
	}

	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}

	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	if (profile->packet_mode) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE;
		error->message = "packet mode not supported";
		return -EINVAL;
	}

	return 0;
}

static int
hns3_tm_shaper_profile_add(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id,
			   struct rte_tm_shaper_params *profile,
			   struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;
	int ret;

	if (profile == NULL || error == NULL)
		return -EINVAL;

	if (pf->tm_conf.nb_shaper_profile >=
	    pf->tm_conf.nb_shaper_profile_max) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "too many profiles";
		return -EINVAL;
	}

	ret = hns3_tm_shaper_profile_param_check(dev, profile, error);
	if (ret)
		return ret;

	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("hns3_tm_shaper_profile",
				     sizeof(struct hns3_tm_shaper_profile),
				     0);
	if (shaper_profile == NULL)
		return -ENOMEM;

	shaper_profile->shaper_profile_id = shaper_profile_id;
	memcpy(&shaper_profile->profile, profile,
	       sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
			  shaper_profile, node);
	pf->tm_conf.nb_shaper_profile++;

	return 0;
}

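/*
 * A minimal usage sketch (hypothetical profile ID and rate, error handling
 * elided). Only peak.rate is meaningful here, since committed rate/size,
 * bucket sizes, packet length adjustment and packet mode are all rejected
 * by the parameter check above:
 *
 *	struct rte_tm_shaper_params profile = {
 *		.peak = { .rate = 100 * 125000 },	(100 Mbit/s in Bps)
 *	};
 *	rte_tm_shaper_profile_add(port_id, 1, &profile, &error);
 */
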
static int
hns3_tm_shaper_profile_del(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id,
			   struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;

	if (error == NULL)
		return -EINVAL;

	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
	if (shaper_profile == NULL) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID does not exist";
		return -EINVAL;
	}

	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);
	pf->tm_conf.nb_shaper_profile--;

	return 0;
}

static struct hns3_tm_node *
hns3_tm_node_search(struct rte_eth_dev *dev,
		    uint32_t node_id,
		    enum hns3_tm_node_type *node_type)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node_list *queue_list = &pf->tm_conf.queue_list;
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;

	if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
		*node_type = HNS3_TM_NODE_TYPE_PORT;
		return pf->tm_conf.root;
	}

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = HNS3_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = HNS3_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}

static int
hns3_tm_nonleaf_node_param_check(struct rte_eth_dev *dev,
				 struct rte_tm_node_params *params,
				 struct rte_tm_error *error)
{
	struct hns3_tm_shaper_profile *shaper_profile;

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		shaper_profile = hns3_tm_shaper_profile_search(dev,
					params->shaper_profile_id);
		if (shaper_profile == NULL) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
			error->message = "shaper profile does not exist";
			return -EINVAL;
		}
	}

	if (params->nonleaf.wfq_weight_mode) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
		error->message = "WFQ not supported";
		return -EINVAL;
	}

	if (params->nonleaf.n_sp_priorities != 1) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
		error->message = "SP priority not supported";
		return -EINVAL;
	}

	return 0;
}

static int
hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev __rte_unused,
			      struct rte_tm_node_params *params,
			      struct rte_tm_error *error)
{
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
		error->message = "shaper not supported";
		return -EINVAL;
	}

	if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "congestion management not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.wred_profile_id != RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}

static int
hns3_tm_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
			 uint32_t priority, uint32_t weight,
			 struct rte_tm_node_params *params,
			 struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (hns3_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	if (node_id >= pf->tm_conf.nb_leaf_nodes_max)
		return hns3_tm_nonleaf_node_param_check(dev, params, error);

	return hns3_tm_leaf_node_param_check(dev, params, error);
}

static int
hns3_tm_port_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		      uint32_t level_id, struct rte_tm_node_params *params,
		      struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_PORT) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	if (node_id != pf->tm_conf.nb_nodes_max - 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid port node ID";
		return -EINVAL;
	}

	if (pf->tm_conf.root) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "already have a root";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = NULL;
	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
					params->shaper_profile_id);
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	pf->tm_conf.root = tm_node;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count++;

	return 0;
}

static int
hns3_tm_tc_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		    uint32_t level_id, struct hns3_tm_node *parent_node,
		    struct rte_tm_node_params *params,
		    struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	if (node_id >= pf->tm_conf.nb_nodes_max - 1 ||
	    node_id < pf->tm_conf.nb_leaf_nodes_max ||
	    hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id) >= hw->num_tc) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid tc node ID";
		return -EINVAL;
	}

	if (pf->tm_conf.nb_tc_node >= hw->num_tc) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "too many TCs";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
					params->shaper_profile_id);
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list, tm_node, node);
	pf->tm_conf.nb_tc_node++;
	tm_node->parent->reference_count++;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count++;

	return 0;
}

static int
hns3_tm_queue_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		       uint32_t level_id, struct hns3_tm_node *parent_node,
		       struct rte_tm_node_params *params,
		       struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_QUEUE) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	/* note: dev->data->nb_tx_queues <= max_tx_queues */
	if (node_id >= dev->data->nb_tx_queues) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid queue node ID";
		return -EINVAL;
	}

	if (hns3_txq_mapped_tc_get(hw, node_id) !=
	    hns3_tm_calc_node_tc_no(&pf->tm_conf, parent_node->id)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "queue's TC does not match parent's TC";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list, tm_node, node);
	pf->tm_conf.nb_queue_node++;
	tm_node->parent->reference_count++;

	return 0;
}

static int
hns3_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		 uint32_t parent_node_id, uint32_t priority,
		 uint32_t weight, uint32_t level_id,
		 struct rte_tm_node_params *params,
		 struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type parent_node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *parent_node;
	int ret;

	if (params == NULL || error == NULL)
		return -EINVAL;

	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = hns3_tm_node_param_check(dev, node_id, priority, weight,
				       params, error);
	if (ret)
		return ret;

	/* the root node has no parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL)
		return hns3_tm_port_node_add(dev, node_id, level_id,
					     params, error);

	parent_node = hns3_tm_node_search(dev, parent_node_id,
					  &parent_node_type);
	if (parent_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent does not exist";
		return -EINVAL;
	}

	if (parent_node_type != HNS3_TM_NODE_TYPE_PORT &&
	    parent_node_type != HNS3_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}

	if (parent_node_type == HNS3_TM_NODE_TYPE_PORT)
		return hns3_tm_tc_node_add(dev, node_id, level_id,
					   parent_node, params, error);

	return hns3_tm_queue_node_add(dev, node_id, level_id,
				      parent_node, params, error);
}

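/*
 * A sketch of the expected construction order from the application
 * (hypothetical IDs following the layout comment near the top; assumes one
 * TC with one queue, priority 0 and weight 1 as required by the parameter
 * check, error handling elided):
 *
 *	uint32_t root_id = nb_nodes_max - 1;
 *	uint32_t tc0_id = nb_leaf_nodes_max;
 *	rte_tm_node_add(port_id, root_id, RTE_TM_NODE_ID_NULL, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &params, &error);
 *	rte_tm_node_add(port_id, tc0_id, root_id, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &params, &error);
 *	rte_tm_node_add(port_id, 0, tc0_id, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &params, &error);
 */
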
static void
hns3_tm_node_do_delete(struct hns3_pf *pf,
		       enum hns3_tm_node_type node_type,
		       struct hns3_tm_node *tm_node)
{
	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
		if (tm_node->shaper_profile)
			tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		pf->tm_conf.root = NULL;
		return;
	}

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == HNS3_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
		pf->tm_conf.nb_tc_node--;
	} else {
		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
		pf->tm_conf.nb_queue_node--;
	}
	rte_free(tm_node);
}

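/*
 * Reference counting summary: a node's reference_count tracks its child
 * nodes, while a shaper profile's reference_count tracks the nodes using
 * it. Deletion therefore proceeds leaf-to-root, and a profile can only be
 * removed once no node references it.
 */
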
static int
hns3_tm_node_delete(struct rte_eth_dev *dev,
		    uint32_t node_id,
		    struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *tm_node;

	if (error == NULL)
		return -EINVAL;

	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "cannot delete a node which has children";
		return -EINVAL;
	}

	hns3_tm_node_do_delete(pf, node_type, tm_node);

	return 0;
}

static int
hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		      int *is_leaf, struct rte_tm_error *error)
{
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *tm_node;

	if (is_leaf == NULL || error == NULL)
		return -EINVAL;

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (node_type == HNS3_TM_NODE_TYPE_QUEUE)
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}

static void
hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev,
				       uint32_t level_id,
				       struct rte_tm_level_capabilities *cap)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	if (level_id == HNS3_TM_NODE_LEVEL_PORT) {
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->n_nodes_leaf_max = 0;
	} else {
		cap->n_nodes_max = HNS3_MAX_TC_NUM;
		cap->n_nodes_nonleaf_max = HNS3_MAX_TC_NUM;
		cap->n_nodes_leaf_max = 0;
	}

	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->nonleaf.shaper_private_supported = true;
	cap->nonleaf.shaper_private_dual_rate_supported = false;
	cap->nonleaf.shaper_private_rate_min = 0;
	cap->nonleaf.shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
	cap->nonleaf.shaper_shared_n_max = 0;
	if (level_id == HNS3_TM_NODE_LEVEL_PORT)
		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
	else
		cap->nonleaf.sched_n_children_max = max_tx_queues;
	cap->nonleaf.sched_sp_n_priorities_max = 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
	cap->nonleaf.sched_wfq_n_groups_max = 0;
	cap->nonleaf.sched_wfq_weight_max = 1;
	cap->nonleaf.stats_mask = 0;
}

static void
hns3_tm_leaf_level_capabilities_get(struct rte_eth_dev *dev,
				    struct rte_tm_level_capabilities *cap)
{
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	cap->n_nodes_max = max_tx_queues;
	cap->n_nodes_nonleaf_max = 0;
	cap->n_nodes_leaf_max = max_tx_queues;

	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->leaf.shaper_private_supported = false;
	cap->leaf.shaper_private_dual_rate_supported = false;
	cap->leaf.shaper_private_rate_min = 0;
	cap->leaf.shaper_private_rate_max = 0;
	cap->leaf.shaper_shared_n_max = 0;
	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = false;
	cap->leaf.cman_wred_context_shared_n_max = 0;
	cap->leaf.stats_mask = 0;
}

static int
hns3_tm_level_capabilities_get(struct rte_eth_dev *dev,
			       uint32_t level_id,
			       struct rte_tm_level_capabilities *cap,
			       struct rte_tm_error *error)
{
	if (cap == NULL || error == NULL)
		return -EINVAL;

	if (level_id >= HNS3_TM_NODE_LEVEL_MAX) {
		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
		error->message = "too deep level";
		return -EINVAL;
	}

	memset(cap, 0, sizeof(struct rte_tm_level_capabilities));

	if (level_id != HNS3_TM_NODE_LEVEL_QUEUE)
		hns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap);
	else
		hns3_tm_leaf_level_capabilities_get(dev, cap);

	return 0;
}

static void
hns3_tm_nonleaf_node_capabilities_get(struct rte_eth_dev *dev,
				      enum hns3_tm_node_type node_type,
				      struct rte_tm_node_capabilities *cap)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	cap->shaper_private_supported = true;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	cap->shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
	cap->shaper_shared_n_max = 0;

	if (node_type == HNS3_TM_NODE_TYPE_PORT)
		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
	else
		cap->nonleaf.sched_n_children_max = max_tx_queues;
	cap->nonleaf.sched_sp_n_priorities_max = 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
	cap->nonleaf.sched_wfq_n_groups_max = 0;
	cap->nonleaf.sched_wfq_weight_max = 1;

	cap->stats_mask = 0;
}

static void
hns3_tm_leaf_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
				   struct rte_tm_node_capabilities *cap)
{
	cap->shaper_private_supported = false;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	cap->shaper_private_rate_max = 0;
	cap->shaper_shared_n_max = 0;

	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = false;
	cap->leaf.cman_wred_context_shared_n_max = 0;

	cap->stats_mask = 0;
}

static int
hns3_tm_node_capabilities_get(struct rte_eth_dev *dev,
			      uint32_t node_id,
			      struct rte_tm_node_capabilities *cap,
			      struct rte_tm_error *error)
{
	enum hns3_tm_node_type node_type;
	struct hns3_tm_node *tm_node;

	if (cap == NULL || error == NULL)
		return -EINVAL;

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	memset(cap, 0, sizeof(struct rte_tm_node_capabilities));

	if (node_type != HNS3_TM_NODE_TYPE_QUEUE)
		hns3_tm_nonleaf_node_capabilities_get(dev, node_type, cap);
	else
		hns3_tm_leaf_node_capabilities_get(dev, cap);

	return 0;
}

static int
hns3_tm_config_port_rate(struct hns3_hw *hw,
			 struct hns3_tm_shaper_profile *shaper_profile)
{
	struct hns3_port_limit_rate_cmd *cfg;
	struct hns3_cmd_desc desc;
	uint32_t firmware_rate;
	uint64_t rate;
	int ret;

	if (shaper_profile) {
		rate = shaper_profile->profile.peak.rate;
		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
	} else {
		firmware_rate = hw->max_tm_rate;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_LIMIT_RATE, false);
	cfg = (struct hns3_port_limit_rate_cmd *)desc.data;
	cfg->speed = rte_cpu_to_le_32(firmware_rate);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "failed to config port rate, ret = %d", ret);

	return ret;
}

static int
hns3_tm_config_tc_rate(struct hns3_hw *hw, uint8_t tc_no,
		       struct hns3_tm_shaper_profile *shaper_profile)
{
	struct hns3_tc_limit_rate_cmd *cfg;
	struct hns3_cmd_desc desc;
	uint32_t firmware_rate;
	uint64_t rate;
	int ret;

	if (shaper_profile) {
		rate = shaper_profile->profile.peak.rate;
		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
	} else {
		firmware_rate = hw->dcb_info.tc_info[tc_no].bw_limit;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_TC_LIMIT_RATE, false);
	cfg = (struct hns3_tc_limit_rate_cmd *)desc.data;
	cfg->speed = rte_cpu_to_le_32(firmware_rate);
	cfg->tc_id = tc_no;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "failed to config tc (%u) rate, ret = %d",
			 tc_no, ret);

	return ret;
}

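/*
 * Passing a NULL shaper_profile to either helper above restores the default
 * rate (hw->max_tm_rate for the port, the DCB bw_limit for a TC);
 * hns3_tm_dev_stop_proc() below relies on this to undo committed shapers.
 */
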
static bool
hns3_tm_configure_check(struct hns3_hw *hw, struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_conf *tm_conf = &pf->tm_conf;
	struct hns3_tm_node_list *tc_list = &tm_conf->tc_list;
	struct hns3_tm_node_list *queue_list = &tm_conf->queue_list;
	struct hns3_tm_node *tm_node;

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (!tm_node->reference_count) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "TC without queue assigned";
			return false;
		}

		if (hns3_tm_calc_node_tc_no(tm_conf, tm_node->id) >=
		    hw->num_tc) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "node's TC does not exist";
			return false;
		}
	}

	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id >= hw->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "node's queue invalid";
			return false;
		}

		if (hns3_txq_mapped_tc_get(hw, tm_node->id) !=
		    hns3_tm_calc_node_tc_no(tm_conf, tm_node->parent->id)) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "queue's TC does not match parent's TC";
			return false;
		}
	}

	return true;
}

static int
hns3_tm_hierarchy_do_commit(struct hns3_hw *hw,
			    struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;
	uint8_t tc_no;
	int ret;

	tm_node = pf->tm_conf.root;
	if (tm_node->shaper_profile) {
		ret = hns3_tm_config_port_rate(hw, tm_node->shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "failed to set port peak rate";
			return -EIO;
		}
	}

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->shaper_profile == NULL)
			continue;

		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
		ret = hns3_tm_config_tc_rate(hw, tc_no,
					     tm_node->shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "failed to set TC peak rate";
			return -EIO;
		}
	}

	return 0;
}

static int
hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
			 int clear_on_fail,
			 struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	if (error == NULL)
		return -EINVAL;

	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "device is resetting";
		/* do not goto fail_clear; the user may try again later */
		return -EBUSY;
	}

	if (pf->tm_conf.root == NULL)
		goto done;

	/* validate the configuration before committing to make sure key
	 * constraints are not violated
	 */
	if (!hns3_tm_configure_check(hw, error))
		goto fail_clear;

	ret = hns3_tm_hierarchy_do_commit(hw, error);
	if (ret)
		goto fail_clear;

done:
	pf->tm_conf.committed = true;
	return 0;

fail_clear:
	if (clear_on_fail) {
		hns3_tm_conf_uninit(dev);
		hns3_tm_conf_init(dev);
	}
	return -EINVAL;
}

static int
hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
			      int clear_on_fail,
			      struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

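/*
 * Application-side sketch (hypothetical port_id, error handling elided):
 * build the hierarchy, then commit it before starting the port:
 *
 *	rte_tm_hierarchy_commit(port_id, 1, &error);	(1 = clear on fail)
 *	rte_eth_dev_start(port_id);
 */
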
static int
hns3_tm_node_shaper_do_update(struct hns3_hw *hw,
			      uint32_t node_id,
			      enum hns3_tm_node_type node_type,
			      struct hns3_tm_shaper_profile *shaper_profile,
			      struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	uint8_t tc_no;
	int ret;

	if (node_type == HNS3_TM_NODE_TYPE_QUEUE) {
		if (shaper_profile != NULL) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
			error->message = "queue node shaper not supported";
			return -EINVAL;
		}
		return 0;
	}

	if (!pf->tm_conf.committed)
		return 0;

	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
		ret = hns3_tm_config_port_rate(hw, shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "failed to update port peak rate";
		}

		return ret;
	}

	/* update TC's shaper */
	tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id);
	ret = hns3_tm_config_tc_rate(hw, tc_no, shaper_profile);
	if (ret) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "failed to update TC peak rate";
	}

	return ret;
}

static int
hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
			   uint32_t node_id,
			   uint32_t shaper_profile_id,
			   struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_shaper_profile *profile = NULL;
	struct hns3_tm_node *tm_node;

	if (error == NULL)
		return -EINVAL;

	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "device is resetting";
		return -EBUSY;
	}

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (shaper_profile_id == tm_node->params.shaper_profile_id)
		return 0;

	if (shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
		if (profile == NULL) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
			error->message = "profile ID does not exist";
			return -EINVAL;
		}
	}

	if (hns3_tm_node_shaper_do_update(hw, node_id, node_type,
					  profile, error))
		return -EINVAL;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->shaper_profile = profile;
	tm_node->params.shaper_profile_id = shaper_profile_id;
	if (profile != NULL)
		profile->reference_count++;

	return 0;
}

static int
hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
				uint32_t node_id,
				uint32_t shaper_profile_id,
				struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_node_shaper_update(dev, node_id,
					 shaper_profile_id, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

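/*
 * Application-side sketch (hypothetical IDs, error handling elided):
 * attach profile 1 to a node, or pass RTE_TM_SHAPER_PROFILE_ID_NONE to
 * detach it and fall back to the default rate:
 *
 *	rte_tm_node_shaper_update(port_id, node_id, 1, &error);
 */
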
static const struct rte_tm_ops hns3_tm_ops = {
	.capabilities_get = hns3_tm_capabilities_get,
	.shaper_profile_add = hns3_tm_shaper_profile_add,
	.shaper_profile_delete = hns3_tm_shaper_profile_del,
	.node_add = hns3_tm_node_add,
	.node_delete = hns3_tm_node_delete,
	.node_type_get = hns3_tm_node_type_get,
	.level_capabilities_get = hns3_tm_level_capabilities_get,
	.node_capabilities_get = hns3_tm_node_capabilities_get,
	.hierarchy_commit = hns3_tm_hierarchy_commit_wrap,
	.node_shaper_update = hns3_tm_node_shaper_update_wrap,
};

int
hns3_tm_ops_get(struct rte_eth_dev *dev, void *arg)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (arg == NULL)
		return -EINVAL;

	if (!hns3_dev_get_support(hw, TM))
		return -EOPNOTSUPP;

	*(const void **)arg = &hns3_tm_ops;

	return 0;
}

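/*
 * This getter is expected to be wired into the hns3 eth_dev_ops as the
 * .tm_ops_get callback; the generic rte_tm layer calls it to obtain the
 * driver's rte_tm_ops table before dispatching per-port TM requests.
 */
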
void
hns3_tm_dev_start_proc(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);

	if (!hns3_dev_get_support(hw, TM))
		return;

	if (pf->tm_conf.root && !pf->tm_conf.committed)
		hns3_warn(hw,
			"please call hierarchy_commit() before starting the port.");
}

/*
 * The tm_conf committed flag is cleared when the device stops so that the
 * user can modify the tm configuration (e.g. add or delete a node).
 *
 * If the user did not call hierarchy_commit() again before the next device
 * start, the port/TC shaper rates would silently stay at the previously
 * committed values. To avoid that, the port/TC shaper rates are restored to
 * their defaults when the device stops.
 */
void
hns3_tm_dev_stop_proc(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;
	uint8_t tc_no;

	if (!pf->tm_conf.committed)
		return;

	tm_node = pf->tm_conf.root;
	if (tm_node != NULL && tm_node->shaper_profile)
		(void)hns3_tm_config_port_rate(hw, NULL);

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->shaper_profile == NULL)
			continue;

		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
		(void)hns3_tm_config_tc_rate(hw, tc_no, NULL);
	}

	pf->tm_conf.committed = false;
}

int
hns3_tm_conf_update(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct rte_tm_error error;

	if (!hns3_dev_get_support(hw, TM))
		return 0;

	if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)
		return 0;

	memset(&error, 0, sizeof(struct rte_tm_error));
	return hns3_tm_hierarchy_do_commit(hw, &error);
}