/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 HiSilicon Limited.
 */

#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_dcb.h"
#include "hns3_logs.h"
#include "hns3_tm.h"

static inline uint32_t
hns3_tm_max_tx_queues_get(struct rte_eth_dev *dev)
{
	/*
	 * This API is called at the PCI device probe stage, when
	 * rte_eth_dev_info_get() cannot be used to get max_tx_queues
	 * (rte_eth_devices is not set up yet), so call
	 * hns3_dev_infos_get() directly.
	 */
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	(void)hns3_dev_infos_get(dev, &dev_info);
	return RTE_MIN(dev_info.max_tx_queues, RTE_MAX_QUEUES_PER_PORT);
}
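
/*
 * Node ids are laid out as follows (spelled out here for clarity; this is
 * derived from the checks in the node-add helpers below):
 *   [0, nb_leaf_nodes_max)               - queue (leaf) nodes
 *   [nb_leaf_nodes_max, nb_nodes_max-1)  - TC nodes
 *   nb_nodes_max - 1                     - the single port (root) node
 */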
static void
hns3_tm_conf_init(struct rte_eth_dev *dev)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	if (!hns3_dev_tm_supported(hw))
		return;

	pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
	pf->tm_conf.nb_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
	pf->tm_conf.nb_shaper_profile_max = 1 + HNS3_MAX_TC_NUM;

	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
	pf->tm_conf.nb_shaper_profile = 0;

	pf->tm_conf.root = NULL;
	TAILQ_INIT(&pf->tm_conf.tc_list);
	TAILQ_INIT(&pf->tm_conf.queue_list);
	pf->tm_conf.nb_tc_node = 0;
	pf->tm_conf.nb_queue_node = 0;

	pf->tm_conf.committed = false;
}
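
/* Release every node and shaper profile held in tm_conf and reset the limits. */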
static void
hns3_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;
	struct hns3_tm_node *tm_node;

	if (!hns3_dev_tm_supported(hw))
		return;

	if (pf->tm_conf.nb_queue_node > 0) {
		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
			TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
			rte_free(tm_node);
		}
		pf->tm_conf.nb_queue_node = 0;
	}

	if (pf->tm_conf.nb_tc_node > 0) {
		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
			TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
			rte_free(tm_node);
		}
		pf->tm_conf.nb_tc_node = 0;
	}

	if (pf->tm_conf.root != NULL) {
		rte_free(pf->tm_conf.root);
		pf->tm_conf.root = NULL;
	}

	if (pf->tm_conf.nb_shaper_profile > 0) {
		while ((shaper_profile =
		       TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
			TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
				     shaper_profile, node);
			rte_free(shaper_profile);
		}
		pf->tm_conf.nb_shaper_profile = 0;
	}

	pf->tm_conf.nb_leaf_nodes_max = 0;
	pf->tm_conf.nb_nodes_max = 0;
	pf->tm_conf.nb_shaper_profile_max = 0;
}

static inline uint64_t
hns3_tm_rate_convert_firmware2tm(uint32_t firmware_rate)
{
#define FIRMWARE_TO_TM_RATE_SCALE	125000
	/* tm rate unit is Bps, firmware rate is Mbps */
	return ((uint64_t)firmware_rate) * FIRMWARE_TO_TM_RATE_SCALE;
}

static inline uint32_t
hns3_tm_rate_convert_tm2firmware(uint64_t tm_rate)
{
#define TM_TO_FIRMWARE_RATE_SCALE	125000
	/* tm rate unit is Bps, firmware rate is Mbps */
	return (uint32_t)(tm_rate / TM_TO_FIRMWARE_RATE_SCALE);
}
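
/*
 * A worked example of the scale factor: the rte_tm API expresses rates in
 * bytes per second while the firmware uses Mbit/s, and
 * 1 Mbit/s = 1000000 bits/s / 8 bits-per-byte = 125000 Bps. So a firmware
 * rate of 10000 (10 Gbit/s) converts to 10000 * 125000 = 1250000000 Bps.
 */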

static int
hns3_tm_capabilities_get(struct rte_eth_dev *dev,
			 struct rte_tm_capabilities *cap,
			 struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	if (cap == NULL || error == NULL)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	cap->n_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
	cap->n_levels_max = HNS3_TM_NODE_LEVEL_MAX;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = 1 + HNS3_MAX_TC_NUM;
	cap->shaper_private_n_max = 1 + HNS3_MAX_TC_NUM;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	cap->shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;

	cap->sched_n_children_max = max_tx_queues;
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	cap->sched_wfq_weight_max = 1;

	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;

	return 0;
}
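
/*
 * A minimal application-side sketch (not driver code, shown for context):
 * these capabilities are reached through the generic rte_tm API, e.g.
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error error;
 *	if (rte_tm_capabilities_get(port_id, &cap, &error) != 0)
 *		printf("get caps failed: %s\n",
 *		       error.message != NULL ? error.message : "unknown");
 *
 * where port_id is the application's ethdev port number.
 */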

static struct hns3_tm_shaper_profile *
hns3_tm_shaper_profile_search(struct rte_eth_dev *dev,
			      uint32_t shaper_profile_id)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_shaper_profile_list *shaper_profile_list =
		&pf->tm_conf.shaper_profile_list;
	struct hns3_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}

static int
hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
				   struct rte_tm_shaper_params *profile,
				   struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}

	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}

	if (profile->peak.rate >
	    hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate)) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
		error->message = "peak rate too large";
		return -EINVAL;
	}

	if (profile->peak.rate < hns3_tm_rate_convert_firmware2tm(1)) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
		error->message = "peak rate must be at least 1 Mbps";
		return -EINVAL;
	}

	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}

	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	if (profile->packet_mode) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE;
		error->message = "packet mode not supported";
		return -EINVAL;
	}

	return 0;
}

static int
hns3_tm_shaper_profile_add(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id,
			   struct rte_tm_shaper_params *profile,
			   struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;
	int ret;

	if (profile == NULL || error == NULL)
		return -EINVAL;

	if (pf->tm_conf.nb_shaper_profile >=
	    pf->tm_conf.nb_shaper_profile_max) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "too many profiles";
		return -EINVAL;
	}

	ret = hns3_tm_shaper_profile_param_check(dev, profile, error);
	if (ret)
		return ret;

	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("hns3_tm_shaper_profile",
				     sizeof(struct hns3_tm_shaper_profile),
				     0);
	if (shaper_profile == NULL)
		return -ENOMEM;

	shaper_profile->shaper_profile_id = shaper_profile_id;
	memcpy(&shaper_profile->profile, profile,
	       sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
			  shaper_profile, node);
	pf->tm_conf.nb_shaper_profile++;

	return 0;
}
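
/*
 * A hedged application-side sketch: only the peak rate is honoured by this
 * driver, so a profile is typically built as
 *
 *	struct rte_tm_shaper_params params = {
 *		.peak = { .rate = 125000000 },	(1 Gbit/s expressed in Bps)
 *	};
 *	rte_tm_shaper_profile_add(port_id, profile_id, &params, &error);
 *
 * profile_id is an application-chosen id; it only has to be unused.
 */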

static int
hns3_tm_shaper_profile_del(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id,
			   struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;

	if (error == NULL)
		return -EINVAL;

	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
	if (shaper_profile == NULL) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID does not exist";
		return -EINVAL;
	}

	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);
	pf->tm_conf.nb_shaper_profile--;

	return 0;
}

static struct hns3_tm_node *
hns3_tm_node_search(struct rte_eth_dev *dev,
		    uint32_t node_id,
		    enum hns3_tm_node_type *node_type)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node_list *queue_list = &pf->tm_conf.queue_list;
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;

	if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
		*node_type = HNS3_TM_NODE_TYPE_PORT;
		return pf->tm_conf.root;
	}

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = HNS3_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = HNS3_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}

static int
hns3_tm_nonleaf_node_param_check(struct rte_eth_dev *dev,
				 struct rte_tm_node_params *params,
				 struct rte_tm_error *error)
{
	struct hns3_tm_shaper_profile *shaper_profile;

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		shaper_profile = hns3_tm_shaper_profile_search(dev,
					params->shaper_profile_id);
		if (shaper_profile == NULL) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
			error->message = "shaper profile does not exist";
			return -EINVAL;
		}
	}

	if (params->nonleaf.wfq_weight_mode) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
		error->message = "WFQ not supported";
		return -EINVAL;
	}

	if (params->nonleaf.n_sp_priorities != 1) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
		error->message = "SP priority not supported";
		return -EINVAL;
	}

	return 0;
}

static int
hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev __rte_unused,
			      struct rte_tm_node_params *params,
			      struct rte_tm_error *error)
{
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
		error->message = "shaper not supported";
		return -EINVAL;
	}

	if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "congestion management not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.wred_profile_id != RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}

static int
hns3_tm_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
			 uint32_t priority, uint32_t weight,
			 struct rte_tm_node_params *params,
			 struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (hns3_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
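
	/*
	 * Node ids below nb_leaf_nodes_max are queue (leaf) nodes; ids at or
	 * above it belong to TC or port (non-leaf) nodes.
	 */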
	if (node_id >= pf->tm_conf.nb_leaf_nodes_max)
		return hns3_tm_nonleaf_node_param_check(dev, params, error);

	return hns3_tm_leaf_node_param_check(dev, params, error);
}

static int
hns3_tm_port_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		      uint32_t level_id, struct rte_tm_node_params *params,
		      struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_PORT) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	if (node_id != pf->tm_conf.nb_nodes_max - 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid port node ID";
		return -EINVAL;
	}

	if (pf->tm_conf.root) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "already have a root";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = NULL;
	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
					params->shaper_profile_id);
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	pf->tm_conf.root = tm_node;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count++;

	return 0;
}

static int
hns3_tm_tc_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		    uint32_t level_id, struct hns3_tm_node *parent_node,
		    struct rte_tm_node_params *params,
		    struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	if (node_id >= pf->tm_conf.nb_nodes_max - 1 ||
	    node_id < pf->tm_conf.nb_leaf_nodes_max ||
	    hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id) >= hw->num_tc) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid tc node ID";
		return -EINVAL;
	}

	if (pf->tm_conf.nb_tc_node >= hw->num_tc) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "too many TCs";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
					params->shaper_profile_id);
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list, tm_node, node);
	pf->tm_conf.nb_tc_node++;
	tm_node->parent->reference_count++;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count++;

	return 0;
}

static int
hns3_tm_queue_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		       uint32_t level_id, struct hns3_tm_node *parent_node,
		       struct rte_tm_node_params *params,
		       struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_QUEUE) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	/* note: dev->data->nb_tx_queues <= max_tx_queues */
	if (node_id >= dev->data->nb_tx_queues) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid queue node ID";
		return -EINVAL;
	}

	if (hns3_txq_mapped_tc_get(hw, node_id) !=
	    hns3_tm_calc_node_tc_no(&pf->tm_conf, parent_node->id)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "queue's TC does not match parent's TC";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list, tm_node, node);
	pf->tm_conf.nb_queue_node++;
	tm_node->parent->reference_count++;

	return 0;
}

static int
hns3_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		 uint32_t parent_node_id, uint32_t priority,
		 uint32_t weight, uint32_t level_id,
		 struct rte_tm_node_params *params,
		 struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type parent_node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *parent_node;
	int ret;

	if (params == NULL || error == NULL)
		return -EINVAL;

	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = hns3_tm_node_param_check(dev, node_id, priority, weight,
				       params, error);
	if (ret)
		return ret;

	/* The root node has no parent. */
	if (parent_node_id == RTE_TM_NODE_ID_NULL)
		return hns3_tm_port_node_add(dev, node_id, level_id,
					     params, error);

	parent_node = hns3_tm_node_search(dev, parent_node_id,
					  &parent_node_type);
	if (parent_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent does not exist";
		return -EINVAL;
	}

	if (parent_node_type != HNS3_TM_NODE_TYPE_PORT &&
	    parent_node_type != HNS3_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}

	if (parent_node_type == HNS3_TM_NODE_TYPE_PORT)
		return hns3_tm_tc_node_add(dev, node_id, level_id,
					   parent_node, params, error);
	else
		return hns3_tm_queue_node_add(dev, node_id, level_id,
					      parent_node, params, error);
}
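
/*
 * A hedged application-side sketch of building the three-level hierarchy
 * (generic rte_tm API; "nb_nodes_max" and "nb_leaf_nodes_max" stand for
 * 1 + HNS3_MAX_TC_NUM + max_tx_queues and max_tx_queues, following the id
 * layout described above hns3_tm_conf_init()):
 *
 *	struct rte_tm_node_params nonleaf = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *		.nonleaf = { .n_sp_priorities = 1 },
 *	};
 *	struct rte_tm_node_params leaf = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *		.leaf = {
 *			.cman = RTE_TM_CMAN_TAIL_DROP,
 *			.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE,
 *		},
 *	};
 *	uint32_t port_node = nb_nodes_max - 1;
 *	uint32_t tc0_node = nb_leaf_nodes_max;
 *
 *	rte_tm_node_add(port_id, port_node, RTE_TM_NODE_ID_NULL, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &nonleaf, &error);
 *	rte_tm_node_add(port_id, tc0_node, port_node, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &nonleaf, &error);
 *	rte_tm_node_add(port_id, 0, tc0_node, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &leaf, &error);
 *
 * Priority must be 0 and weight must be 1, per hns3_tm_node_param_check().
 */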

static void
hns3_tm_node_do_delete(struct hns3_pf *pf,
		       enum hns3_tm_node_type node_type,
		       struct hns3_tm_node *tm_node)
{
	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
		if (tm_node->shaper_profile)
			tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		pf->tm_conf.root = NULL;
		return;
	}

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == HNS3_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
		pf->tm_conf.nb_tc_node--;
	} else {
		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
		pf->tm_conf.nb_queue_node--;
	}
	rte_free(tm_node);
}

static int
hns3_tm_node_delete(struct rte_eth_dev *dev,
		    uint32_t node_id,
		    struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *tm_node;

	if (error == NULL)
		return -EINVAL;

	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "cannot delete a node which has children";
		return -EINVAL;
	}

	hns3_tm_node_do_delete(pf, node_type, tm_node);

	return 0;
}

static int
hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		      int *is_leaf, struct rte_tm_error *error)
{
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *tm_node;

	if (is_leaf == NULL || error == NULL)
		return -EINVAL;

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (node_type == HNS3_TM_NODE_TYPE_QUEUE)
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}

static void
hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev,
				       uint32_t level_id,
				       struct rte_tm_level_capabilities *cap)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	if (level_id == HNS3_TM_NODE_LEVEL_PORT) {
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->n_nodes_leaf_max = 0;
	} else {
		cap->n_nodes_max = HNS3_MAX_TC_NUM;
		cap->n_nodes_nonleaf_max = HNS3_MAX_TC_NUM;
		cap->n_nodes_leaf_max = 0;
	}

	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->nonleaf.shaper_private_supported = true;
	cap->nonleaf.shaper_private_dual_rate_supported = false;
	cap->nonleaf.shaper_private_rate_min = 0;
	cap->nonleaf.shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
	cap->nonleaf.shaper_shared_n_max = 0;
	if (level_id == HNS3_TM_NODE_LEVEL_PORT)
		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
	else
		cap->nonleaf.sched_n_children_max = max_tx_queues;
	cap->nonleaf.sched_sp_n_priorities_max = 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
	cap->nonleaf.sched_wfq_n_groups_max = 0;
	cap->nonleaf.sched_wfq_weight_max = 1;
	cap->nonleaf.stats_mask = 0;
}

static void
hns3_tm_leaf_level_capabilities_get(struct rte_eth_dev *dev,
				    struct rte_tm_level_capabilities *cap)
{
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	cap->n_nodes_max = max_tx_queues;
	cap->n_nodes_nonleaf_max = 0;
	cap->n_nodes_leaf_max = max_tx_queues;

	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->leaf.shaper_private_supported = false;
	cap->leaf.shaper_private_dual_rate_supported = false;
	cap->leaf.shaper_private_rate_min = 0;
	cap->leaf.shaper_private_rate_max = 0;
	cap->leaf.shaper_shared_n_max = 0;
	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = false;
	cap->leaf.cman_wred_context_shared_n_max = 0;
	cap->leaf.stats_mask = 0;
}

static int
hns3_tm_level_capabilities_get(struct rte_eth_dev *dev,
			       uint32_t level_id,
			       struct rte_tm_level_capabilities *cap,
			       struct rte_tm_error *error)
{
	if (cap == NULL || error == NULL)
		return -EINVAL;

	if (level_id >= HNS3_TM_NODE_LEVEL_MAX) {
		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
		error->message = "too deep level";
		return -EINVAL;
	}

	memset(cap, 0, sizeof(struct rte_tm_level_capabilities));

	if (level_id != HNS3_TM_NODE_LEVEL_QUEUE)
		hns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap);
	else
		hns3_tm_leaf_level_capabilities_get(dev, cap);

	return 0;
}

static void
hns3_tm_nonleaf_node_capabilities_get(struct rte_eth_dev *dev,
				      enum hns3_tm_node_type node_type,
				      struct rte_tm_node_capabilities *cap)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	cap->shaper_private_supported = true;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	cap->shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
	cap->shaper_shared_n_max = 0;

	if (node_type == HNS3_TM_NODE_TYPE_PORT)
		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
	else
		cap->nonleaf.sched_n_children_max = max_tx_queues;
	cap->nonleaf.sched_sp_n_priorities_max = 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
	cap->nonleaf.sched_wfq_n_groups_max = 0;
	cap->nonleaf.sched_wfq_weight_max = 1;

	cap->stats_mask = 0;
}

static void
hns3_tm_leaf_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
				   struct rte_tm_node_capabilities *cap)
{
	cap->shaper_private_supported = false;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	cap->shaper_private_rate_max = 0;
	cap->shaper_shared_n_max = 0;

	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = false;
	cap->leaf.cman_wred_context_shared_n_max = 0;

	cap->stats_mask = 0;
}

static int
hns3_tm_node_capabilities_get(struct rte_eth_dev *dev,
			      uint32_t node_id,
			      struct rte_tm_node_capabilities *cap,
			      struct rte_tm_error *error)
{
	enum hns3_tm_node_type node_type;
	struct hns3_tm_node *tm_node;

	if (cap == NULL || error == NULL)
		return -EINVAL;

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	memset(cap, 0, sizeof(struct rte_tm_node_capabilities));

	if (node_type != HNS3_TM_NODE_TYPE_QUEUE)
		hns3_tm_nonleaf_node_capabilities_get(dev, node_type, cap);
	else
		hns3_tm_leaf_node_capabilities_get(dev, cap);

	return 0;
}

static int
hns3_tm_config_port_rate(struct hns3_hw *hw,
			 struct hns3_tm_shaper_profile *shaper_profile)
{
	struct hns3_port_limit_rate_cmd *cfg;
	struct hns3_cmd_desc desc;
	uint32_t firmware_rate;
	uint64_t rate;
	int ret;

	if (shaper_profile) {
		rate = shaper_profile->profile.peak.rate;
		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
	} else {
		firmware_rate = hw->max_tm_rate;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_LIMIT_RATE, false);
	cfg = (struct hns3_port_limit_rate_cmd *)desc.data;
	cfg->speed = rte_cpu_to_le_32(firmware_rate);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "failed to config port rate, ret = %d", ret);

	return ret;
}

static int
hns3_tm_config_tc_rate(struct hns3_hw *hw, uint8_t tc_no,
		       struct hns3_tm_shaper_profile *shaper_profile)
{
	struct hns3_tc_limit_rate_cmd *cfg;
	struct hns3_cmd_desc desc;
	uint32_t firmware_rate;
	uint64_t rate;
	int ret;

	if (shaper_profile) {
		rate = shaper_profile->profile.peak.rate;
		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
	} else {
		firmware_rate = hw->dcb_info.tc_info[tc_no].bw_limit;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_TC_LIMIT_RATE, false);
	cfg = (struct hns3_tc_limit_rate_cmd *)desc.data;
	cfg->speed = rte_cpu_to_le_32(firmware_rate);
	cfg->tc_id = tc_no;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "failed to config tc (%u) rate, ret = %d",
			 tc_no, ret);

	return ret;
}
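
/*
 * Validate the staged hierarchy before it is written to hardware: every TC
 * node must have at least one queue attached and must map to an enabled TC,
 * and every queue node must belong to the same TC as its parent.
 */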
static bool
hns3_tm_configure_check(struct hns3_hw *hw, struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_conf *tm_conf = &pf->tm_conf;
	struct hns3_tm_node_list *tc_list = &tm_conf->tc_list;
	struct hns3_tm_node_list *queue_list = &tm_conf->queue_list;
	struct hns3_tm_node *tm_node;

	/* TC nodes */
	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (!tm_node->reference_count) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "TC without queue assigned";
			return false;
		}

		if (hns3_tm_calc_node_tc_no(tm_conf, tm_node->id) >=
		    hw->num_tc) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "node's TC does not exist";
			return false;
		}
	}

	/* queue nodes */
	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id >= hw->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "node's queue invalid";
			return false;
		}

		if (hns3_txq_mapped_tc_get(hw, tm_node->id) !=
		    hns3_tm_calc_node_tc_no(tm_conf, tm_node->parent->id)) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "queue's TC does not match parent's TC";
			return false;
		}
	}

	return true;
}

static int
hns3_tm_hierarchy_do_commit(struct hns3_hw *hw,
			    struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;
	uint8_t tc_no;
	int ret;

	tm_node = pf->tm_conf.root;
	if (tm_node->shaper_profile) {
		ret = hns3_tm_config_port_rate(hw, tm_node->shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "fail to set port peak rate";
			return ret;
		}
	}

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->shaper_profile == NULL)
			continue;

		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
		ret = hns3_tm_config_tc_rate(hw, tc_no,
					     tm_node->shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "fail to set TC peak rate";
			return ret;
		}
	}

	return 0;
}

static int
hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
			 int clear_on_fail,
			 struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	if (error == NULL)
		return -EINVAL;

	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "device is resetting";
		/* don't goto fail_clear, the user may retry later */
		return -EBUSY;
	}

	if (pf->tm_conf.root == NULL)
		goto done;

	/* Check the configuration before committing so that key constraints
	 * are not violated.
	 */
	if (!hns3_tm_configure_check(hw, error))
		goto fail_clear;

	ret = hns3_tm_hierarchy_do_commit(hw, error);
	if (ret)
		goto fail_clear;

done:
	pf->tm_conf.committed = true;
	return 0;

fail_clear:
	if (clear_on_fail) {
		hns3_tm_conf_uninit(dev);
		hns3_tm_conf_init(dev);
	}

	return -EINVAL;
}

static int
hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
			      int clear_on_fail,
			      struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}
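
/*
 * Hedged application-side sketch: once the hierarchy is built it is applied
 * with
 *
 *	rte_tm_hierarchy_commit(port_id, 1, &error);
 *
 * where clear_on_fail = 1 asks the driver to drop the staged configuration
 * on failure; here that re-initializes tm_conf via hns3_tm_conf_uninit()
 * plus hns3_tm_conf_init().
 */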

static int
hns3_tm_node_shaper_do_update(struct hns3_hw *hw,
			      uint32_t node_id,
			      enum hns3_tm_node_type node_type,
			      struct hns3_tm_shaper_profile *shaper_profile,
			      struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	uint8_t tc_no;
	int ret;

	if (node_type == HNS3_TM_NODE_TYPE_QUEUE) {
		if (shaper_profile != NULL) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
			error->message = "queue node shaper not supported";
			return -EINVAL;
		}
		return 0;
	}

	if (!pf->tm_conf.committed)
		return 0;

	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
		ret = hns3_tm_config_port_rate(hw, shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "fail to update port peak rate";
		}

		return ret;
	}

	/* update the TC's shaper */
	tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id);
	ret = hns3_tm_config_tc_rate(hw, tc_no, shaper_profile);
	if (ret) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "fail to update TC peak rate";
	}

	return ret;
}

static int
hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
			   uint32_t node_id,
			   uint32_t shaper_profile_id,
			   struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_shaper_profile *profile = NULL;
	struct hns3_tm_node *tm_node;

	if (error == NULL)
		return -EINVAL;

	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "device is resetting";
		return -EBUSY;
	}

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (shaper_profile_id == tm_node->params.shaper_profile_id)
		return 0;

	if (shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
		if (profile == NULL) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
			error->message = "profile ID does not exist";
			return -EINVAL;
		}
	}

	if (hns3_tm_node_shaper_do_update(hw, node_id, node_type,
					  profile, error))
		return -EINVAL;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->shaper_profile = profile;
	tm_node->params.shaper_profile_id = shaper_profile_id;
	if (profile != NULL)
		profile->reference_count++;

	return 0;
}

static int
hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
				uint32_t node_id,
				uint32_t shaper_profile_id,
				struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_node_shaper_update(dev, node_id,
					 shaper_profile_id, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}
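
/*
 * Hedged application-side sketch: the shaper of a committed port/TC node can
 * be changed at runtime with
 *
 *	rte_tm_node_shaper_update(port_id, node_id, new_profile_id, &error);
 *
 * Passing RTE_TM_SHAPER_PROFILE_ID_NONE detaches the shaper; the update path
 * above then programs the firmware default rate for that node.
 */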

static const struct rte_tm_ops hns3_tm_ops = {
	.capabilities_get = hns3_tm_capabilities_get,
	.shaper_profile_add = hns3_tm_shaper_profile_add,
	.shaper_profile_delete = hns3_tm_shaper_profile_del,
	.node_add = hns3_tm_node_add,
	.node_delete = hns3_tm_node_delete,
	.node_type_get = hns3_tm_node_type_get,
	.level_capabilities_get = hns3_tm_level_capabilities_get,
	.node_capabilities_get = hns3_tm_node_capabilities_get,
	.hierarchy_commit = hns3_tm_hierarchy_commit_wrap,
	.node_shaper_update = hns3_tm_node_shaper_update_wrap,
};

int
hns3_tm_ops_get(struct rte_eth_dev *dev, void *arg)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (arg == NULL)
		return -EINVAL;

	if (!hns3_dev_tm_supported(hw))
		return -EOPNOTSUPP;

	*(const void **)arg = &hns3_tm_ops;

	return 0;
}

void
hns3_tm_dev_start_proc(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);

	if (!hns3_dev_tm_supported(hw))
		return;

	if (pf->tm_conf.root && !pf->tm_conf.committed)
		hns3_warn(hw,
			  "please call hierarchy_commit() before starting the port.");
}

/*
 * Clear the tm_conf committed flag when the device stops so that the user
 * can modify the tm configuration (e.g. add or delete a node).
 * If the user does not call hierarchy_commit() when the device is started
 * again, the Port/TC shaper rates would silently stay at the previously
 * committed values. To avoid that, restore the default Port/TC shaper rates
 * when the device stops.
 */
void
hns3_tm_dev_stop_proc(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;
	uint8_t tc_no;

	if (!pf->tm_conf.committed)
		return;

	tm_node = pf->tm_conf.root;
	if (tm_node != NULL && tm_node->shaper_profile)
		(void)hns3_tm_config_port_rate(hw, NULL);

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->shaper_profile == NULL)
			continue;
		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
		(void)hns3_tm_config_tc_rate(hw, tc_no, NULL);
	}

	pf->tm_conf.committed = false;
}

int
hns3_tm_conf_update(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct rte_tm_error error;

	if (!hns3_dev_tm_supported(hw))
		return 0;

	if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)
		return 0;

	memset(&error, 0, sizeof(struct rte_tm_error));
	return hns3_tm_hierarchy_do_commit(hw, &error);
}