/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Hisilicon Limited.
 */

#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_dcb.h"
#include "hns3_logs.h"

static inline uint32_t
hns3_tm_max_tx_queues_get(struct rte_eth_dev *dev)
{
	/*
	 * This API is called in the PCI device probe stage. We can't call
	 * rte_eth_dev_info_get to get max_tx_queues (rte_eth_devices is not
	 * set up yet), so we call hns3_dev_infos_get instead.
	 */
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	(void)hns3_dev_infos_get(dev, &dev_info);
	return RTE_MIN(dev_info.max_tx_queues, RTE_MAX_QUEUES_PER_PORT);
}
void
hns3_tm_conf_init(struct rte_eth_dev *dev)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
	pf->tm_conf.nb_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
	pf->tm_conf.nb_shaper_profile_max = 1 + HNS3_MAX_TC_NUM;

	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
	pf->tm_conf.nb_shaper_profile = 0;

	pf->tm_conf.root = NULL;
	TAILQ_INIT(&pf->tm_conf.tc_list);
	TAILQ_INIT(&pf->tm_conf.queue_list);
	pf->tm_conf.nb_tc_node = 0;
	pf->tm_conf.nb_queue_node = 0;

	pf->tm_conf.committed = false;
}
void
hns3_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;
	struct hns3_tm_node *tm_node;

	if (pf->tm_conf.nb_queue_node > 0) {
		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
			TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
			rte_free(tm_node);
		}
		pf->tm_conf.nb_queue_node = 0;
	}

	if (pf->tm_conf.nb_tc_node > 0) {
		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
			TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
			rte_free(tm_node);
		}
		pf->tm_conf.nb_tc_node = 0;
	}

	if (pf->tm_conf.root != NULL) {
		rte_free(pf->tm_conf.root);
		pf->tm_conf.root = NULL;
	}

	if (pf->tm_conf.nb_shaper_profile > 0) {
		while ((shaper_profile =
		       TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
			TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
				     shaper_profile, node);
			rte_free(shaper_profile);
		}
		pf->tm_conf.nb_shaper_profile = 0;
	}

	pf->tm_conf.nb_leaf_nodes_max = 0;
	pf->tm_conf.nb_nodes_max = 0;
	pf->tm_conf.nb_shaper_profile_max = 0;
}
static inline uint64_t
hns3_tm_rate_convert_firmware2tm(uint32_t firmware_rate)
{
#define FIRMWARE_TO_TM_RATE_SCALE	125000
	/* TM rate unit is Bps (bytes per second), firmware rate is Mbps */
	return ((uint64_t)firmware_rate) * FIRMWARE_TO_TM_RATE_SCALE;
}

static inline uint32_t
hns3_tm_rate_convert_tm2firmware(uint64_t tm_rate)
{
#define TM_TO_FIRMWARE_RATE_SCALE	125000
	/* TM rate unit is Bps (bytes per second), firmware rate is Mbps */
	return (uint32_t)(tm_rate / TM_TO_FIRMWARE_RATE_SCALE);
}
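
/*
 * Worked example (illustrative, not driver code): 1 Mbit/s equals
 * 1000000 / 8 = 125000 bytes/s, hence the scale above. A firmware rate
 * of 10000 Mbps (10 Gbit/s) converts to 10000 * 125000 = 1250000000 Bps
 * on the TM side; converting back, the integer division truncates any
 * remainder below 1 Mbps.
 */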
static int
hns3_tm_capabilities_get(struct rte_eth_dev *dev,
			 struct rte_tm_capabilities *cap,
			 struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	if (cap == NULL || error == NULL)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	cap->n_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
	cap->n_levels_max = HNS3_TM_NODE_LEVEL_MAX;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = 1 + HNS3_MAX_TC_NUM;
	cap->shaper_private_n_max = 1 + HNS3_MAX_TC_NUM;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	cap->shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;

	cap->sched_n_children_max = max_tx_queues;
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	cap->sched_wfq_weight_max = 1;

	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
	cap->stats_mask = 0;

	return 0;
}
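
/*
 * Illustrative application-side query through the generic rte_tm API
 * (port_id is assumed; this is a usage sketch, not driver code):
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error error;
 *
 *	if (rte_tm_capabilities_get(port_id, &cap, &error) == 0)
 *		printf("max TM nodes: %u\n", cap.n_nodes_max);
 */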
static struct hns3_tm_shaper_profile *
hns3_tm_shaper_profile_search(struct rte_eth_dev *dev,
			      uint32_t shaper_profile_id)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_shaper_profile_list *shaper_profile_list =
		&pf->tm_conf.shaper_profile_list;
	struct hns3_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}
static int
hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
				   struct rte_tm_shaper_params *profile,
				   struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}

	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}

	if (profile->peak.rate >
	    hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate)) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
		error->message = "peak rate too large";
		return -EINVAL;
	}

	if (profile->peak.rate < hns3_tm_rate_convert_firmware2tm(1)) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
		error->message = "peak rate must be at least 1 Mbps";
		return -EINVAL;
	}

	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}

	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	if (profile->packet_mode) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE;
		error->message = "packet mode not supported";
		return -EINVAL;
	}

	return 0;
}
static int
hns3_tm_shaper_profile_add(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id,
			   struct rte_tm_shaper_params *profile,
			   struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;
	int ret;

	if (profile == NULL || error == NULL)
		return -EINVAL;

	if (pf->tm_conf.nb_shaper_profile >=
	    pf->tm_conf.nb_shaper_profile_max) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "too many profiles";
		return -EINVAL;
	}

	ret = hns3_tm_shaper_profile_param_check(dev, profile, error);
	if (ret)
		return ret;

	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("hns3_tm_shaper_profile",
				     sizeof(struct hns3_tm_shaper_profile),
				     0);
	if (shaper_profile == NULL)
		return -ENOMEM;

	shaper_profile->shaper_profile_id = shaper_profile_id;
	memcpy(&shaper_profile->profile, profile,
	       sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
			  shaper_profile, node);
	pf->tm_conf.nb_shaper_profile++;

	return 0;
}
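
/*
 * Illustrative profile creation (usage sketch with an assumed port_id,
 * not driver code). Only peak.rate is honoured by this driver; it is
 * expressed in bytes per second, so 1250000000 Bps is 10 Gbit/s:
 *
 *	struct rte_tm_shaper_params params = {
 *		.peak = { .rate = 1250000000 },
 *	};
 *	struct rte_tm_error error;
 *	int ret;
 *
 *	ret = rte_tm_shaper_profile_add(port_id, 0, &params, &error);
 */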
static int
hns3_tm_shaper_profile_del(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id,
			   struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;

	if (error == NULL)
		return -EINVAL;

	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
	if (shaper_profile == NULL) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID does not exist";
		return -EINVAL;
	}

	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);
	pf->tm_conf.nb_shaper_profile--;

	return 0;
}
static struct hns3_tm_node *
hns3_tm_node_search(struct rte_eth_dev *dev,
		    uint32_t node_id,
		    enum hns3_tm_node_type *node_type)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node_list *queue_list = &pf->tm_conf.queue_list;
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;

	if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
		*node_type = HNS3_TM_NODE_TYPE_PORT;
		return pf->tm_conf.root;
	}

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = HNS3_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = HNS3_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}
static int
hns3_tm_nonleaf_node_param_check(struct rte_eth_dev *dev,
				 struct rte_tm_node_params *params,
				 struct rte_tm_error *error)
{
	struct hns3_tm_shaper_profile *shaper_profile;

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		shaper_profile = hns3_tm_shaper_profile_search(dev,
				 params->shaper_profile_id);
		if (shaper_profile == NULL) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
			error->message = "shaper profile does not exist";
			return -EINVAL;
		}
	}

	if (params->nonleaf.wfq_weight_mode) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
		error->message = "WFQ not supported";
		return -EINVAL;
	}

	if (params->nonleaf.n_sp_priorities != 1) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
		error->message = "SP priority not supported";
		return -EINVAL;
	}

	return 0;
}
static int
hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev __rte_unused,
			      struct rte_tm_node_params *params,
			      struct rte_tm_error *error)
{
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
		error->message = "shaper not supported";
		return -EINVAL;
	}

	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "congestion management not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.wred_profile_id != RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}
static int
hns3_tm_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
			 uint32_t priority, uint32_t weight,
			 struct rte_tm_node_params *params,
			 struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (hns3_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	if (node_id >= pf->tm_conf.nb_leaf_nodes_max)
		return hns3_tm_nonleaf_node_param_check(dev, params, error);

	return hns3_tm_leaf_node_param_check(dev, params, error);
}
static int
hns3_tm_port_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		      uint32_t level_id, struct rte_tm_node_params *params,
		      struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_PORT) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	if (node_id != pf->tm_conf.nb_nodes_max - 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid port node ID";
		return -EINVAL;
	}

	if (pf->tm_conf.root) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "already have a root";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = NULL;
	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
				params->shaper_profile_id);
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	pf->tm_conf.root = tm_node;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count++;

	return 0;
}
static int
hns3_tm_tc_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		    uint32_t level_id, struct hns3_tm_node *parent_node,
		    struct rte_tm_node_params *params,
		    struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	if (node_id >= pf->tm_conf.nb_nodes_max - 1 ||
	    node_id < pf->tm_conf.nb_leaf_nodes_max ||
	    hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id) >= hw->num_tc) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid tc node ID";
		return -EINVAL;
	}

	if (pf->tm_conf.nb_tc_node >= hw->num_tc) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "too many TCs";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
				params->shaper_profile_id);
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list, tm_node, node);
	pf->tm_conf.nb_tc_node++;
	tm_node->parent->reference_count++;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count++;

	return 0;
}
static int
hns3_tm_queue_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		       uint32_t level_id, struct hns3_tm_node *parent_node,
		       struct rte_tm_node_params *params,
		       struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_QUEUE) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	/* note: dev->data->nb_tx_queues <= max_tx_queues */
	if (node_id >= dev->data->nb_tx_queues) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid queue node ID";
		return -EINVAL;
	}

	if (hns3_txq_mapped_tc_get(hw, node_id) !=
	    hns3_tm_calc_node_tc_no(&pf->tm_conf, parent_node->id)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "queue's TC does not match parent's TC";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list, tm_node, node);
	pf->tm_conf.nb_queue_node++;
	tm_node->parent->reference_count++;

	return 0;
}
static int
hns3_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		 uint32_t parent_node_id, uint32_t priority,
		 uint32_t weight, uint32_t level_id,
		 struct rte_tm_node_params *params,
		 struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type parent_node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *parent_node;
	int ret;

	if (params == NULL || error == NULL)
		return -EINVAL;

	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = hns3_tm_node_param_check(dev, node_id, priority, weight,
				       params, error);
	if (ret)
		return ret;

	/* the root node is the only node without a parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL)
		return hns3_tm_port_node_add(dev, node_id, level_id,
					     params, error);

	parent_node = hns3_tm_node_search(dev, parent_node_id,
					  &parent_node_type);
	if (parent_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent does not exist";
		return -EINVAL;
	}

	if (parent_node_type != HNS3_TM_NODE_TYPE_PORT &&
	    parent_node_type != HNS3_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}

	if (parent_node_type == HNS3_TM_NODE_TYPE_PORT)
		return hns3_tm_tc_node_add(dev, node_id, level_id,
					   parent_node, params, error);
	else
		return hns3_tm_queue_node_add(dev, node_id, level_id,
					      parent_node, params, error);
}
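
/*
 * Illustrative hierarchy construction (usage sketch, not driver code;
 * port_id and a single TC with one queue are assumed). Per the ID checks
 * above, queue nodes use IDs [0, nb_tx_queues), TC nodes start at
 * max_tx_queues, and the port node ID is nb_nodes_max - 1. Priority must
 * be 0 and weight must be 1:
 *
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *		.nonleaf = { .n_sp_priorities = 1 },
 *	};
 *	struct rte_tm_node_params qp = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *		.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE,
 *	};
 *
 *	rte_tm_node_add(port_id, port_node_id, RTE_TM_NODE_ID_NULL, 0, 1,
 *			HNS3_TM_NODE_LEVEL_PORT, &np, &error);
 *	rte_tm_node_add(port_id, tc0_node_id, port_node_id, 0, 1,
 *			HNS3_TM_NODE_LEVEL_TC, &np, &error);
 *	rte_tm_node_add(port_id, 0, tc0_node_id, 0, 1,
 *			HNS3_TM_NODE_LEVEL_QUEUE, &qp, &error);
 */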
static void
hns3_tm_node_do_delete(struct hns3_pf *pf,
		       enum hns3_tm_node_type node_type,
		       struct hns3_tm_node *tm_node)
{
	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
		if (tm_node->shaper_profile)
			tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		pf->tm_conf.root = NULL;
		return;
	}

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == HNS3_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
		pf->tm_conf.nb_tc_node--;
	} else {
		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
		pf->tm_conf.nb_queue_node--;
	}
	rte_free(tm_node);
}
static int
hns3_tm_node_delete(struct rte_eth_dev *dev,
		    uint32_t node_id,
		    struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *tm_node;

	if (error == NULL)
		return -EINVAL;

	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "cannot delete a node which has children";
		return -EINVAL;
	}

	hns3_tm_node_do_delete(pf, node_type, tm_node);

	return 0;
}
static int
hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		      int *is_leaf, struct rte_tm_error *error)
{
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *tm_node;

	if (is_leaf == NULL || error == NULL)
		return -EINVAL;

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (node_type == HNS3_TM_NODE_TYPE_QUEUE)
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}
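
/*
 * Illustrative leaf query (usage sketch, assumed port_id and node_id;
 * not driver code):
 *
 *	int is_leaf;
 *
 *	if (rte_tm_node_type_get(port_id, node_id, &is_leaf, &error) == 0)
 *		printf("node %u is %s\n", node_id,
 *		       is_leaf ? "a queue (leaf)" : "non-leaf");
 */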
static void
hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev,
				       uint32_t level_id,
				       struct rte_tm_level_capabilities *cap)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	if (level_id == HNS3_TM_NODE_LEVEL_PORT) {
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->n_nodes_leaf_max = 0;
	} else {
		cap->n_nodes_max = HNS3_MAX_TC_NUM;
		cap->n_nodes_nonleaf_max = HNS3_MAX_TC_NUM;
		cap->n_nodes_leaf_max = 0;
	}

	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->nonleaf.shaper_private_supported = true;
	cap->nonleaf.shaper_private_dual_rate_supported = false;
	cap->nonleaf.shaper_private_rate_min = 0;
	cap->nonleaf.shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
	cap->nonleaf.shaper_shared_n_max = 0;
	if (level_id == HNS3_TM_NODE_LEVEL_PORT)
		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
	else
		cap->nonleaf.sched_n_children_max = max_tx_queues;
	cap->nonleaf.sched_sp_n_priorities_max = 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
	cap->nonleaf.sched_wfq_n_groups_max = 0;
	cap->nonleaf.sched_wfq_weight_max = 1;
	cap->nonleaf.stats_mask = 0;
}
static void
hns3_tm_leaf_level_capabilities_get(struct rte_eth_dev *dev,
				    struct rte_tm_level_capabilities *cap)
{
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	cap->n_nodes_max = max_tx_queues;
	cap->n_nodes_nonleaf_max = 0;
	cap->n_nodes_leaf_max = max_tx_queues;

	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->leaf.shaper_private_supported = false;
	cap->leaf.shaper_private_dual_rate_supported = false;
	cap->leaf.shaper_private_rate_min = 0;
	cap->leaf.shaper_private_rate_max = 0;
	cap->leaf.shaper_shared_n_max = 0;
	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = false;
	cap->leaf.cman_wred_context_shared_n_max = 0;
	cap->leaf.stats_mask = 0;
}
static int
hns3_tm_level_capabilities_get(struct rte_eth_dev *dev,
			       uint32_t level_id,
			       struct rte_tm_level_capabilities *cap,
			       struct rte_tm_error *error)
{
	if (cap == NULL || error == NULL)
		return -EINVAL;

	if (level_id >= HNS3_TM_NODE_LEVEL_MAX) {
		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
		error->message = "too deep level";
		return -EINVAL;
	}

	memset(cap, 0, sizeof(struct rte_tm_level_capabilities));

	if (level_id != HNS3_TM_NODE_LEVEL_QUEUE)
		hns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap);
	else
		hns3_tm_leaf_level_capabilities_get(dev, cap);

	return 0;
}
static void
hns3_tm_nonleaf_node_capabilities_get(struct rte_eth_dev *dev,
				      enum hns3_tm_node_type node_type,
				      struct rte_tm_node_capabilities *cap)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	cap->shaper_private_supported = true;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	cap->shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
	cap->shaper_shared_n_max = 0;

	if (node_type == HNS3_TM_NODE_TYPE_PORT)
		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
	else
		cap->nonleaf.sched_n_children_max = max_tx_queues;
	cap->nonleaf.sched_sp_n_priorities_max = 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
	cap->nonleaf.sched_wfq_n_groups_max = 0;
	cap->nonleaf.sched_wfq_weight_max = 1;

	cap->stats_mask = 0;
}
static void
hns3_tm_leaf_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
				   struct rte_tm_node_capabilities *cap)
{
	cap->shaper_private_supported = false;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	cap->shaper_private_rate_max = 0;
	cap->shaper_shared_n_max = 0;

	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = false;
	cap->leaf.cman_wred_context_shared_n_max = 0;

	cap->stats_mask = 0;
}
static int
hns3_tm_node_capabilities_get(struct rte_eth_dev *dev,
			      uint32_t node_id,
			      struct rte_tm_node_capabilities *cap,
			      struct rte_tm_error *error)
{
	enum hns3_tm_node_type node_type;
	struct hns3_tm_node *tm_node;

	if (cap == NULL || error == NULL)
		return -EINVAL;

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	memset(cap, 0, sizeof(struct rte_tm_node_capabilities));

	if (node_type != HNS3_TM_NODE_TYPE_QUEUE)
		hns3_tm_nonleaf_node_capabilities_get(dev, node_type, cap);
	else
		hns3_tm_leaf_node_capabilities_get(dev, cap);

	return 0;
}
static int
hns3_tm_config_port_rate(struct hns3_hw *hw,
			 struct hns3_tm_shaper_profile *shaper_profile)
{
	uint32_t firmware_rate;
	uint64_t rate;

	if (shaper_profile) {
		rate = shaper_profile->profile.peak.rate;
		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
	} else {
		firmware_rate = hw->dcb_info.pg_info[0].bw_limit;
	}

	/*
	 * The TM shaper topology after device init:
	 *     pri0 shaper   --->|
	 *     pri1 shaper   --->|
	 *     ...               |----> pg0 shaper ----> port shaper
	 *     ...               |
	 *     pri7 shaper   --->|
	 *
	 * Because the port shaper rate may be changed by firmware, the
	 * driver uses the pg0 shaper to impose the port rate limit and
	 * thus avoids concurrent configuration.
	 *
	 * The final port rate = MIN(pg0 shaper rate, port shaper rate)
	 */
	return hns3_pg_shaper_rate_cfg(hw, 0, firmware_rate);
}
static int
hns3_tm_config_tc_rate(struct hns3_hw *hw,
		       uint8_t tc_no,
		       struct hns3_tm_shaper_profile *shaper_profile)
{
	uint32_t firmware_rate;
	uint64_t rate;

	if (shaper_profile) {
		rate = shaper_profile->profile.peak.rate;
		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
	} else {
		firmware_rate = hw->dcb_info.tc_info[tc_no].bw_limit;
	}

	return hns3_pri_shaper_rate_cfg(hw, tc_no, firmware_rate);
}
static bool
hns3_tm_configure_check(struct hns3_hw *hw, struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_conf *tm_conf = &pf->tm_conf;
	struct hns3_tm_node_list *tc_list = &tm_conf->tc_list;
	struct hns3_tm_node_list *queue_list = &tm_conf->queue_list;
	struct hns3_tm_node *tm_node;

	/* TC */
	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (!tm_node->reference_count) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "TC without queue assigned";
			return false;
		}

		if (hns3_tm_calc_node_tc_no(tm_conf, tm_node->id) >=
		    hw->num_tc) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "node's TC does not exist";
			return false;
		}
	}

	/* queue */
	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id >= hw->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "node's queue is invalid";
			return false;
		}

		if (hns3_txq_mapped_tc_get(hw, tm_node->id) !=
		    hns3_tm_calc_node_tc_no(tm_conf, tm_node->parent->id)) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "queue's TC does not match parent's TC";
			return false;
		}
	}

	return true;
}
static int
hns3_tm_hierarchy_do_commit(struct hns3_hw *hw,
			    struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;
	uint8_t tc_no;
	int ret;

	/* port */
	tm_node = pf->tm_conf.root;
	if (tm_node->shaper_profile) {
		ret = hns3_tm_config_port_rate(hw, tm_node->shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "fail to set port peak rate";
			return -EIO;
		}
	}

	/* TC */
	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->shaper_profile == NULL)
			continue;

		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
		ret = hns3_tm_config_tc_rate(hw, tc_no,
					     tm_node->shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "fail to set TC peak rate";
			return -EIO;
		}
	}

	return 0;
}
static int
hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
			 int clear_on_fail,
			 struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	if (error == NULL)
		return -EINVAL;

	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "device is resetting";
		/* don't goto fail_clear, user may try later */
		return -EBUSY;
	}

	if (pf->tm_conf.root == NULL)
		goto done;

	/* check the configuration before commit to make sure key
	 * constraints are not violated
	 */
	if (!hns3_tm_configure_check(hw, error))
		goto fail_clear;

	ret = hns3_tm_hierarchy_do_commit(hw, error);
	if (ret)
		goto fail_clear;

done:
	pf->tm_conf.committed = true;
	return 0;

fail_clear:
	if (clear_on_fail) {
		hns3_tm_conf_uninit(dev);
		hns3_tm_conf_init(dev);
	}
	return -EINVAL;
}
static int
hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
			      int clear_on_fail,
			      struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}
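
/*
 * Illustrative commit (usage sketch, assumed port_id; not driver code).
 * clear_on_fail = 1 asks the driver to reset its TM configuration if
 * the commit is rejected:
 *
 *	struct rte_tm_error error;
 *	int ret = rte_tm_hierarchy_commit(port_id, 1, &error);
 *
 *	if (ret != 0)
 *		printf("TM commit failed: %s\n", error.message);
 */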
static int
hns3_tm_node_shaper_do_update(struct hns3_hw *hw,
			      uint32_t node_id,
			      enum hns3_tm_node_type node_type,
			      struct hns3_tm_shaper_profile *shaper_profile,
			      struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	uint8_t tc_no;
	int ret;

	if (node_type == HNS3_TM_NODE_TYPE_QUEUE) {
		if (shaper_profile != NULL) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
			error->message = "queue node shaper not supported";
			return -EINVAL;
		}
		return 0;
	}

	if (!pf->tm_conf.committed)
		return 0;

	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
		ret = hns3_tm_config_port_rate(hw, shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "fail to update port peak rate";
		}

		return ret;
	}

	/* update TC's shaper */
	tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id);
	ret = hns3_tm_config_tc_rate(hw, tc_no, shaper_profile);
	if (ret) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "fail to update TC peak rate";
	}

	return ret;
}
static int
hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
			   uint32_t node_id,
			   uint32_t shaper_profile_id,
			   struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_shaper_profile *profile = NULL;
	struct hns3_tm_node *tm_node;

	if (error == NULL)
		return -EINVAL;

	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "device is resetting";
		return -EBUSY;
	}

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (shaper_profile_id == tm_node->params.shaper_profile_id)
		return 0;

	if (shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
		if (profile == NULL) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
			error->message = "profile ID does not exist";
			return -EINVAL;
		}
	}

	if (hns3_tm_node_shaper_do_update(hw, node_id, node_type,
					  profile, error))
		return -EINVAL;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->shaper_profile = profile;
	tm_node->params.shaper_profile_id = shaper_profile_id;
	if (profile != NULL)
		profile->reference_count++;

	return 0;
}
static int
hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
				uint32_t node_id,
				uint32_t shaper_profile_id,
				struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_node_shaper_update(dev, node_id,
					 shaper_profile_id, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}
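
/*
 * Illustrative runtime rate change (usage sketch, assumed port_id and
 * node/profile IDs; not driver code). After a commit, re-pointing a
 * port or TC node at another profile reprograms the shaper immediately:
 *
 *	ret = rte_tm_node_shaper_update(port_id, tc0_node_id,
 *					new_profile_id, &error);
 */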
static const struct rte_tm_ops hns3_tm_ops = {
	.capabilities_get = hns3_tm_capabilities_get,
	.shaper_profile_add = hns3_tm_shaper_profile_add,
	.shaper_profile_delete = hns3_tm_shaper_profile_del,
	.node_add = hns3_tm_node_add,
	.node_delete = hns3_tm_node_delete,
	.node_type_get = hns3_tm_node_type_get,
	.level_capabilities_get = hns3_tm_level_capabilities_get,
	.node_capabilities_get = hns3_tm_node_capabilities_get,
	.hierarchy_commit = hns3_tm_hierarchy_commit_wrap,
	.node_shaper_update = hns3_tm_node_shaper_update_wrap,
};

int
hns3_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		void *arg)
{
	if (arg == NULL)
		return -EINVAL;

	*(const void **)arg = &hns3_tm_ops;

	return 0;
}
void
hns3_tm_dev_start_proc(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);

	if (pf->tm_conf.root && !pf->tm_conf.committed)
		hns3_warn(hw,
			"please call hierarchy_commit() before starting the port.");
}

/*
 * We clear the tm_conf committed flag when the device stops so that the
 * user can modify the TM configuration (e.g. add or delete a node).
 *
 * If the user were not to call hierarchy commit when the device starts
 * again, the port/TC shaper rates would silently stay at the previously
 * committed values. To avoid that, we restore the default port/TC shaper
 * rates when the device stops.
 */
void
hns3_tm_dev_stop_proc(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;
	uint8_t tc_no;

	if (!pf->tm_conf.committed)
		return;

	tm_node = pf->tm_conf.root;
	if (tm_node != NULL && tm_node->shaper_profile)
		(void)hns3_tm_config_port_rate(hw, NULL);

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->shaper_profile == NULL)
			continue;
		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
		(void)hns3_tm_config_tc_rate(hw, tc_no, NULL);
	}

	pf->tm_conf.committed = false;
}
int
hns3_tm_conf_update(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct rte_tm_error error;

	if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)
		return 0;

	memset(&error, 0, sizeof(struct rte_tm_error));
	return hns3_tm_hierarchy_do_commit(hw, &error);
}