drivers/net/hns3/hns3_tm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2020 Hisilicon Limited.
 */

#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_dcb.h"
#include "hns3_logs.h"
#include "hns3_tm.h"

static inline uint32_t
hns3_tm_max_tx_queues_get(struct rte_eth_dev *dev)
{
        /*
         * This API is called at the PCI device probe stage, when we can't
         * call rte_eth_dev_info_get() to get max_tx_queues (because
         * rte_eth_devices is not set up yet), so we call hns3_dev_infos_get()
         * directly.
         */
        struct rte_eth_dev_info dev_info;

        memset(&dev_info, 0, sizeof(dev_info));
        (void)hns3_dev_infos_get(dev, &dev_info);
        return RTE_MIN(dev_info.max_tx_queues, RTE_MAX_QUEUES_PER_PORT);
}

void
hns3_tm_conf_init(struct rte_eth_dev *dev)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
        pf->tm_conf.nb_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
        pf->tm_conf.nb_shaper_profile_max = 1 + HNS3_MAX_TC_NUM;

        TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
        pf->tm_conf.nb_shaper_profile = 0;

        pf->tm_conf.root = NULL;
        TAILQ_INIT(&pf->tm_conf.tc_list);
        TAILQ_INIT(&pf->tm_conf.queue_list);
        pf->tm_conf.nb_tc_node = 0;
        pf->tm_conf.nb_queue_node = 0;

        pf->tm_conf.committed = false;
}
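
/*
 * Node ID layout implied by the sizing above (a sketch derived from the
 * checks in hns3_tm_node_param_check(), hns3_tm_port_node_add() and
 * hns3_tm_tc_node_add() below, not from the original source):
 *
 *   queue (leaf) nodes : 0 .. nb_leaf_nodes_max - 1
 *   TC nodes           : nb_leaf_nodes_max .. nb_nodes_max - 2
 *   port (root) node   : nb_nodes_max - 1
 *
 * A node ID therefore encodes its level: IDs below nb_leaf_nodes_max are
 * leaves, everything else is non-leaf, and TC node IDs map to a TC number
 * via hns3_tm_calc_node_tc_no().
 */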

void
hns3_tm_conf_uninit(struct rte_eth_dev *dev)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_shaper_profile *shaper_profile;
        struct hns3_tm_node *tm_node;

        if (pf->tm_conf.nb_queue_node > 0) {
                while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
                        TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
                        rte_free(tm_node);
                }
                pf->tm_conf.nb_queue_node = 0;
        }

        if (pf->tm_conf.nb_tc_node > 0) {
                while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
                        TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
                        rte_free(tm_node);
                }
                pf->tm_conf.nb_tc_node = 0;
        }

        if (pf->tm_conf.root != NULL) {
                rte_free(pf->tm_conf.root);
                pf->tm_conf.root = NULL;
        }

        if (pf->tm_conf.nb_shaper_profile > 0) {
                while ((shaper_profile =
                       TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
                        TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
                                     shaper_profile, node);
                        rte_free(shaper_profile);
                }
                pf->tm_conf.nb_shaper_profile = 0;
        }

        pf->tm_conf.nb_leaf_nodes_max = 0;
        pf->tm_conf.nb_nodes_max = 0;
        pf->tm_conf.nb_shaper_profile_max = 0;
}

static inline uint64_t
hns3_tm_rate_convert_firmware2tm(uint32_t firmware_rate)
{
#define FIRMWARE_TO_TM_RATE_SCALE       125000
        /* TM rate unit is Bps (bytes/s), firmware rate unit is Mbit/s */
        return ((uint64_t)firmware_rate) * FIRMWARE_TO_TM_RATE_SCALE;
}

static inline uint32_t
hns3_tm_rate_convert_tm2firmware(uint64_t tm_rate)
{
#define TM_TO_FIRMWARE_RATE_SCALE       125000
        /* TM rate unit is Bps (bytes/s), firmware rate unit is Mbit/s */
        return (uint32_t)(tm_rate / TM_TO_FIRMWARE_RATE_SCALE);
}
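
/*
 * Worked example (illustrative, not part of the driver): the rte_tm API
 * expresses rates in bytes per second while the firmware uses Mbit/s, and
 * 1 Mbit/s = 1000000 / 8 = 125000 Bps, hence the scale factor above.
 * For a 200 Gbit/s rate:
 *
 *   hns3_tm_rate_convert_firmware2tm(200000) == 25000000000 (25 GBps)
 *   hns3_tm_rate_convert_tm2firmware(25000000000ULL) == 200000
 *
 * Note the tm2firmware direction truncates: rates below 125000 Bps round
 * down to 0 Mbit/s.
 */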

static int
hns3_tm_capabilities_get(struct rte_eth_dev *dev,
                         struct rte_tm_capabilities *cap,
                         struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        if (cap == NULL || error == NULL)
                return -EINVAL;

        error->type = RTE_TM_ERROR_TYPE_NONE;

        memset(cap, 0, sizeof(struct rte_tm_capabilities));

        cap->n_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
        cap->n_levels_max = HNS3_TM_NODE_LEVEL_MAX;
        cap->non_leaf_nodes_identical = 1;
        cap->leaf_nodes_identical = 1;
        cap->shaper_n_max = 1 + HNS3_MAX_TC_NUM;
        cap->shaper_private_n_max = 1 + HNS3_MAX_TC_NUM;
        cap->shaper_private_dual_rate_n_max = 0;
        cap->shaper_private_rate_min = 0;
        cap->shaper_private_rate_max =
                hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
        cap->shaper_shared_n_max = 0;
        cap->shaper_shared_n_nodes_per_shaper_max = 0;
        cap->shaper_shared_n_shapers_per_node_max = 0;
        cap->shaper_shared_dual_rate_n_max = 0;
        cap->shaper_shared_rate_min = 0;
        cap->shaper_shared_rate_max = 0;

        cap->sched_n_children_max = max_tx_queues;
        cap->sched_sp_n_priorities_max = 1;
        cap->sched_wfq_n_children_per_group_max = 0;
        cap->sched_wfq_n_groups_max = 0;
        cap->sched_wfq_weight_max = 1;

        cap->cman_head_drop_supported = 0;
        cap->dynamic_update_mask = 0;
        cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
        cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
        cap->cman_wred_context_n_max = 0;
        cap->cman_wred_context_private_n_max = 0;
        cap->cman_wred_context_shared_n_max = 0;
        cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
        cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
        cap->stats_mask = 0;

        return 0;
}
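
/*
 * Application-side usage sketch (port_id and error handling assumed, not
 * part of the driver): the callback above is reached through the generic
 * rte_tm API, e.g.:
 *
 *   struct rte_tm_capabilities cap;
 *   struct rte_tm_error err;
 *
 *   if (rte_tm_capabilities_get(port_id, &cap, &err) == 0)
 *           printf("max nodes: %u, max levels: %u\n",
 *                  cap.n_nodes_max, cap.n_levels_max);
 */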

static struct hns3_tm_shaper_profile *
hns3_tm_shaper_profile_search(struct rte_eth_dev *dev,
                              uint32_t shaper_profile_id)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_shaper_profile_list *shaper_profile_list =
                &pf->tm_conf.shaper_profile_list;
        struct hns3_tm_shaper_profile *shaper_profile;

        TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
                if (shaper_profile_id == shaper_profile->shaper_profile_id)
                        return shaper_profile;
        }

        return NULL;
}

static int
hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
                                   struct rte_tm_shaper_params *profile,
                                   struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (profile->committed.rate) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
                error->message = "committed rate not supported";
                return -EINVAL;
        }

        if (profile->committed.size) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
                error->message = "committed bucket size not supported";
                return -EINVAL;
        }

        if (profile->peak.rate >
            hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate)) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
                error->message = "peak rate too large";
                return -EINVAL;
        }

        if (profile->peak.size) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
                error->message = "peak bucket size not supported";
                return -EINVAL;
        }

        if (profile->pkt_length_adjust) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
                error->message = "packet length adjustment not supported";
                return -EINVAL;
        }

        if (profile->packet_mode) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE;
                error->message = "packet mode not supported";
                return -EINVAL;
        }

        return 0;
}

static int
hns3_tm_shaper_profile_add(struct rte_eth_dev *dev,
                           uint32_t shaper_profile_id,
                           struct rte_tm_shaper_params *profile,
                           struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_shaper_profile *shaper_profile;
        int ret;

        if (profile == NULL || error == NULL)
                return -EINVAL;

        if (pf->tm_conf.nb_shaper_profile >=
            pf->tm_conf.nb_shaper_profile_max) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "too many profiles";
                return -EINVAL;
        }

        ret = hns3_tm_shaper_profile_param_check(dev, profile, error);
        if (ret)
                return ret;

        shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
        if (shaper_profile) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                error->message = "profile ID already exists";
                return -EINVAL;
        }

        shaper_profile = rte_zmalloc("hns3_tm_shaper_profile",
                                     sizeof(struct hns3_tm_shaper_profile),
                                     0);
        if (shaper_profile == NULL)
                return -ENOMEM;

        shaper_profile->shaper_profile_id = shaper_profile_id;
        memcpy(&shaper_profile->profile, profile,
               sizeof(struct rte_tm_shaper_params));
        TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
                          shaper_profile, node);
        pf->tm_conf.nb_shaper_profile++;

        return 0;
}
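
/*
 * Usage sketch (application side; port_id, profile ID 0 and the rate value
 * are assumptions): given the checks in
 * hns3_tm_shaper_profile_param_check(), a valid profile for this driver
 * sets only the peak rate; committed rate/size, bucket sizes, length
 * adjustment and packet mode must stay zero, e.g.:
 *
 *   struct rte_tm_shaper_params sp;
 *   struct rte_tm_error err;
 *
 *   memset(&sp, 0, sizeof(sp));
 *   sp.peak.rate = 1250000000;  // 10 Gbit/s expressed in bytes per second
 *   rte_tm_shaper_profile_add(port_id, 0, &sp, &err);
 */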

static int
hns3_tm_shaper_profile_del(struct rte_eth_dev *dev,
                           uint32_t shaper_profile_id,
                           struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_shaper_profile *shaper_profile;

        if (error == NULL)
                return -EINVAL;

        shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
        if (shaper_profile == NULL) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                error->message = "profile ID does not exist";
                return -EINVAL;
        }

        if (shaper_profile->reference_count) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
                error->message = "profile in use";
                return -EINVAL;
        }

        TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
        rte_free(shaper_profile);
        pf->tm_conf.nb_shaper_profile--;

        return 0;
}

static struct hns3_tm_node *
hns3_tm_node_search(struct rte_eth_dev *dev,
                    uint32_t node_id,
                    enum hns3_tm_node_type *node_type)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_node_list *queue_list = &pf->tm_conf.queue_list;
        struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
        struct hns3_tm_node *tm_node;

        if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
                *node_type = HNS3_TM_NODE_TYPE_PORT;
                return pf->tm_conf.root;
        }

        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (tm_node->id == node_id) {
                        *node_type = HNS3_TM_NODE_TYPE_TC;
                        return tm_node;
                }
        }

        TAILQ_FOREACH(tm_node, queue_list, node) {
                if (tm_node->id == node_id) {
                        *node_type = HNS3_TM_NODE_TYPE_QUEUE;
                        return tm_node;
                }
        }

        return NULL;
}

static int
hns3_tm_nonleaf_node_param_check(struct rte_eth_dev *dev,
                                 struct rte_tm_node_params *params,
                                 struct rte_tm_error *error)
{
        struct hns3_tm_shaper_profile *shaper_profile;

        if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
                shaper_profile = hns3_tm_shaper_profile_search(dev,
                                 params->shaper_profile_id);
                if (shaper_profile == NULL) {
                        error->type =
                                RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
                        error->message = "shaper profile does not exist";
                        return -EINVAL;
                }
        }

        if (params->nonleaf.wfq_weight_mode) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
                error->message = "WFQ not supported";
                return -EINVAL;
        }

        if (params->nonleaf.n_sp_priorities != 1) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
                error->message = "only one SP priority supported";
                return -EINVAL;
        }

        return 0;
}

static int
hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev __rte_unused,
                              struct rte_tm_node_params *params,
                              struct rte_tm_error *error)
{
        if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
                error->message = "shaper not supported";
                return -EINVAL;
        }

        if (params->leaf.cman) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
                error->message = "congestion management not supported";
                return -EINVAL;
        }

        if (params->leaf.wred.wred_profile_id != RTE_TM_WRED_PROFILE_ID_NONE) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
                error->message = "WRED not supported";
                return -EINVAL;
        }

        if (params->leaf.wred.shared_wred_context_id) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
                error->message = "WRED not supported";
                return -EINVAL;
        }

        if (params->leaf.wred.n_shared_wred_contexts) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
                error->message = "WRED not supported";
                return -EINVAL;
        }

        return 0;
}

static int
hns3_tm_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
                         uint32_t priority, uint32_t weight,
                         struct rte_tm_node_params *params,
                         struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;

        if (node_id == RTE_TM_NODE_ID_NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid node id";
                return -EINVAL;
        }

        if (hns3_tm_node_search(dev, node_id, &node_type)) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "node id already used";
                return -EINVAL;
        }

        if (priority) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
                error->message = "priority should be 0";
                return -EINVAL;
        }

        if (weight != 1) {
                error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
                error->message = "weight must be 1";
                return -EINVAL;
        }

        if (params->shared_shaper_id) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
                error->message = "shared shaper not supported";
                return -EINVAL;
        }
        if (params->n_shared_shapers) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
                error->message = "shared shaper not supported";
                return -EINVAL;
        }

        if (node_id >= pf->tm_conf.nb_leaf_nodes_max)
                return hns3_tm_nonleaf_node_param_check(dev, params, error);
        else
                return hns3_tm_leaf_node_param_check(dev, params, error);
}

static int
hns3_tm_port_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                      uint32_t level_id, struct rte_tm_node_params *params,
                      struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_node *tm_node;

        if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
            level_id != HNS3_TM_NODE_LEVEL_PORT) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                error->message = "wrong level";
                return -EINVAL;
        }

        if (node_id != pf->tm_conf.nb_nodes_max - 1) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid port node ID";
                return -EINVAL;
        }

        if (pf->tm_conf.root) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                error->message = "already have a root";
                return -EINVAL;
        }

        tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
        if (tm_node == NULL)
                return -ENOMEM;

        tm_node->id = node_id;
        tm_node->reference_count = 0;
        tm_node->parent = NULL;
        tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
                                  params->shaper_profile_id);
        memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
        pf->tm_conf.root = tm_node;

        if (tm_node->shaper_profile)
                tm_node->shaper_profile->reference_count++;

        return 0;
}

static int
hns3_tm_tc_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                    uint32_t level_id, struct hns3_tm_node *parent_node,
                    struct rte_tm_node_params *params,
                    struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_node *tm_node;

        if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
            level_id != HNS3_TM_NODE_LEVEL_TC) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                error->message = "wrong level";
                return -EINVAL;
        }

        if (node_id >= pf->tm_conf.nb_nodes_max - 1 ||
            node_id < pf->tm_conf.nb_leaf_nodes_max ||
            hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id) >= hw->num_tc) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid TC node ID";
                return -EINVAL;
        }

        if (pf->tm_conf.nb_tc_node >= hw->num_tc) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "too many TCs";
                return -EINVAL;
        }

        tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
        if (tm_node == NULL)
                return -ENOMEM;

        tm_node->id = node_id;
        tm_node->reference_count = 0;
        tm_node->parent = parent_node;
        tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
                                        params->shaper_profile_id);
        memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
        TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list, tm_node, node);
        pf->tm_conf.nb_tc_node++;
        tm_node->parent->reference_count++;

        if (tm_node->shaper_profile)
                tm_node->shaper_profile->reference_count++;

        return 0;
}

static int
hns3_tm_queue_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                       uint32_t level_id, struct hns3_tm_node *parent_node,
                       struct rte_tm_node_params *params,
                       struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_node *tm_node;

        if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
            level_id != HNS3_TM_NODE_LEVEL_QUEUE) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                error->message = "wrong level";
                return -EINVAL;
        }

        /* note: dev->data->nb_tx_queues <= max_tx_queues */
        if (node_id >= dev->data->nb_tx_queues) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid queue node ID";
                return -EINVAL;
        }

        if (hns3_txq_mapped_tc_get(hw, node_id) !=
            hns3_tm_calc_node_tc_no(&pf->tm_conf, parent_node->id)) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "queue's TC does not match parent's TC";
                return -EINVAL;
        }

        tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
        if (tm_node == NULL)
                return -ENOMEM;

        tm_node->id = node_id;
        tm_node->reference_count = 0;
        tm_node->parent = parent_node;
        memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
        TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list, tm_node, node);
        pf->tm_conf.nb_queue_node++;
        tm_node->parent->reference_count++;

        return 0;
}

static int
hns3_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                 uint32_t parent_node_id, uint32_t priority,
                 uint32_t weight, uint32_t level_id,
                 struct rte_tm_node_params *params,
                 struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum hns3_tm_node_type parent_node_type = HNS3_TM_NODE_TYPE_MAX;
        struct hns3_tm_node *parent_node;
        int ret;

        if (params == NULL || error == NULL)
                return -EINVAL;

        if (pf->tm_conf.committed) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "already committed";
                return -EINVAL;
        }

        ret = hns3_tm_node_param_check(dev, node_id, priority, weight,
                                       params, error);
        if (ret)
                return ret;

        /* the root node has no parent */
        if (parent_node_id == RTE_TM_NODE_ID_NULL)
                return hns3_tm_port_node_add(dev, node_id, level_id,
                                             params, error);

        parent_node = hns3_tm_node_search(dev, parent_node_id,
                                          &parent_node_type);
        if (parent_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                error->message = "parent does not exist";
                return -EINVAL;
        }

        if (parent_node_type != HNS3_TM_NODE_TYPE_PORT &&
            parent_node_type != HNS3_TM_NODE_TYPE_TC) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                error->message = "parent is not port or TC";
                return -EINVAL;
        }

        if (parent_node_type == HNS3_TM_NODE_TYPE_PORT)
                return hns3_tm_tc_node_add(dev, node_id, level_id,
                                           parent_node, params, error);
        else
                return hns3_tm_queue_node_add(dev, node_id, level_id,
                                              parent_node, params, error);
}
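
/*
 * Hierarchy-building sketch (application side; port_id, err, conf and the
 * node IDs follow the layout noted after hns3_tm_conf_init() and are
 * assumptions): add the port root first with RTE_TM_NODE_ID_NULL as the
 * parent, then TC nodes under it, then queue nodes under their TC. With
 * one TC and one queue (node ID 0 maps to txq 0):
 *
 *   uint32_t port_node = conf->nb_nodes_max - 1;
 *   uint32_t tc0_node = conf->nb_leaf_nodes_max;
 *   struct rte_tm_node_params np;
 *
 *   memset(&np, 0, sizeof(np));
 *   np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
 *   np.nonleaf.n_sp_priorities = 1;
 *   rte_tm_node_add(port_id, port_node, RTE_TM_NODE_ID_NULL, 0, 1,
 *                   RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *   rte_tm_node_add(port_id, tc0_node, port_node, 0, 1,
 *                   RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *
 *   memset(&np, 0, sizeof(np));
 *   np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
 *   np.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
 *   rte_tm_node_add(port_id, 0, tc0_node, 0, 1,
 *                   RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *
 * Per hns3_tm_node_param_check(), priority must be 0 and weight must be 1
 * for every node.
 */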

static void
hns3_tm_node_do_delete(struct hns3_pf *pf,
                       enum hns3_tm_node_type node_type,
                       struct hns3_tm_node *tm_node)
{
        if (node_type == HNS3_TM_NODE_TYPE_PORT) {
                if (tm_node->shaper_profile)
                        tm_node->shaper_profile->reference_count--;
                rte_free(tm_node);
                pf->tm_conf.root = NULL;
                return;
        }

        if (tm_node->shaper_profile)
                tm_node->shaper_profile->reference_count--;
        tm_node->parent->reference_count--;
        if (node_type == HNS3_TM_NODE_TYPE_TC) {
                TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
                pf->tm_conf.nb_tc_node--;
        } else {
                TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
                pf->tm_conf.nb_queue_node--;
        }
        rte_free(tm_node);
}

static int
hns3_tm_node_delete(struct rte_eth_dev *dev,
                    uint32_t node_id,
                    struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
        struct hns3_tm_node *tm_node;

        if (error == NULL)
                return -EINVAL;

        if (pf->tm_conf.committed) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "already committed";
                return -EINVAL;
        }

        tm_node = hns3_tm_node_search(dev, node_id, &node_type);
        if (tm_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        if (tm_node->reference_count) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "cannot delete a node which has children";
                return -EINVAL;
        }

        hns3_tm_node_do_delete(pf, node_type, tm_node);

        return 0;
}

static int
hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
                      int *is_leaf, struct rte_tm_error *error)
{
        enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
        struct hns3_tm_node *tm_node;

        if (is_leaf == NULL || error == NULL)
                return -EINVAL;

        tm_node = hns3_tm_node_search(dev, node_id, &node_type);
        if (tm_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        if (node_type == HNS3_TM_NODE_TYPE_QUEUE)
                *is_leaf = true;
        else
                *is_leaf = false;

        return 0;
}

static void
hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev,
                                       uint32_t level_id,
                                       struct rte_tm_level_capabilities *cap)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        if (level_id == HNS3_TM_NODE_LEVEL_PORT) {
                cap->n_nodes_max = 1;
                cap->n_nodes_nonleaf_max = 1;
                cap->n_nodes_leaf_max = 0;
        } else {
                cap->n_nodes_max = HNS3_MAX_TC_NUM;
                cap->n_nodes_nonleaf_max = HNS3_MAX_TC_NUM;
                cap->n_nodes_leaf_max = 0;
        }

        cap->non_leaf_nodes_identical = 1;
        cap->leaf_nodes_identical = 1;

        cap->nonleaf.shaper_private_supported = true;
        cap->nonleaf.shaper_private_dual_rate_supported = false;
        cap->nonleaf.shaper_private_rate_min = 0;
        cap->nonleaf.shaper_private_rate_max =
                hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
        cap->nonleaf.shaper_shared_n_max = 0;
        if (level_id == HNS3_TM_NODE_LEVEL_PORT)
                cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
        else
                cap->nonleaf.sched_n_children_max = max_tx_queues;
        cap->nonleaf.sched_sp_n_priorities_max = 1;
        cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
        cap->nonleaf.sched_wfq_n_groups_max = 0;
        cap->nonleaf.sched_wfq_weight_max = 1;
        cap->nonleaf.stats_mask = 0;
}

static void
hns3_tm_leaf_level_capabilities_get(struct rte_eth_dev *dev,
                                    struct rte_tm_level_capabilities *cap)
{
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        cap->n_nodes_max = max_tx_queues;
        cap->n_nodes_nonleaf_max = 0;
        cap->n_nodes_leaf_max = max_tx_queues;

        cap->non_leaf_nodes_identical = 1;
        cap->leaf_nodes_identical = 1;

        cap->leaf.shaper_private_supported = false;
        cap->leaf.shaper_private_dual_rate_supported = false;
        cap->leaf.shaper_private_rate_min = 0;
        cap->leaf.shaper_private_rate_max = 0;
        cap->leaf.shaper_shared_n_max = 0;
        cap->leaf.cman_head_drop_supported = false;
        cap->leaf.cman_wred_context_private_supported = false;
        cap->leaf.cman_wred_context_shared_n_max = 0;
        cap->leaf.stats_mask = 0;
}

static int
hns3_tm_level_capabilities_get(struct rte_eth_dev *dev,
                               uint32_t level_id,
                               struct rte_tm_level_capabilities *cap,
                               struct rte_tm_error *error)
{
        if (cap == NULL || error == NULL)
                return -EINVAL;

        if (level_id >= HNS3_TM_NODE_LEVEL_MAX) {
                error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
                error->message = "level too deep";
                return -EINVAL;
        }

        memset(cap, 0, sizeof(struct rte_tm_level_capabilities));

        if (level_id != HNS3_TM_NODE_LEVEL_QUEUE)
                hns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap);
        else
                hns3_tm_leaf_level_capabilities_get(dev, cap);

        return 0;
}

static void
hns3_tm_nonleaf_node_capabilities_get(struct rte_eth_dev *dev,
                                      enum hns3_tm_node_type node_type,
                                      struct rte_tm_node_capabilities *cap)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        cap->shaper_private_supported = true;
        cap->shaper_private_dual_rate_supported = false;
        cap->shaper_private_rate_min = 0;
        cap->shaper_private_rate_max =
                hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
        cap->shaper_shared_n_max = 0;

        if (node_type == HNS3_TM_NODE_TYPE_PORT)
                cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
        else
                cap->nonleaf.sched_n_children_max = max_tx_queues;
        cap->nonleaf.sched_sp_n_priorities_max = 1;
        cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
        cap->nonleaf.sched_wfq_n_groups_max = 0;
        cap->nonleaf.sched_wfq_weight_max = 1;

        cap->stats_mask = 0;
}

static void
hns3_tm_leaf_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
                                   struct rte_tm_node_capabilities *cap)
{
        cap->shaper_private_supported = false;
        cap->shaper_private_dual_rate_supported = false;
        cap->shaper_private_rate_min = 0;
        cap->shaper_private_rate_max = 0;
        cap->shaper_shared_n_max = 0;

        cap->leaf.cman_head_drop_supported = false;
        cap->leaf.cman_wred_context_private_supported = false;
        cap->leaf.cman_wred_context_shared_n_max = 0;

        cap->stats_mask = 0;
}

static int
hns3_tm_node_capabilities_get(struct rte_eth_dev *dev,
                              uint32_t node_id,
                              struct rte_tm_node_capabilities *cap,
                              struct rte_tm_error *error)
{
        enum hns3_tm_node_type node_type;
        struct hns3_tm_node *tm_node;

        if (cap == NULL || error == NULL)
                return -EINVAL;

        tm_node = hns3_tm_node_search(dev, node_id, &node_type);
        if (tm_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        memset(cap, 0, sizeof(struct rte_tm_node_capabilities));

        if (node_type != HNS3_TM_NODE_TYPE_QUEUE)
                hns3_tm_nonleaf_node_capabilities_get(dev, node_type, cap);
        else
                hns3_tm_leaf_node_capabilities_get(dev, cap);

        return 0;
}

static int
hns3_tm_config_port_rate(struct hns3_hw *hw,
                         struct hns3_tm_shaper_profile *shaper_profile)
{
        uint32_t firmware_rate;
        uint64_t rate;

        if (shaper_profile) {
                rate = shaper_profile->profile.peak.rate;
                firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
        } else {
                firmware_rate = hw->dcb_info.pg_info[0].bw_limit;
        }

        /*
         * The TM shaper topology after device init:
         *     pri0 shaper   --->|
         *     pri1 shaper   --->|
         *     ...               |----> pg0 shaper ----> port shaper
         *     ...               |
         *     priX shaper   --->|
         *
         * Because the port shaper rate may be changed by firmware, the driver
         * uses the pg0 shaper to implement the port rate limit and thereby
         * avoids concurrent configuration.
         *
         * The final port rate = MIN(pg0 shaper rate, port shaper rate)
         */
        return hns3_pg_shaper_rate_cfg(hw, 0, firmware_rate);
}

static int
hns3_tm_config_tc_rate(struct hns3_hw *hw,
                       uint8_t tc_no,
                       struct hns3_tm_shaper_profile *shaper_profile)
{
        uint32_t firmware_rate;
        uint64_t rate;

        if (shaper_profile) {
                rate = shaper_profile->profile.peak.rate;
                firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
        } else {
                firmware_rate = hw->dcb_info.tc_info[tc_no].bw_limit;
        }

        return hns3_pri_shaper_rate_cfg(hw, tc_no, firmware_rate);
}

static bool
hns3_tm_configure_check(struct hns3_hw *hw, struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        struct hns3_tm_conf *tm_conf = &pf->tm_conf;
        struct hns3_tm_node_list *tc_list = &tm_conf->tc_list;
        struct hns3_tm_node_list *queue_list = &tm_conf->queue_list;
        struct hns3_tm_node *tm_node;

        /* TC */
        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (!tm_node->reference_count) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                        error->message = "TC without queue assigned";
                        return false;
                }

                if (hns3_tm_calc_node_tc_no(tm_conf, tm_node->id) >=
                        hw->num_tc) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "node's TC does not exist";
                        return false;
                }
        }

        /* Queue */
        TAILQ_FOREACH(tm_node, queue_list, node) {
                if (tm_node->id >= hw->data->nb_tx_queues) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "node's queue invalid";
                        return false;
                }

                if (hns3_txq_mapped_tc_get(hw, tm_node->id) !=
                    hns3_tm_calc_node_tc_no(tm_conf, tm_node->parent->id)) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "queue's TC does not match parent's TC";
                        return false;
                }
        }

        return true;
}

static int
hns3_tm_hierarchy_do_commit(struct hns3_hw *hw,
                            struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
        struct hns3_tm_node *tm_node;
        uint8_t tc_no;
        int ret;

        /* port */
        tm_node = pf->tm_conf.root;
        if (tm_node->shaper_profile) {
                ret = hns3_tm_config_port_rate(hw, tm_node->shaper_profile);
                if (ret) {
                        error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
                        error->message = "failed to set port peak rate";
                        return -EIO;
                }
        }

        /* TC */
        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (tm_node->shaper_profile == NULL)
                        continue;

                tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
                ret = hns3_tm_config_tc_rate(hw, tc_no,
                                             tm_node->shaper_profile);
                if (ret) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                        error->message = "failed to set TC peak rate";
                        return -EIO;
                }
        }

        return 0;
}

static int
hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
                         int clear_on_fail,
                         struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        int ret;

        if (error == NULL)
                return -EINVAL;

        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "device is resetting";
                /* don't goto fail_clear, the user may retry later */
                return -EBUSY;
        }

        if (pf->tm_conf.root == NULL)
                goto done;

        /*
         * Validate the configuration before commit to make sure key
         * constraints are not violated.
         */
        if (!hns3_tm_configure_check(hw, error))
                goto fail_clear;

        ret = hns3_tm_hierarchy_do_commit(hw, error);
        if (ret)
                goto fail_clear;

done:
        pf->tm_conf.committed = true;
        return 0;

fail_clear:
        if (clear_on_fail) {
                hns3_tm_conf_uninit(dev);
                hns3_tm_conf_init(dev);
        }
        return -EINVAL;
}

static int
hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
                              int clear_on_fail,
                              struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        rte_spinlock_lock(&hw->lock);
        ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
        rte_spinlock_unlock(&hw->lock);

        return ret;
}
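
/*
 * Commit sketch (application side; port_id and err are assumptions): once
 * the hierarchy is in place, apply it with clear_on_fail set so a rejected
 * configuration is wiped back to a clean state:
 *
 *   if (rte_tm_hierarchy_commit(port_id, 1, &err) != 0)
 *           printf("commit failed: %s\n",
 *                  err.message != NULL ? err.message : "unknown");
 *
 * Note that the -EBUSY path above (device resetting) does not clear the
 * configuration, so the application may simply retry later.
 */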

static int
hns3_tm_node_shaper_do_update(struct hns3_hw *hw,
                              uint32_t node_id,
                              enum hns3_tm_node_type node_type,
                              struct hns3_tm_shaper_profile *shaper_profile,
                              struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        uint8_t tc_no;
        int ret;

        if (node_type == HNS3_TM_NODE_TYPE_QUEUE) {
                if (shaper_profile != NULL) {
                        error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                        error->message = "queue node shaper not supported";
                        return -EINVAL;
                }
                return 0;
        }

        if (!pf->tm_conf.committed)
                return 0;

        if (node_type == HNS3_TM_NODE_TYPE_PORT) {
                ret = hns3_tm_config_port_rate(hw, shaper_profile);
                if (ret) {
                        error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
                        error->message = "failed to update port peak rate";
                }

                return ret;
        }

        /* update the TC's shaper */
        tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id);
        ret = hns3_tm_config_tc_rate(hw, tc_no, shaper_profile);
        if (ret) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
                error->message = "failed to update TC peak rate";
        }

        return ret;
}

static int
hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
                           uint32_t node_id,
                           uint32_t shaper_profile_id,
                           struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
        struct hns3_tm_shaper_profile *profile = NULL;
        struct hns3_tm_node *tm_node;

        if (error == NULL)
                return -EINVAL;

        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "device is resetting";
                return -EBUSY;
        }

        tm_node = hns3_tm_node_search(dev, node_id, &node_type);
        if (tm_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        if (shaper_profile_id == tm_node->params.shaper_profile_id)
                return 0;

        if (shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
                profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
                if (profile == NULL) {
                        error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                        error->message = "profile ID does not exist";
                        return -EINVAL;
                }
        }

        if (hns3_tm_node_shaper_do_update(hw, node_id, node_type,
                                          profile, error))
                return -EINVAL;

        if (tm_node->shaper_profile)
                tm_node->shaper_profile->reference_count--;
        tm_node->shaper_profile = profile;
        tm_node->params.shaper_profile_id = shaper_profile_id;
        if (profile != NULL)
                profile->reference_count++;

        return 0;
}

static int
hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
                                uint32_t node_id,
                                uint32_t shaper_profile_id,
                                struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        rte_spinlock_lock(&hw->lock);
        ret = hns3_tm_node_shaper_update(dev, node_id,
                                         shaper_profile_id, error);
        rte_spinlock_unlock(&hw->lock);

        return ret;
}
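
/*
 * Runtime update sketch (application side; port_id, tc0_node,
 * new_profile_id and err are assumptions): a committed port or TC node can
 * be re-pointed at another profile, or detached with
 * RTE_TM_SHAPER_PROFILE_ID_NONE (which falls back to the firmware default
 * rate in hns3_tm_config_port_rate()/hns3_tm_config_tc_rate()); queue
 * (leaf) nodes reject any profile:
 *
 *   rte_tm_node_shaper_update(port_id, tc0_node, new_profile_id, &err);
 *   rte_tm_node_shaper_update(port_id, tc0_node,
 *                             RTE_TM_SHAPER_PROFILE_ID_NONE, &err);
 */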

static const struct rte_tm_ops hns3_tm_ops = {
        .capabilities_get       = hns3_tm_capabilities_get,
        .shaper_profile_add     = hns3_tm_shaper_profile_add,
        .shaper_profile_delete  = hns3_tm_shaper_profile_del,
        .node_add               = hns3_tm_node_add,
        .node_delete            = hns3_tm_node_delete,
        .node_type_get          = hns3_tm_node_type_get,
        .level_capabilities_get = hns3_tm_level_capabilities_get,
        .node_capabilities_get  = hns3_tm_node_capabilities_get,
        .hierarchy_commit       = hns3_tm_hierarchy_commit_wrap,
        .node_shaper_update     = hns3_tm_node_shaper_update_wrap,
};

int
hns3_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
                void *arg)
{
        if (arg == NULL)
                return -EINVAL;

        *(const void **)arg = &hns3_tm_ops;

        return 0;
}

void
hns3_tm_dev_start_proc(struct hns3_hw *hw)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);

        if (pf->tm_conf.root && !pf->tm_conf.committed)
                hns3_warn(hw,
                    "please call hierarchy_commit() before starting the port.");
}

/*
 * Clear the tm_conf committed flag when the device stops so that the user
 * can modify the TM configuration (e.g. add or delete a node).
 *
 * If the user didn't call hierarchy commit when the device starts later,
 * the port/TC shaper rates would otherwise remain as previously committed.
 *
 * To avoid that, also restore the default port/TC shaper rates when the
 * device stops.
 */
void
hns3_tm_dev_stop_proc(struct hns3_hw *hw)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
        struct hns3_tm_node *tm_node;
        uint8_t tc_no;

        if (!pf->tm_conf.committed)
                return;

        tm_node = pf->tm_conf.root;
        if (tm_node != NULL && tm_node->shaper_profile)
                (void)hns3_tm_config_port_rate(hw, NULL);

        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (tm_node->shaper_profile == NULL)
                        continue;
                tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
                (void)hns3_tm_config_tc_rate(hw, tc_no, NULL);
        }

        pf->tm_conf.committed = false;
}

int
hns3_tm_conf_update(struct hns3_hw *hw)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        struct rte_tm_error error;

        if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)
                return 0;

        memset(&error, 0, sizeof(struct rte_tm_error));
        return hns3_tm_hierarchy_do_commit(hw, &error);
}