drivers/net/hns3/hns3_tm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 HiSilicon Limited.
 */

#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_dcb.h"
#include "hns3_logs.h"
#include "hns3_tm.h"

static inline uint32_t
hns3_tm_max_tx_queues_get(struct rte_eth_dev *dev)
{
        /*
         * This API is called at the PCI device probe stage. We can't use
         * rte_eth_dev_info_get() to get max_tx_queues (rte_eth_devices is
         * not set up yet at that point), so we call hns3_dev_infos_get()
         * directly.
         */
        struct rte_eth_dev_info dev_info;

        memset(&dev_info, 0, sizeof(dev_info));
        (void)hns3_dev_infos_get(dev, &dev_info);
        return RTE_MIN(dev_info.max_tx_queues, RTE_MAX_QUEUES_PER_PORT);
}

void
hns3_tm_conf_init(struct rte_eth_dev *dev)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        if (!hns3_dev_tm_supported(hw))
                return;

        pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
        pf->tm_conf.nb_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
        pf->tm_conf.nb_shaper_profile_max = 1 + HNS3_MAX_TC_NUM;

        TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
        pf->tm_conf.nb_shaper_profile = 0;

        pf->tm_conf.root = NULL;
        TAILQ_INIT(&pf->tm_conf.tc_list);
        TAILQ_INIT(&pf->tm_conf.queue_list);
        pf->tm_conf.nb_tc_node = 0;
        pf->tm_conf.nb_queue_node = 0;

        pf->tm_conf.committed = false;
}

void
hns3_tm_conf_uninit(struct rte_eth_dev *dev)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_tm_shaper_profile *shaper_profile;
        struct hns3_tm_node *tm_node;

        if (!hns3_dev_tm_supported(hw))
                return;

        if (pf->tm_conf.nb_queue_node > 0) {
                while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
                        TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
                        rte_free(tm_node);
                }
                pf->tm_conf.nb_queue_node = 0;
        }

        if (pf->tm_conf.nb_tc_node > 0) {
                while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
                        TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
                        rte_free(tm_node);
                }
                pf->tm_conf.nb_tc_node = 0;
        }

        if (pf->tm_conf.root != NULL) {
                rte_free(pf->tm_conf.root);
                pf->tm_conf.root = NULL;
        }

        if (pf->tm_conf.nb_shaper_profile > 0) {
                while ((shaper_profile =
                       TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
                        TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
                                     shaper_profile, node);
                        rte_free(shaper_profile);
                }
                pf->tm_conf.nb_shaper_profile = 0;
        }

        pf->tm_conf.nb_leaf_nodes_max = 0;
        pf->tm_conf.nb_nodes_max = 0;
        pf->tm_conf.nb_shaper_profile_max = 0;
}

static inline uint64_t
hns3_tm_rate_convert_firmware2tm(uint32_t firmware_rate)
{
#define FIRMWARE_TO_TM_RATE_SCALE       125000
        /* tm rate unit is Bps, firmware rate is Mbps */
        return ((uint64_t)firmware_rate) * FIRMWARE_TO_TM_RATE_SCALE;
}

static inline uint32_t
hns3_tm_rate_convert_tm2firmware(uint64_t tm_rate)
{
#define TM_TO_FIRMWARE_RATE_SCALE       125000
        /* tm rate unit is Bps, firmware rate is Mbps */
        return (uint32_t)(tm_rate / TM_TO_FIRMWARE_RATE_SCALE);
}
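
/*
 * Conversion example: 1 Mbit/s = 1,000,000 bit/s / 8 = 125,000 byte/s, which
 * is where the 125000 scale factor above comes from. A firmware rate of e.g.
 * 200 Mbit/s maps to 200 * 125000 = 25,000,000 Bps on the rte_tm side, and
 * converts back by integer division.
 */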

static int
hns3_tm_capabilities_get(struct rte_eth_dev *dev,
                         struct rte_tm_capabilities *cap,
                         struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        if (cap == NULL || error == NULL)
                return -EINVAL;

        error->type = RTE_TM_ERROR_TYPE_NONE;

        memset(cap, 0, sizeof(struct rte_tm_capabilities));

        cap->n_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
        cap->n_levels_max = HNS3_TM_NODE_LEVEL_MAX;
        cap->non_leaf_nodes_identical = 1;
        cap->leaf_nodes_identical = 1;
        cap->shaper_n_max = 1 + HNS3_MAX_TC_NUM;
        cap->shaper_private_n_max = 1 + HNS3_MAX_TC_NUM;
        cap->shaper_private_dual_rate_n_max = 0;
        cap->shaper_private_rate_min = 0;
        cap->shaper_private_rate_max =
                hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
        cap->shaper_shared_n_max = 0;
        cap->shaper_shared_n_nodes_per_shaper_max = 0;
        cap->shaper_shared_n_shapers_per_node_max = 0;
        cap->shaper_shared_dual_rate_n_max = 0;
        cap->shaper_shared_rate_min = 0;
        cap->shaper_shared_rate_max = 0;

        cap->sched_n_children_max = max_tx_queues;
        cap->sched_sp_n_priorities_max = 1;
        cap->sched_wfq_n_children_per_group_max = 0;
        cap->sched_wfq_n_groups_max = 0;
        cap->sched_wfq_weight_max = 1;

        cap->cman_head_drop_supported = 0;
        cap->dynamic_update_mask = 0;
        cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
        cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
        cap->cman_wred_context_n_max = 0;
        cap->cman_wred_context_private_n_max = 0;
        cap->cman_wred_context_shared_n_max = 0;
        cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
        cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
        cap->stats_mask = 0;

        return 0;
}

static struct hns3_tm_shaper_profile *
hns3_tm_shaper_profile_search(struct rte_eth_dev *dev,
                              uint32_t shaper_profile_id)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_shaper_profile_list *shaper_profile_list =
                &pf->tm_conf.shaper_profile_list;
        struct hns3_tm_shaper_profile *shaper_profile;

        TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
                if (shaper_profile_id == shaper_profile->shaper_profile_id)
                        return shaper_profile;
        }

        return NULL;
}

static int
hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
                                   struct rte_tm_shaper_params *profile,
                                   struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (profile->committed.rate) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
                error->message = "committed rate not supported";
                return -EINVAL;
        }

        if (profile->committed.size) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
                error->message = "committed bucket size not supported";
                return -EINVAL;
        }

        if (profile->peak.rate >
            hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate)) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
                error->message = "peak rate too large";
                return -EINVAL;
        }

        if (profile->peak.rate < hns3_tm_rate_convert_firmware2tm(1)) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
                error->message = "peak rate must be at least 1Mbps";
                return -EINVAL;
        }

        if (profile->peak.size) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
                error->message = "peak bucket size not supported";
                return -EINVAL;
        }

        if (profile->pkt_length_adjust) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
                error->message = "packet length adjustment not supported";
                return -EINVAL;
        }

        if (profile->packet_mode) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE;
                error->message = "packet mode not supported";
                return -EINVAL;
        }

        return 0;
}

static int
hns3_tm_shaper_profile_add(struct rte_eth_dev *dev,
                           uint32_t shaper_profile_id,
                           struct rte_tm_shaper_params *profile,
                           struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_shaper_profile *shaper_profile;
        int ret;

        if (profile == NULL || error == NULL)
                return -EINVAL;

        if (pf->tm_conf.nb_shaper_profile >=
            pf->tm_conf.nb_shaper_profile_max) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "too many profiles";
                return -EINVAL;
        }

        ret = hns3_tm_shaper_profile_param_check(dev, profile, error);
        if (ret)
                return ret;

        shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
        if (shaper_profile) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                error->message = "profile ID already exists";
                return -EINVAL;
        }

        shaper_profile = rte_zmalloc("hns3_tm_shaper_profile",
                                     sizeof(struct hns3_tm_shaper_profile),
                                     0);
        if (shaper_profile == NULL)
                return -ENOMEM;

        shaper_profile->shaper_profile_id = shaper_profile_id;
        memcpy(&shaper_profile->profile, profile,
               sizeof(struct rte_tm_shaper_params));
        TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
                          shaper_profile, node);
        pf->tm_conf.nb_shaper_profile++;

        return 0;
}

static int
hns3_tm_shaper_profile_del(struct rte_eth_dev *dev,
                           uint32_t shaper_profile_id,
                           struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_shaper_profile *shaper_profile;

        if (error == NULL)
                return -EINVAL;

        shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
        if (shaper_profile == NULL) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                error->message = "profile ID does not exist";
                return -EINVAL;
        }

        if (shaper_profile->reference_count) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
                error->message = "profile in use";
                return -EINVAL;
        }

        TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
        rte_free(shaper_profile);
        pf->tm_conf.nb_shaper_profile--;

        return 0;
}

static struct hns3_tm_node *
hns3_tm_node_search(struct rte_eth_dev *dev,
                    uint32_t node_id,
                    enum hns3_tm_node_type *node_type)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_node_list *queue_list = &pf->tm_conf.queue_list;
        struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
        struct hns3_tm_node *tm_node;

        if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
                *node_type = HNS3_TM_NODE_TYPE_PORT;
                return pf->tm_conf.root;
        }

        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (tm_node->id == node_id) {
                        *node_type = HNS3_TM_NODE_TYPE_TC;
                        return tm_node;
                }
        }

        TAILQ_FOREACH(tm_node, queue_list, node) {
                if (tm_node->id == node_id) {
                        *node_type = HNS3_TM_NODE_TYPE_QUEUE;
                        return tm_node;
                }
        }

        return NULL;
}

static int
hns3_tm_nonleaf_node_param_check(struct rte_eth_dev *dev,
                                 struct rte_tm_node_params *params,
                                 struct rte_tm_error *error)
{
        struct hns3_tm_shaper_profile *shaper_profile;

        if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
                shaper_profile = hns3_tm_shaper_profile_search(dev,
                                 params->shaper_profile_id);
                if (shaper_profile == NULL) {
                        error->type =
                                RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
                        error->message = "shaper profile does not exist";
                        return -EINVAL;
                }
        }

        if (params->nonleaf.wfq_weight_mode) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
                error->message = "WFQ not supported";
                return -EINVAL;
        }

        if (params->nonleaf.n_sp_priorities != 1) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
                error->message = "SP priority not supported";
                return -EINVAL;
        }

        return 0;
}

static int
hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev __rte_unused,
                              struct rte_tm_node_params *params,
                              struct rte_tm_error *error)
{
        if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
                error->message = "shaper not supported";
                return -EINVAL;
        }

        if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
                error->message = "congestion management not supported";
                return -EINVAL;
        }

        if (params->leaf.wred.wred_profile_id != RTE_TM_WRED_PROFILE_ID_NONE) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
                error->message = "WRED not supported";
                return -EINVAL;
        }

        if (params->leaf.wred.shared_wred_context_id) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
                error->message = "WRED not supported";
                return -EINVAL;
        }

        if (params->leaf.wred.n_shared_wred_contexts) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
                error->message = "WRED not supported";
                return -EINVAL;
        }

        return 0;
}

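/*
 * Note: node IDs encode the node type (the checks below and in the node-add
 * helpers rely on this layout): queue (leaf) nodes use
 * [0, nb_leaf_nodes_max), TC nodes use [nb_leaf_nodes_max, nb_nodes_max - 1),
 * and nb_nodes_max - 1 is the port (root) node.
 */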
static int
hns3_tm_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
                         uint32_t priority, uint32_t weight,
                         struct rte_tm_node_params *params,
                         struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;

        if (node_id == RTE_TM_NODE_ID_NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid node id";
                return -EINVAL;
        }

        if (hns3_tm_node_search(dev, node_id, &node_type)) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "node id already used";
                return -EINVAL;
        }

        if (priority) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
                error->message = "priority should be 0";
                return -EINVAL;
        }

        if (weight != 1) {
                error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
                error->message = "weight must be 1";
                return -EINVAL;
        }

        if (params->shared_shaper_id) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
                error->message = "shared shaper not supported";
                return -EINVAL;
        }
        if (params->n_shared_shapers) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
                error->message = "shared shaper not supported";
                return -EINVAL;
        }

        if (node_id >= pf->tm_conf.nb_leaf_nodes_max)
                return hns3_tm_nonleaf_node_param_check(dev, params, error);
        else
                return hns3_tm_leaf_node_param_check(dev, params, error);
}

static int
hns3_tm_port_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                      uint32_t level_id, struct rte_tm_node_params *params,
                      struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_node *tm_node;

        if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
            level_id != HNS3_TM_NODE_LEVEL_PORT) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                error->message = "wrong level";
                return -EINVAL;
        }

        if (node_id != pf->tm_conf.nb_nodes_max - 1) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid port node ID";
                return -EINVAL;
        }

        if (pf->tm_conf.root) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                error->message = "already have a root";
                return -EINVAL;
        }

        tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
        if (tm_node == NULL)
                return -ENOMEM;

        tm_node->id = node_id;
        tm_node->reference_count = 0;
        tm_node->parent = NULL;
        tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
                                  params->shaper_profile_id);
        memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
        pf->tm_conf.root = tm_node;

        if (tm_node->shaper_profile)
                tm_node->shaper_profile->reference_count++;

        return 0;
}

static int
hns3_tm_tc_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                    uint32_t level_id, struct hns3_tm_node *parent_node,
                    struct rte_tm_node_params *params,
                    struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_node *tm_node;

        if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
            level_id != HNS3_TM_NODE_LEVEL_TC) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                error->message = "wrong level";
                return -EINVAL;
        }

        if (node_id >= pf->tm_conf.nb_nodes_max - 1 ||
            node_id < pf->tm_conf.nb_leaf_nodes_max ||
            hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id) >= hw->num_tc) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid tc node ID";
                return -EINVAL;
        }

        if (pf->tm_conf.nb_tc_node >= hw->num_tc) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "too many TCs";
                return -EINVAL;
        }

        tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
        if (tm_node == NULL)
                return -ENOMEM;

        tm_node->id = node_id;
        tm_node->reference_count = 0;
        tm_node->parent = parent_node;
        tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
                                        params->shaper_profile_id);
        memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
        TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list, tm_node, node);
        pf->tm_conf.nb_tc_node++;
        tm_node->parent->reference_count++;

        if (tm_node->shaper_profile)
                tm_node->shaper_profile->reference_count++;

        return 0;
}

static int
hns3_tm_queue_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                       uint32_t level_id, struct hns3_tm_node *parent_node,
                       struct rte_tm_node_params *params,
                       struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_node *tm_node;

        if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
            level_id != HNS3_TM_NODE_LEVEL_QUEUE) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                error->message = "wrong level";
                return -EINVAL;
        }

        /* note: dev->data->nb_tx_queues <= max_tx_queues */
        if (node_id >= dev->data->nb_tx_queues) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid queue node ID";
                return -EINVAL;
        }

        if (hns3_txq_mapped_tc_get(hw, node_id) !=
            hns3_tm_calc_node_tc_no(&pf->tm_conf, parent_node->id)) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "queue's TC doesn't match parent's TC";
                return -EINVAL;
        }

        tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
        if (tm_node == NULL)
                return -ENOMEM;

        tm_node->id = node_id;
        tm_node->reference_count = 0;
        tm_node->parent = parent_node;
        memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
        TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list, tm_node, node);
        pf->tm_conf.nb_queue_node++;
        tm_node->parent->reference_count++;

        return 0;
}

static int
hns3_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                 uint32_t parent_node_id, uint32_t priority,
                 uint32_t weight, uint32_t level_id,
                 struct rte_tm_node_params *params,
                 struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum hns3_tm_node_type parent_node_type = HNS3_TM_NODE_TYPE_MAX;
        struct hns3_tm_node *parent_node;
        int ret;

        if (params == NULL || error == NULL)
                return -EINVAL;

        if (pf->tm_conf.committed) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "already committed";
                return -EINVAL;
        }

        ret = hns3_tm_node_param_check(dev, node_id, priority, weight,
                                       params, error);
        if (ret)
                return ret;

        /* the root node has no parent */
        if (parent_node_id == RTE_TM_NODE_ID_NULL)
                return hns3_tm_port_node_add(dev, node_id, level_id,
                                             params, error);

        parent_node = hns3_tm_node_search(dev, parent_node_id,
                                          &parent_node_type);
        if (parent_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                error->message = "parent does not exist";
                return -EINVAL;
        }

        if (parent_node_type != HNS3_TM_NODE_TYPE_PORT &&
            parent_node_type != HNS3_TM_NODE_TYPE_TC) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                error->message = "parent is not port or TC";
                return -EINVAL;
        }

        if (parent_node_type == HNS3_TM_NODE_TYPE_PORT)
                return hns3_tm_tc_node_add(dev, node_id, level_id,
                                           parent_node, params, error);
        else
                return hns3_tm_queue_node_add(dev, node_id, level_id,
                                              parent_node, params, error);
}

static void
hns3_tm_node_do_delete(struct hns3_pf *pf,
                       enum hns3_tm_node_type node_type,
                       struct hns3_tm_node *tm_node)
{
        if (node_type == HNS3_TM_NODE_TYPE_PORT) {
                if (tm_node->shaper_profile)
                        tm_node->shaper_profile->reference_count--;
                rte_free(tm_node);
                pf->tm_conf.root = NULL;
                return;
        }

        if (tm_node->shaper_profile)
                tm_node->shaper_profile->reference_count--;
        tm_node->parent->reference_count--;
        if (node_type == HNS3_TM_NODE_TYPE_TC) {
                TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
                pf->tm_conf.nb_tc_node--;
        } else {
                TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
                pf->tm_conf.nb_queue_node--;
        }
        rte_free(tm_node);
}

static int
hns3_tm_node_delete(struct rte_eth_dev *dev,
                    uint32_t node_id,
                    struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
        struct hns3_tm_node *tm_node;

        if (error == NULL)
                return -EINVAL;

        if (pf->tm_conf.committed) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "already committed";
                return -EINVAL;
        }

        tm_node = hns3_tm_node_search(dev, node_id, &node_type);
        if (tm_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        if (tm_node->reference_count) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "cannot delete a node which has children";
                return -EINVAL;
        }

        hns3_tm_node_do_delete(pf, node_type, tm_node);

        return 0;
}

static int
hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
                      int *is_leaf, struct rte_tm_error *error)
{
        enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
        struct hns3_tm_node *tm_node;

        if (is_leaf == NULL || error == NULL)
                return -EINVAL;

        tm_node = hns3_tm_node_search(dev, node_id, &node_type);
        if (tm_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        if (node_type == HNS3_TM_NODE_TYPE_QUEUE)
                *is_leaf = true;
        else
                *is_leaf = false;

        return 0;
}

static void
hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev,
                                       uint32_t level_id,
                                       struct rte_tm_level_capabilities *cap)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        if (level_id == HNS3_TM_NODE_LEVEL_PORT) {
                cap->n_nodes_max = 1;
                cap->n_nodes_nonleaf_max = 1;
                cap->n_nodes_leaf_max = 0;
        } else {
                cap->n_nodes_max = HNS3_MAX_TC_NUM;
                cap->n_nodes_nonleaf_max = HNS3_MAX_TC_NUM;
                cap->n_nodes_leaf_max = 0;
        }

        cap->non_leaf_nodes_identical = 1;
        cap->leaf_nodes_identical = 1;

        cap->nonleaf.shaper_private_supported = true;
        cap->nonleaf.shaper_private_dual_rate_supported = false;
        cap->nonleaf.shaper_private_rate_min = 0;
        cap->nonleaf.shaper_private_rate_max =
                hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
        cap->nonleaf.shaper_shared_n_max = 0;
        if (level_id == HNS3_TM_NODE_LEVEL_PORT)
                cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
        else
                cap->nonleaf.sched_n_children_max = max_tx_queues;
        cap->nonleaf.sched_sp_n_priorities_max = 1;
        cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
        cap->nonleaf.sched_wfq_n_groups_max = 0;
        cap->nonleaf.sched_wfq_weight_max = 1;
        cap->nonleaf.stats_mask = 0;
}

static void
hns3_tm_leaf_level_capabilities_get(struct rte_eth_dev *dev,
                                    struct rte_tm_level_capabilities *cap)
{
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        cap->n_nodes_max = max_tx_queues;
        cap->n_nodes_nonleaf_max = 0;
        cap->n_nodes_leaf_max = max_tx_queues;

        cap->non_leaf_nodes_identical = 1;
        cap->leaf_nodes_identical = 1;

        cap->leaf.shaper_private_supported = false;
        cap->leaf.shaper_private_dual_rate_supported = false;
        cap->leaf.shaper_private_rate_min = 0;
        cap->leaf.shaper_private_rate_max = 0;
        cap->leaf.shaper_shared_n_max = 0;
        cap->leaf.cman_head_drop_supported = false;
        cap->leaf.cman_wred_context_private_supported = false;
        cap->leaf.cman_wred_context_shared_n_max = 0;
        cap->leaf.stats_mask = 0;
}

static int
hns3_tm_level_capabilities_get(struct rte_eth_dev *dev,
                               uint32_t level_id,
                               struct rte_tm_level_capabilities *cap,
                               struct rte_tm_error *error)
{
        if (cap == NULL || error == NULL)
                return -EINVAL;

        if (level_id >= HNS3_TM_NODE_LEVEL_MAX) {
                error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
                error->message = "too deep level";
                return -EINVAL;
        }

        memset(cap, 0, sizeof(struct rte_tm_level_capabilities));

        if (level_id != HNS3_TM_NODE_LEVEL_QUEUE)
                hns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap);
        else
                hns3_tm_leaf_level_capabilities_get(dev, cap);

        return 0;
}

static void
hns3_tm_nonleaf_node_capabilities_get(struct rte_eth_dev *dev,
                                      enum hns3_tm_node_type node_type,
                                      struct rte_tm_node_capabilities *cap)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        cap->shaper_private_supported = true;
        cap->shaper_private_dual_rate_supported = false;
        cap->shaper_private_rate_min = 0;
        cap->shaper_private_rate_max =
                hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
        cap->shaper_shared_n_max = 0;

        if (node_type == HNS3_TM_NODE_TYPE_PORT)
                cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
        else
                cap->nonleaf.sched_n_children_max = max_tx_queues;
        cap->nonleaf.sched_sp_n_priorities_max = 1;
        cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
        cap->nonleaf.sched_wfq_n_groups_max = 0;
        cap->nonleaf.sched_wfq_weight_max = 1;

        cap->stats_mask = 0;
}

static void
hns3_tm_leaf_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
                                   struct rte_tm_node_capabilities *cap)
{
        cap->shaper_private_supported = false;
        cap->shaper_private_dual_rate_supported = false;
        cap->shaper_private_rate_min = 0;
        cap->shaper_private_rate_max = 0;
        cap->shaper_shared_n_max = 0;

        cap->leaf.cman_head_drop_supported = false;
        cap->leaf.cman_wred_context_private_supported = false;
        cap->leaf.cman_wred_context_shared_n_max = 0;

        cap->stats_mask = 0;
}

static int
hns3_tm_node_capabilities_get(struct rte_eth_dev *dev,
                              uint32_t node_id,
                              struct rte_tm_node_capabilities *cap,
                              struct rte_tm_error *error)
{
        enum hns3_tm_node_type node_type;
        struct hns3_tm_node *tm_node;

        if (cap == NULL || error == NULL)
                return -EINVAL;

        tm_node = hns3_tm_node_search(dev, node_id, &node_type);
        if (tm_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        memset(cap, 0, sizeof(struct rte_tm_node_capabilities));

        if (node_type != HNS3_TM_NODE_TYPE_QUEUE)
                hns3_tm_nonleaf_node_capabilities_get(dev, node_type, cap);
        else
                hns3_tm_leaf_node_capabilities_get(dev, cap);

        return 0;
}

static int
hns3_tm_config_port_rate(struct hns3_hw *hw,
                         struct hns3_tm_shaper_profile *shaper_profile)
{
        struct hns3_port_limit_rate_cmd *cfg;
        struct hns3_cmd_desc desc;
        uint32_t firmware_rate;
        uint64_t rate;
        int ret;

        if (shaper_profile) {
                rate = shaper_profile->profile.peak.rate;
                firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
        } else {
                firmware_rate = hw->max_tm_rate;
        }

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_LIMIT_RATE, false);
        cfg = (struct hns3_port_limit_rate_cmd *)desc.data;
        cfg->speed = rte_cpu_to_le_32(firmware_rate);

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret)
                hns3_err(hw, "failed to config port rate, ret = %d", ret);

        return ret;
}

static int
hns3_tm_config_tc_rate(struct hns3_hw *hw, uint8_t tc_no,
                       struct hns3_tm_shaper_profile *shaper_profile)
{
        struct hns3_tc_limit_rate_cmd *cfg;
        struct hns3_cmd_desc desc;
        uint32_t firmware_rate;
        uint64_t rate;
        int ret;

        if (shaper_profile) {
                rate = shaper_profile->profile.peak.rate;
                firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
        } else {
                firmware_rate = hw->dcb_info.tc_info[tc_no].bw_limit;
        }

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_TC_LIMIT_RATE, false);
        cfg = (struct hns3_tc_limit_rate_cmd *)desc.data;
        cfg->speed = rte_cpu_to_le_32(firmware_rate);
        cfg->tc_id = tc_no;

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret)
                hns3_err(hw, "failed to config tc (%u) rate, ret = %d",
                         tc_no, ret);

        return ret;
}

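/*
 * Validate the user-built hierarchy before it is written to firmware: every
 * TC node must have at least one queue attached, each TC node's TC number
 * must exist in hardware, and each queue must be mapped to its parent
 * node's TC.
 */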
static bool
hns3_tm_configure_check(struct hns3_hw *hw, struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        struct hns3_tm_conf *tm_conf = &pf->tm_conf;
        struct hns3_tm_node_list *tc_list = &tm_conf->tc_list;
        struct hns3_tm_node_list *queue_list = &tm_conf->queue_list;
        struct hns3_tm_node *tm_node;

        /* TC */
        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (!tm_node->reference_count) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                        error->message = "TC without queue assigned";
                        return false;
                }

                if (hns3_tm_calc_node_tc_no(tm_conf, tm_node->id) >=
                        hw->num_tc) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "node's TC does not exist";
                        return false;
                }
        }

        /* Queue */
        TAILQ_FOREACH(tm_node, queue_list, node) {
                if (tm_node->id >= hw->data->nb_tx_queues) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "node's queue invalid";
                        return false;
                }

                if (hns3_txq_mapped_tc_get(hw, tm_node->id) !=
                    hns3_tm_calc_node_tc_no(tm_conf, tm_node->parent->id)) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "queue's TC doesn't match parent's TC";
                        return false;
                }
        }

        return true;
}

static int
hns3_tm_hierarchy_do_commit(struct hns3_hw *hw,
                            struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
        struct hns3_tm_node *tm_node;
        uint8_t tc_no;
        int ret;

        /* port */
        tm_node = pf->tm_conf.root;
        if (tm_node->shaper_profile) {
                ret = hns3_tm_config_port_rate(hw, tm_node->shaper_profile);
                if (ret) {
                        error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
                        error->message = "failed to set port peak rate";
                        return -EIO;
                }
        }

        /* TC */
        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (tm_node->shaper_profile == NULL)
                        continue;

                tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
                ret = hns3_tm_config_tc_rate(hw, tc_no,
                                             tm_node->shaper_profile);
                if (ret) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                        error->message = "failed to set TC peak rate";
                        return -EIO;
                }
        }

        return 0;
}

static int
hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
                         int clear_on_fail,
                         struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        int ret;

        if (error == NULL)
                return -EINVAL;

        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "device is resetting";
                /* don't goto fail_clear; the user may retry later */
                return -EBUSY;
        }

        if (pf->tm_conf.root == NULL)
                goto done;

        /* check the configuration before commit to ensure no key constraint is violated */
        if (!hns3_tm_configure_check(hw, error))
                goto fail_clear;

        ret = hns3_tm_hierarchy_do_commit(hw, error);
        if (ret)
                goto fail_clear;

done:
        pf->tm_conf.committed = true;
        return 0;

fail_clear:
        if (clear_on_fail) {
                hns3_tm_conf_uninit(dev);
                hns3_tm_conf_init(dev);
        }
        return -EINVAL;
}

static int
hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
                              int clear_on_fail,
                              struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        rte_spinlock_lock(&hw->lock);
        ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
        rte_spinlock_unlock(&hw->lock);

        return ret;
}

static int
hns3_tm_node_shaper_do_update(struct hns3_hw *hw,
                              uint32_t node_id,
                              enum hns3_tm_node_type node_type,
                              struct hns3_tm_shaper_profile *shaper_profile,
                              struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        uint8_t tc_no;
        int ret;

        if (node_type == HNS3_TM_NODE_TYPE_QUEUE) {
                if (shaper_profile != NULL) {
                        error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                        error->message = "queue node shaper not supported";
                        return -EINVAL;
                }
                return 0;
        }

        if (!pf->tm_conf.committed)
                return 0;

        if (node_type == HNS3_TM_NODE_TYPE_PORT) {
                ret = hns3_tm_config_port_rate(hw, shaper_profile);
                if (ret) {
                        error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
                        error->message = "failed to update port peak rate";
                }

                return ret;
        }

        /* update the TC's shaper */
        tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id);
        ret = hns3_tm_config_tc_rate(hw, tc_no, shaper_profile);
        if (ret) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
                error->message = "failed to update TC peak rate";
        }

        return ret;
}

static int
hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
                           uint32_t node_id,
                           uint32_t shaper_profile_id,
                           struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
        struct hns3_tm_shaper_profile *profile = NULL;
        struct hns3_tm_node *tm_node;

        if (error == NULL)
                return -EINVAL;

        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "device is resetting";
                return -EBUSY;
        }

        tm_node = hns3_tm_node_search(dev, node_id, &node_type);
        if (tm_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        if (shaper_profile_id == tm_node->params.shaper_profile_id)
                return 0;

        if (shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
                profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
                if (profile == NULL) {
                        error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                        error->message = "profile ID does not exist";
                        return -EINVAL;
                }
        }

        if (hns3_tm_node_shaper_do_update(hw, node_id, node_type,
                                          profile, error))
                return -EINVAL;

        if (tm_node->shaper_profile)
                tm_node->shaper_profile->reference_count--;
        tm_node->shaper_profile = profile;
        tm_node->params.shaper_profile_id = shaper_profile_id;
        if (profile != NULL)
                profile->reference_count++;

        return 0;
}

static int
hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
                                uint32_t node_id,
                                uint32_t shaper_profile_id,
                                struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        rte_spinlock_lock(&hw->lock);
        ret = hns3_tm_node_shaper_update(dev, node_id,
                                         shaper_profile_id, error);
        rte_spinlock_unlock(&hw->lock);

        return ret;
}

static const struct rte_tm_ops hns3_tm_ops = {
        .capabilities_get       = hns3_tm_capabilities_get,
        .shaper_profile_add     = hns3_tm_shaper_profile_add,
        .shaper_profile_delete  = hns3_tm_shaper_profile_del,
        .node_add               = hns3_tm_node_add,
        .node_delete            = hns3_tm_node_delete,
        .node_type_get          = hns3_tm_node_type_get,
        .level_capabilities_get = hns3_tm_level_capabilities_get,
        .node_capabilities_get  = hns3_tm_node_capabilities_get,
        .hierarchy_commit       = hns3_tm_hierarchy_commit_wrap,
        .node_shaper_update     = hns3_tm_node_shaper_update_wrap,
};

int
hns3_tm_ops_get(struct rte_eth_dev *dev, void *arg)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (arg == NULL)
                return -EINVAL;

        if (!hns3_dev_tm_supported(hw))
                return -EOPNOTSUPP;

        *(const void **)arg = &hns3_tm_ops;

        return 0;
}
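
/*
 * Illustrative usage sketch (not part of the driver, kept out of the build):
 * an application drives the ops above through the generic rte_tm API from
 * <rte_tm.h>. The hierarchy is port (root) -> TC -> queue, with node IDs laid
 * out as described near hns3_tm_node_param_check(). The 1 Gbit/s rate and the
 * single TC/queue are arbitrary example values, the queue is assumed to be
 * mapped to TC 0, and error checks are omitted for brevity.
 */
#if 0
static void
hns3_tm_usage_sketch(uint16_t port_id, uint32_t nb_leaf_nodes_max)
{
        /* 1 Gbit/s expressed in bytes per second, as rte_tm expects */
        struct rte_tm_shaper_params sp = { .peak = { .rate = 125000000 } };
        struct rte_tm_node_params nonleaf = {
                .shaper_profile_id = 0,
                .nonleaf = { .n_sp_priorities = 1 },
        };
        struct rte_tm_node_params leaf = {
                .shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
                .leaf = {
                        .cman = RTE_TM_CMAN_TAIL_DROP,
                        .wred = { .wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE },
                },
        };
        uint32_t root_id = nb_leaf_nodes_max + HNS3_MAX_TC_NUM; /* nb_nodes_max - 1 */
        uint32_t tc0_id = nb_leaf_nodes_max; /* first TC node ID */
        struct rte_tm_error err;

        /* register a shaper profile, then build the hierarchy top-down */
        rte_tm_shaper_profile_add(port_id, 0, &sp, &err);
        rte_tm_node_add(port_id, root_id, RTE_TM_NODE_ID_NULL, 0, 1,
                        HNS3_TM_NODE_LEVEL_PORT, &nonleaf, &err);
        nonleaf.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
        rte_tm_node_add(port_id, tc0_id, root_id, 0, 1,
                        HNS3_TM_NODE_LEVEL_TC, &nonleaf, &err);
        rte_tm_node_add(port_id, 0, tc0_id, 0, 1,
                        HNS3_TM_NODE_LEVEL_QUEUE, &leaf, &err);

        /* nothing reaches the hardware until the commit */
        rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &err);
}
#endif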

void
hns3_tm_dev_start_proc(struct hns3_hw *hw)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);

        if (!hns3_dev_tm_supported(hw))
                return;

        if (pf->tm_conf.root && !pf->tm_conf.committed)
                hns3_warn(hw,
                    "please call hierarchy_commit() before starting the port.");
}

/*
 * We need to clear the tm_conf committed flag when the device stops so that
 * the user can modify the tm configuration (e.g. add or delete a node).
 *
 * If the user doesn't call hierarchy commit again before the next start, the
 * Port/TC shaper rates would stay the same as previously committed.
 *
 * To avoid that problem, we also restore the default Port/TC shaper rates
 * when the device stops.
 */
void
hns3_tm_dev_stop_proc(struct hns3_hw *hw)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
        struct hns3_tm_node *tm_node;
        uint8_t tc_no;

        if (!pf->tm_conf.committed)
                return;

        tm_node = pf->tm_conf.root;
        if (tm_node != NULL && tm_node->shaper_profile)
                (void)hns3_tm_config_port_rate(hw, NULL);

        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (tm_node->shaper_profile == NULL)
                        continue;
                tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
                (void)hns3_tm_config_tc_rate(hw, tc_no, NULL);
        }

        pf->tm_conf.committed = false;
}

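/*
 * Re-apply the committed Port/TC shaper configuration to the hardware, for
 * cases where the driver has to restore its configuration (e.g. after a
 * device reset).
 */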
int
hns3_tm_conf_update(struct hns3_hw *hw)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        struct rte_tm_error error;

        if (!hns3_dev_tm_supported(hw))
                return 0;

        if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)
                return 0;

        memset(&error, 0, sizeof(struct rte_tm_error));
        return hns3_tm_hierarchy_do_commit(hw, &error);
}