net/hns3: fix traffic management support check
dpdk.git: drivers/net/hns3/hns3_tm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 HiSilicon Limited.
 */

#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_dcb.h"
#include "hns3_logs.h"
#include "hns3_tm.h"

static inline uint32_t
hns3_tm_max_tx_queues_get(struct rte_eth_dev *dev)
{
        /*
         * This API may be called in the PCI device probe stage, when
         * rte_eth_dev_info_get() cannot be used to obtain max_tx_queues
         * (rte_eth_devices is not set up yet), so hns3_dev_infos_get()
         * is called directly instead.
         */
        struct rte_eth_dev_info dev_info;

        memset(&dev_info, 0, sizeof(dev_info));
        (void)hns3_dev_infos_get(dev, &dev_info);
        return RTE_MIN(dev_info.max_tx_queues, RTE_MAX_QUEUES_PER_PORT);
}

void
hns3_tm_conf_init(struct rte_eth_dev *dev)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
        pf->tm_conf.nb_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
        pf->tm_conf.nb_shaper_profile_max = 1 + HNS3_MAX_TC_NUM;

        TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
        pf->tm_conf.nb_shaper_profile = 0;

        pf->tm_conf.root = NULL;
        TAILQ_INIT(&pf->tm_conf.tc_list);
        TAILQ_INIT(&pf->tm_conf.queue_list);
        pf->tm_conf.nb_tc_node = 0;
        pf->tm_conf.nb_queue_node = 0;

        pf->tm_conf.committed = false;
}
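
/*
 * A sketch of the node-ID layout implied by the limits above, which the
 * node-add checks below rely on (the worked numbers assume
 * HNS3_MAX_TC_NUM == 8 and are illustrative only):
 *   queue (leaf) nodes: IDs [0, max_tx_queues)
 *   TC nodes:           IDs [max_tx_queues, max_tx_queues + HNS3_MAX_TC_NUM)
 *   port (root) node:   ID nb_nodes_max - 1
 * E.g. with max_tx_queues = 8: queues use IDs 0-7, TCs use IDs 8-15, and
 * the port node uses ID 16.
 */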

void
hns3_tm_conf_uninit(struct rte_eth_dev *dev)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_shaper_profile *shaper_profile;
        struct hns3_tm_node *tm_node;

        if (pf->tm_conf.nb_queue_node > 0) {
                while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
                        TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
                        rte_free(tm_node);
                }
                pf->tm_conf.nb_queue_node = 0;
        }

        if (pf->tm_conf.nb_tc_node > 0) {
                while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
                        TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
                        rte_free(tm_node);
                }
                pf->tm_conf.nb_tc_node = 0;
        }

        if (pf->tm_conf.root != NULL) {
                rte_free(pf->tm_conf.root);
                pf->tm_conf.root = NULL;
        }

        if (pf->tm_conf.nb_shaper_profile > 0) {
                while ((shaper_profile =
                       TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
                        TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
                                     shaper_profile, node);
                        rte_free(shaper_profile);
                }
                pf->tm_conf.nb_shaper_profile = 0;
        }

        pf->tm_conf.nb_leaf_nodes_max = 0;
        pf->tm_conf.nb_nodes_max = 0;
        pf->tm_conf.nb_shaper_profile_max = 0;
}

static inline uint64_t
hns3_tm_rate_convert_firmware2tm(uint32_t firmware_rate)
{
#define FIRMWARE_TO_TM_RATE_SCALE       125000
        /* tm rate unit is Bps, firmware rate is Mbps */
        return ((uint64_t)firmware_rate) * FIRMWARE_TO_TM_RATE_SCALE;
}

static inline uint32_t
hns3_tm_rate_convert_tm2firmware(uint64_t tm_rate)
{
#define TM_TO_FIRMWARE_RATE_SCALE       125000
        /* tm rate unit is Bps, firmware rate is Mbps */
        return (uint32_t)(tm_rate / TM_TO_FIRMWARE_RATE_SCALE);
}
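
/*
 * Worked example for the 125000 scale factor above: 1 Mbit/s = 1,000,000
 * bit/s = 125,000 byte/s. So a firmware rate of 200 (Mbps) converts to
 * 200 * 125000 = 25,000,000 Bps at the TM layer, and 25,000,000 Bps
 * converts back to 25,000,000 / 125000 = 200 Mbps.
 */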

static int
hns3_tm_capabilities_get(struct rte_eth_dev *dev,
                         struct rte_tm_capabilities *cap,
                         struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        if (cap == NULL || error == NULL)
                return -EINVAL;

        error->type = RTE_TM_ERROR_TYPE_NONE;

        memset(cap, 0, sizeof(struct rte_tm_capabilities));

        cap->n_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
        cap->n_levels_max = HNS3_TM_NODE_LEVEL_MAX;
        cap->non_leaf_nodes_identical = 1;
        cap->leaf_nodes_identical = 1;
        cap->shaper_n_max = 1 + HNS3_MAX_TC_NUM;
        cap->shaper_private_n_max = 1 + HNS3_MAX_TC_NUM;
        cap->shaper_private_dual_rate_n_max = 0;
        cap->shaper_private_rate_min = 0;
        cap->shaper_private_rate_max =
                hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
        cap->shaper_shared_n_max = 0;
        cap->shaper_shared_n_nodes_per_shaper_max = 0;
        cap->shaper_shared_n_shapers_per_node_max = 0;
        cap->shaper_shared_dual_rate_n_max = 0;
        cap->shaper_shared_rate_min = 0;
        cap->shaper_shared_rate_max = 0;

        cap->sched_n_children_max = max_tx_queues;
        cap->sched_sp_n_priorities_max = 1;
        cap->sched_wfq_n_children_per_group_max = 0;
        cap->sched_wfq_n_groups_max = 0;
        cap->sched_wfq_weight_max = 1;

        cap->cman_head_drop_supported = 0;
        cap->dynamic_update_mask = 0;
        cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
        cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
        cap->cman_wred_context_n_max = 0;
        cap->cman_wred_context_private_n_max = 0;
        cap->cman_wred_context_shared_n_max = 0;
        cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
        cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
        cap->stats_mask = 0;

        return 0;
}

static struct hns3_tm_shaper_profile *
hns3_tm_shaper_profile_search(struct rte_eth_dev *dev,
                              uint32_t shaper_profile_id)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_shaper_profile_list *shaper_profile_list =
                &pf->tm_conf.shaper_profile_list;
        struct hns3_tm_shaper_profile *shaper_profile;

        TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
                if (shaper_profile_id == shaper_profile->shaper_profile_id)
                        return shaper_profile;
        }

        return NULL;
}

static int
hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
                                   struct rte_tm_shaper_params *profile,
                                   struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (profile->committed.rate) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
                error->message = "committed rate not supported";
                return -EINVAL;
        }

        if (profile->committed.size) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
                error->message = "committed bucket size not supported";
                return -EINVAL;
        }

        if (profile->peak.rate >
            hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate)) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
                error->message = "peak rate too large";
                return -EINVAL;
        }

        if (profile->peak.rate < hns3_tm_rate_convert_firmware2tm(1)) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
                error->message = "peak rate must be at least 1 Mbps";
                return -EINVAL;
        }

        if (profile->peak.size) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
                error->message = "peak bucket size not supported";
                return -EINVAL;
        }

        if (profile->pkt_length_adjust) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
                error->message = "packet length adjustment not supported";
                return -EINVAL;
        }

        if (profile->packet_mode) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE;
                error->message = "packet mode not supported";
                return -EINVAL;
        }

        return 0;
}
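
/*
 * A minimal sketch (values illustrative) of shaper parameters that pass the
 * checks above: only the peak rate is set, expressed in Bps, and it must not
 * exceed the converted hw->max_tm_rate:
 *
 *         struct rte_tm_shaper_params params = {
 *                 .committed = { .rate = 0, .size = 0 },
 *                 .peak = { .rate = 1250000000, .size = 0 }, // 10 Gbit/s
 *                 .pkt_length_adjust = 0,
 *                 .packet_mode = 0,
 *         };
 */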

static int
hns3_tm_shaper_profile_add(struct rte_eth_dev *dev,
                           uint32_t shaper_profile_id,
                           struct rte_tm_shaper_params *profile,
                           struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_shaper_profile *shaper_profile;
        int ret;

        if (profile == NULL || error == NULL)
                return -EINVAL;

        if (pf->tm_conf.nb_shaper_profile >=
            pf->tm_conf.nb_shaper_profile_max) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "too many profiles";
                return -EINVAL;
        }

        ret = hns3_tm_shaper_profile_param_check(dev, profile, error);
        if (ret)
                return ret;

        shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
        if (shaper_profile) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                error->message = "profile ID already exists";
                return -EINVAL;
        }

        shaper_profile = rte_zmalloc("hns3_tm_shaper_profile",
                                     sizeof(struct hns3_tm_shaper_profile),
                                     0);
        if (shaper_profile == NULL)
                return -ENOMEM;

        shaper_profile->shaper_profile_id = shaper_profile_id;
        memcpy(&shaper_profile->profile, profile,
               sizeof(struct rte_tm_shaper_params));
        TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
                          shaper_profile, node);
        pf->tm_conf.nb_shaper_profile++;

        return 0;
}

static int
hns3_tm_shaper_profile_del(struct rte_eth_dev *dev,
                           uint32_t shaper_profile_id,
                           struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_shaper_profile *shaper_profile;

        if (error == NULL)
                return -EINVAL;

        shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
        if (shaper_profile == NULL) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                error->message = "profile ID does not exist";
                return -EINVAL;
        }

        if (shaper_profile->reference_count) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
                error->message = "profile in use";
                return -EINVAL;
        }

        TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
        rte_free(shaper_profile);
        pf->tm_conf.nb_shaper_profile--;

        return 0;
}

static struct hns3_tm_node *
hns3_tm_node_search(struct rte_eth_dev *dev,
                    uint32_t node_id,
                    enum hns3_tm_node_type *node_type)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_node_list *queue_list = &pf->tm_conf.queue_list;
        struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
        struct hns3_tm_node *tm_node;

        if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
                *node_type = HNS3_TM_NODE_TYPE_PORT;
                return pf->tm_conf.root;
        }

        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (tm_node->id == node_id) {
                        *node_type = HNS3_TM_NODE_TYPE_TC;
                        return tm_node;
                }
        }

        TAILQ_FOREACH(tm_node, queue_list, node) {
                if (tm_node->id == node_id) {
                        *node_type = HNS3_TM_NODE_TYPE_QUEUE;
                        return tm_node;
                }
        }

        return NULL;
}

static int
hns3_tm_nonleaf_node_param_check(struct rte_eth_dev *dev,
                                 struct rte_tm_node_params *params,
                                 struct rte_tm_error *error)
{
        struct hns3_tm_shaper_profile *shaper_profile;

        if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
                shaper_profile = hns3_tm_shaper_profile_search(dev,
                                 params->shaper_profile_id);
                if (shaper_profile == NULL) {
                        error->type =
                                RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
                        error->message = "shaper profile does not exist";
                        return -EINVAL;
                }
        }

        if (params->nonleaf.wfq_weight_mode) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
                error->message = "WFQ not supported";
                return -EINVAL;
        }

        if (params->nonleaf.n_sp_priorities != 1) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
                error->message = "only one SP priority is supported";
                return -EINVAL;
        }

        return 0;
}

static int
hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev __rte_unused,
                              struct rte_tm_node_params *params,
                              struct rte_tm_error *error)
{
        if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
                error->message = "shaper not supported";
                return -EINVAL;
        }

        if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
                error->message = "congestion management not supported";
                return -EINVAL;
        }

        if (params->leaf.wred.wred_profile_id != RTE_TM_WRED_PROFILE_ID_NONE) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
                error->message = "WRED not supported";
                return -EINVAL;
        }

        if (params->leaf.wred.shared_wred_context_id) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
                error->message = "WRED not supported";
                return -EINVAL;
        }

        if (params->leaf.wred.n_shared_wred_contexts) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
                error->message = "WRED not supported";
                return -EINVAL;
        }

        return 0;
}

static int
hns3_tm_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
                         uint32_t priority, uint32_t weight,
                         struct rte_tm_node_params *params,
                         struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;

        if (node_id == RTE_TM_NODE_ID_NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid node id";
                return -EINVAL;
        }

        if (hns3_tm_node_search(dev, node_id, &node_type)) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "node id already used";
                return -EINVAL;
        }

        if (priority) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
                error->message = "priority should be 0";
                return -EINVAL;
        }

        if (weight != 1) {
                error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
                error->message = "weight must be 1";
                return -EINVAL;
        }

        if (params->shared_shaper_id) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
                error->message = "shared shaper not supported";
                return -EINVAL;
        }

        if (params->n_shared_shapers) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
                error->message = "shared shaper not supported";
                return -EINVAL;
        }

        if (node_id >= pf->tm_conf.nb_leaf_nodes_max)
                return hns3_tm_nonleaf_node_param_check(dev, params, error);
        else
                return hns3_tm_leaf_node_param_check(dev, params, error);
}

static int
hns3_tm_port_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                      uint32_t level_id, struct rte_tm_node_params *params,
                      struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_node *tm_node;

        if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
            level_id != HNS3_TM_NODE_LEVEL_PORT) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                error->message = "wrong level";
                return -EINVAL;
        }

        if (node_id != pf->tm_conf.nb_nodes_max - 1) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid port node ID";
                return -EINVAL;
        }

        if (pf->tm_conf.root) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                error->message = "already have a root";
                return -EINVAL;
        }

        tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
        if (tm_node == NULL)
                return -ENOMEM;

        tm_node->id = node_id;
        tm_node->reference_count = 0;
        tm_node->parent = NULL;
        tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
                                  params->shaper_profile_id);
        memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
        pf->tm_conf.root = tm_node;

        if (tm_node->shaper_profile)
                tm_node->shaper_profile->reference_count++;

        return 0;
}

static int
hns3_tm_tc_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                    uint32_t level_id, struct hns3_tm_node *parent_node,
                    struct rte_tm_node_params *params,
                    struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_node *tm_node;

        if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
            level_id != HNS3_TM_NODE_LEVEL_TC) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                error->message = "wrong level";
                return -EINVAL;
        }

        if (node_id >= pf->tm_conf.nb_nodes_max - 1 ||
            node_id < pf->tm_conf.nb_leaf_nodes_max ||
            hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id) >= hw->num_tc) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid tc node ID";
                return -EINVAL;
        }

        if (pf->tm_conf.nb_tc_node >= hw->num_tc) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "too many TCs";
                return -EINVAL;
        }

        tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
        if (tm_node == NULL)
                return -ENOMEM;

        tm_node->id = node_id;
        tm_node->reference_count = 0;
        tm_node->parent = parent_node;
        tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
                                        params->shaper_profile_id);
        memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
        TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list, tm_node, node);
        pf->tm_conf.nb_tc_node++;
        tm_node->parent->reference_count++;

        if (tm_node->shaper_profile)
                tm_node->shaper_profile->reference_count++;

        return 0;
}

static int
hns3_tm_queue_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                       uint32_t level_id, struct hns3_tm_node *parent_node,
                       struct rte_tm_node_params *params,
                       struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct hns3_tm_node *tm_node;

        if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
            level_id != HNS3_TM_NODE_LEVEL_QUEUE) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                error->message = "wrong level";
                return -EINVAL;
        }

        /* note: dev->data->nb_tx_queues <= max_tx_queues */
        if (node_id >= dev->data->nb_tx_queues) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid queue node ID";
                return -EINVAL;
        }

        if (hns3_txq_mapped_tc_get(hw, node_id) !=
            hns3_tm_calc_node_tc_no(&pf->tm_conf, parent_node->id)) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "queue's TC does not match parent's TC";
                return -EINVAL;
        }

        tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
        if (tm_node == NULL)
                return -ENOMEM;

        tm_node->id = node_id;
        tm_node->reference_count = 0;
        tm_node->parent = parent_node;
        memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
        TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list, tm_node, node);
        pf->tm_conf.nb_queue_node++;
        tm_node->parent->reference_count++;

        return 0;
}

static int
hns3_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                 uint32_t parent_node_id, uint32_t priority,
                 uint32_t weight, uint32_t level_id,
                 struct rte_tm_node_params *params,
                 struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum hns3_tm_node_type parent_node_type = HNS3_TM_NODE_TYPE_MAX;
        struct hns3_tm_node *parent_node;
        int ret;

        if (params == NULL || error == NULL)
                return -EINVAL;

        if (pf->tm_conf.committed) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "already committed";
                return -EINVAL;
        }

        ret = hns3_tm_node_param_check(dev, node_id, priority, weight,
                                       params, error);
        if (ret)
                return ret;

        /* The root node is the only node without a parent. */
        if (parent_node_id == RTE_TM_NODE_ID_NULL)
                return hns3_tm_port_node_add(dev, node_id, level_id,
                                             params, error);

        parent_node = hns3_tm_node_search(dev, parent_node_id,
                                          &parent_node_type);
        if (parent_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                error->message = "parent does not exist";
                return -EINVAL;
        }

        if (parent_node_type != HNS3_TM_NODE_TYPE_PORT &&
            parent_node_type != HNS3_TM_NODE_TYPE_TC) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                error->message = "parent is not port or TC";
                return -EINVAL;
        }

        if (parent_node_type == HNS3_TM_NODE_TYPE_PORT)
                return hns3_tm_tc_node_add(dev, node_id, level_id,
                                           parent_node, params, error);
        else
                return hns3_tm_queue_node_add(dev, node_id, level_id,
                                              parent_node, params, error);
}

static void
hns3_tm_node_do_delete(struct hns3_pf *pf,
                       enum hns3_tm_node_type node_type,
                       struct hns3_tm_node *tm_node)
{
        if (node_type == HNS3_TM_NODE_TYPE_PORT) {
                if (tm_node->shaper_profile)
                        tm_node->shaper_profile->reference_count--;
                rte_free(tm_node);
                pf->tm_conf.root = NULL;
                return;
        }

        if (tm_node->shaper_profile)
                tm_node->shaper_profile->reference_count--;
        tm_node->parent->reference_count--;
        if (node_type == HNS3_TM_NODE_TYPE_TC) {
                TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
                pf->tm_conf.nb_tc_node--;
        } else {
                TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
                pf->tm_conf.nb_queue_node--;
        }
        rte_free(tm_node);
}

static int
hns3_tm_node_delete(struct rte_eth_dev *dev,
                    uint32_t node_id,
                    struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
        struct hns3_tm_node *tm_node;

        if (error == NULL)
                return -EINVAL;

        if (pf->tm_conf.committed) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "already committed";
                return -EINVAL;
        }

        tm_node = hns3_tm_node_search(dev, node_id, &node_type);
        if (tm_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        if (tm_node->reference_count) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "cannot delete a node which has children";
                return -EINVAL;
        }

        hns3_tm_node_do_delete(pf, node_type, tm_node);

        return 0;
}

static int
hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
                      int *is_leaf, struct rte_tm_error *error)
{
        enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
        struct hns3_tm_node *tm_node;

        if (is_leaf == NULL || error == NULL)
                return -EINVAL;

        tm_node = hns3_tm_node_search(dev, node_id, &node_type);
        if (tm_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        if (node_type == HNS3_TM_NODE_TYPE_QUEUE)
                *is_leaf = true;
        else
                *is_leaf = false;

        return 0;
}

static void
hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev,
                                       uint32_t level_id,
                                       struct rte_tm_level_capabilities *cap)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        if (level_id == HNS3_TM_NODE_LEVEL_PORT) {
                cap->n_nodes_max = 1;
                cap->n_nodes_nonleaf_max = 1;
                cap->n_nodes_leaf_max = 0;
        } else {
                cap->n_nodes_max = HNS3_MAX_TC_NUM;
                cap->n_nodes_nonleaf_max = HNS3_MAX_TC_NUM;
                cap->n_nodes_leaf_max = 0;
        }

        cap->non_leaf_nodes_identical = 1;
        cap->leaf_nodes_identical = 1;

        cap->nonleaf.shaper_private_supported = true;
        cap->nonleaf.shaper_private_dual_rate_supported = false;
        cap->nonleaf.shaper_private_rate_min = 0;
        cap->nonleaf.shaper_private_rate_max =
                hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
        cap->nonleaf.shaper_shared_n_max = 0;
        if (level_id == HNS3_TM_NODE_LEVEL_PORT)
                cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
        else
                cap->nonleaf.sched_n_children_max = max_tx_queues;
        cap->nonleaf.sched_sp_n_priorities_max = 1;
        cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
        cap->nonleaf.sched_wfq_n_groups_max = 0;
        cap->nonleaf.sched_wfq_weight_max = 1;
        cap->nonleaf.stats_mask = 0;
}

static void
hns3_tm_leaf_level_capabilities_get(struct rte_eth_dev *dev,
                                    struct rte_tm_level_capabilities *cap)
{
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        cap->n_nodes_max = max_tx_queues;
        cap->n_nodes_nonleaf_max = 0;
        cap->n_nodes_leaf_max = max_tx_queues;

        cap->non_leaf_nodes_identical = 1;
        cap->leaf_nodes_identical = 1;

        cap->leaf.shaper_private_supported = false;
        cap->leaf.shaper_private_dual_rate_supported = false;
        cap->leaf.shaper_private_rate_min = 0;
        cap->leaf.shaper_private_rate_max = 0;
        cap->leaf.shaper_shared_n_max = 0;
        cap->leaf.cman_head_drop_supported = false;
        cap->leaf.cman_wred_context_private_supported = false;
        cap->leaf.cman_wred_context_shared_n_max = 0;
        cap->leaf.stats_mask = 0;
}

static int
hns3_tm_level_capabilities_get(struct rte_eth_dev *dev,
                               uint32_t level_id,
                               struct rte_tm_level_capabilities *cap,
                               struct rte_tm_error *error)
{
        if (cap == NULL || error == NULL)
                return -EINVAL;

        if (level_id >= HNS3_TM_NODE_LEVEL_MAX) {
                error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
                error->message = "level too deep";
                return -EINVAL;
        }

        memset(cap, 0, sizeof(struct rte_tm_level_capabilities));

        if (level_id != HNS3_TM_NODE_LEVEL_QUEUE)
                hns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap);
        else
                hns3_tm_leaf_level_capabilities_get(dev, cap);

        return 0;
}

static void
hns3_tm_nonleaf_node_capabilities_get(struct rte_eth_dev *dev,
                                      enum hns3_tm_node_type node_type,
                                      struct rte_tm_node_capabilities *cap)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

        cap->shaper_private_supported = true;
        cap->shaper_private_dual_rate_supported = false;
        cap->shaper_private_rate_min = 0;
        cap->shaper_private_rate_max =
                hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
        cap->shaper_shared_n_max = 0;

        if (node_type == HNS3_TM_NODE_TYPE_PORT)
                cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
        else
                cap->nonleaf.sched_n_children_max = max_tx_queues;
        cap->nonleaf.sched_sp_n_priorities_max = 1;
        cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
        cap->nonleaf.sched_wfq_n_groups_max = 0;
        cap->nonleaf.sched_wfq_weight_max = 1;

        cap->stats_mask = 0;
}

static void
hns3_tm_leaf_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
                                   struct rte_tm_node_capabilities *cap)
{
        cap->shaper_private_supported = false;
        cap->shaper_private_dual_rate_supported = false;
        cap->shaper_private_rate_min = 0;
        cap->shaper_private_rate_max = 0;
        cap->shaper_shared_n_max = 0;

        cap->leaf.cman_head_drop_supported = false;
        cap->leaf.cman_wred_context_private_supported = false;
        cap->leaf.cman_wred_context_shared_n_max = 0;

        cap->stats_mask = 0;
}

static int
hns3_tm_node_capabilities_get(struct rte_eth_dev *dev,
                              uint32_t node_id,
                              struct rte_tm_node_capabilities *cap,
                              struct rte_tm_error *error)
{
        enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
        struct hns3_tm_node *tm_node;

        if (cap == NULL || error == NULL)
                return -EINVAL;

        tm_node = hns3_tm_node_search(dev, node_id, &node_type);
        if (tm_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        memset(cap, 0, sizeof(struct rte_tm_node_capabilities));

        if (node_type != HNS3_TM_NODE_TYPE_QUEUE)
                hns3_tm_nonleaf_node_capabilities_get(dev, node_type, cap);
        else
                hns3_tm_leaf_node_capabilities_get(dev, cap);

        return 0;
}

static int
hns3_tm_config_port_rate(struct hns3_hw *hw,
                         struct hns3_tm_shaper_profile *shaper_profile)
{
        uint32_t firmware_rate;
        uint64_t rate;

        if (shaper_profile) {
                rate = shaper_profile->profile.peak.rate;
                firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
        } else {
                firmware_rate = hw->dcb_info.pg_info[0].bw_limit;
        }

        /*
         * The TM shaper topology after device initialization:
         *     pri0 shaper   --->|
         *     pri1 shaper   --->|
         *     ...               |----> pg0 shaper ----> port shaper
         *     ...               |
         *     priX shaper   --->|
         *
         * Because the port shaper rate may be changed by firmware, the driver
         * uses the pg0 shaper to implement the port rate limit, which avoids
         * configuring the port shaper concurrently with firmware.
         *
         * The final port rate = MIN(pg0 shaper rate, port shaper rate)
         */
        return hns3_pg_shaper_rate_cfg(hw, 0, firmware_rate);
}
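
/*
 * Worked example of the MIN() relationship above (numbers illustrative):
 * if firmware keeps the port shaper at 10000 Mbps and a committed profile
 * sets the pg0 shaper to 2000 Mbps, the effective egress rate is
 * MIN(2000, 10000) = 2000 Mbps.
 */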

static int
hns3_tm_config_tc_rate(struct hns3_hw *hw,
                       uint8_t tc_no,
                       struct hns3_tm_shaper_profile *shaper_profile)
{
        uint32_t firmware_rate;
        uint64_t rate;

        if (shaper_profile) {
                rate = shaper_profile->profile.peak.rate;
                firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
        } else {
                firmware_rate = hw->dcb_info.tc_info[tc_no].bw_limit;
        }

        return hns3_pri_shaper_rate_cfg(hw, tc_no, firmware_rate);
}

static bool
hns3_tm_configure_check(struct hns3_hw *hw, struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        struct hns3_tm_conf *tm_conf = &pf->tm_conf;
        struct hns3_tm_node_list *tc_list = &tm_conf->tc_list;
        struct hns3_tm_node_list *queue_list = &tm_conf->queue_list;
        struct hns3_tm_node *tm_node;

        /* TC */
        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (!tm_node->reference_count) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                        error->message = "TC without queue assigned";
                        return false;
                }

                if (hns3_tm_calc_node_tc_no(tm_conf, tm_node->id) >=
                        hw->num_tc) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "node's TC does not exist";
                        return false;
                }
        }

        /* Queue */
        TAILQ_FOREACH(tm_node, queue_list, node) {
                if (tm_node->id >= hw->data->nb_tx_queues) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "node's queue invalid";
                        return false;
                }

                if (hns3_txq_mapped_tc_get(hw, tm_node->id) !=
                    hns3_tm_calc_node_tc_no(tm_conf, tm_node->parent->id)) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "queue's TC does not match parent's TC";
                        return false;
                }
        }

        return true;
}

static int
hns3_tm_hierarchy_do_commit(struct hns3_hw *hw,
                            struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
        struct hns3_tm_node *tm_node;
        uint8_t tc_no;
        int ret;

        /* port */
        tm_node = pf->tm_conf.root;
        if (tm_node->shaper_profile) {
                ret = hns3_tm_config_port_rate(hw, tm_node->shaper_profile);
                if (ret) {
                        error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
                        error->message = "failed to set port peak rate";
                        return -EIO;
                }
        }

        /* TC */
        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (tm_node->shaper_profile == NULL)
                        continue;

                tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
                ret = hns3_tm_config_tc_rate(hw, tc_no,
                                             tm_node->shaper_profile);
                if (ret) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                        error->message = "failed to set TC peak rate";
                        return -EIO;
                }
        }

        return 0;
}

static int
hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
                         int clear_on_fail,
                         struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        int ret;

        if (error == NULL)
                return -EINVAL;

        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "device is resetting";
                /* Don't goto fail_clear, the user may retry later. */
                return -EBUSY;
        }

        if (pf->tm_conf.root == NULL)
                goto done;

        /*
         * Check the configuration before commit to make sure key
         * constraints are not violated.
         */
        if (!hns3_tm_configure_check(hw, error))
                goto fail_clear;

        ret = hns3_tm_hierarchy_do_commit(hw, error);
        if (ret)
                goto fail_clear;

done:
        pf->tm_conf.committed = true;
        return 0;

fail_clear:
        if (clear_on_fail) {
                hns3_tm_conf_uninit(dev);
                hns3_tm_conf_init(dev);
        }
        return -EINVAL;
}

static int
hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
                              int clear_on_fail,
                              struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        rte_spinlock_lock(&hw->lock);
        ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
        rte_spinlock_unlock(&hw->lock);

        return ret;
}

static int
hns3_tm_node_shaper_do_update(struct hns3_hw *hw,
                              uint32_t node_id,
                              enum hns3_tm_node_type node_type,
                              struct hns3_tm_shaper_profile *shaper_profile,
                              struct rte_tm_error *error)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        uint8_t tc_no;
        int ret;

        if (node_type == HNS3_TM_NODE_TYPE_QUEUE) {
                if (shaper_profile != NULL) {
                        error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                        error->message = "queue node shaper not supported";
                        return -EINVAL;
                }
                return 0;
        }

        if (!pf->tm_conf.committed)
                return 0;

        if (node_type == HNS3_TM_NODE_TYPE_PORT) {
                ret = hns3_tm_config_port_rate(hw, shaper_profile);
                if (ret) {
                        error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
                        error->message = "failed to update port peak rate";
                }

                return ret;
        }

        /* Update the TC's shaper. */
        tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id);
        ret = hns3_tm_config_tc_rate(hw, tc_no, shaper_profile);
        if (ret) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
                error->message = "failed to update TC peak rate";
        }

        return ret;
}

static int
hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
                           uint32_t node_id,
                           uint32_t shaper_profile_id,
                           struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
        struct hns3_tm_shaper_profile *profile = NULL;
        struct hns3_tm_node *tm_node;

        if (error == NULL)
                return -EINVAL;

        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "device is resetting";
                return -EBUSY;
        }

        tm_node = hns3_tm_node_search(dev, node_id, &node_type);
        if (tm_node == NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        if (shaper_profile_id == tm_node->params.shaper_profile_id)
                return 0;

        if (shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
                profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
                if (profile == NULL) {
                        error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                        error->message = "profile ID does not exist";
                        return -EINVAL;
                }
        }

        if (hns3_tm_node_shaper_do_update(hw, node_id, node_type,
                                          profile, error))
                return -EINVAL;

        if (tm_node->shaper_profile)
                tm_node->shaper_profile->reference_count--;
        tm_node->shaper_profile = profile;
        tm_node->params.shaper_profile_id = shaper_profile_id;
        if (profile != NULL)
                profile->reference_count++;

        return 0;
}

static int
hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
                                uint32_t node_id,
                                uint32_t shaper_profile_id,
                                struct rte_tm_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        rte_spinlock_lock(&hw->lock);
        ret = hns3_tm_node_shaper_update(dev, node_id,
                                         shaper_profile_id, error);
        rte_spinlock_unlock(&hw->lock);

        return ret;
}

static const struct rte_tm_ops hns3_tm_ops = {
        .capabilities_get       = hns3_tm_capabilities_get,
        .shaper_profile_add     = hns3_tm_shaper_profile_add,
        .shaper_profile_delete  = hns3_tm_shaper_profile_del,
        .node_add               = hns3_tm_node_add,
        .node_delete            = hns3_tm_node_delete,
        .node_type_get          = hns3_tm_node_type_get,
        .level_capabilities_get = hns3_tm_level_capabilities_get,
        .node_capabilities_get  = hns3_tm_node_capabilities_get,
        .hierarchy_commit       = hns3_tm_hierarchy_commit_wrap,
        .node_shaper_update     = hns3_tm_node_shaper_update_wrap,
};

int
hns3_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
                void *arg)
{
        if (arg == NULL)
                return -EINVAL;

        *(const void **)arg = &hns3_tm_ops;

        return 0;
}
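
/*
 * A minimal usage sketch of the ops exported above, driven through the
 * generic rte_tm API. Node IDs follow the layout described after
 * hns3_tm_conf_init(); all IDs and rates are illustrative:
 *
 *         struct rte_tm_error err;
 *         struct rte_tm_shaper_params sp = {
 *                 .peak = { .rate = 1250000000 }, // 10 Gbit/s in Bps
 *         };
 *         struct rte_tm_node_params np = {
 *                 .shaper_profile_id = 0,
 *                 .nonleaf = { .n_sp_priorities = 1 },
 *         };
 *
 *         rte_tm_shaper_profile_add(port_id, 0, &sp, &err);
 *         // port (root) node, e.g. ID 16 when max_tx_queues == 8
 *         rte_tm_node_add(port_id, 16, RTE_TM_NODE_ID_NULL, 0, 1,
 *                         RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *         // ... add TC and queue nodes similarly, then commit:
 *         rte_tm_hierarchy_commit(port_id, 1, &err);
 */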

void
hns3_tm_dev_start_proc(struct hns3_hw *hw)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);

        if (pf->tm_conf.root && !pf->tm_conf.committed)
                hns3_warn(hw,
                    "please call hierarchy_commit() before starting the port.");
}

/*
 * Clear the tm_conf committed flag when the device stops so that the user can
 * modify the TM configuration (e.g. add or delete a node).
 *
 * However, if the user does not call hierarchy commit before the device is
 * started again, the port/TC shaper rates would remain as previously
 * committed.
 *
 * To avoid that, the port/TC shaper rates are also restored to their defaults
 * when the device stops.
 */
void
hns3_tm_dev_stop_proc(struct hns3_hw *hw)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
        struct hns3_tm_node *tm_node;
        uint8_t tc_no;

        if (!pf->tm_conf.committed)
                return;

        tm_node = pf->tm_conf.root;
        if (tm_node != NULL && tm_node->shaper_profile)
                (void)hns3_tm_config_port_rate(hw, NULL);

        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (tm_node->shaper_profile == NULL)
                        continue;
                tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
                (void)hns3_tm_config_tc_rate(hw, tc_no, NULL);
        }

        pf->tm_conf.committed = false;
}

int
hns3_tm_conf_update(struct hns3_hw *hw)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        struct rte_tm_error error;

        if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)
                return 0;

        memset(&error, 0, sizeof(struct rte_tm_error));
        return hns3_tm_hierarchy_do_commit(hw, &error);
}
1297 }