net/hns3: support traffic management
[dpdk.git] / drivers / net / hns3 / hns3_dcb.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4
5 #include <rte_io.h>
6 #include <rte_ethdev.h>
7
8 #include "hns3_logs.h"
9 #include "hns3_ethdev.h"
10 #include "hns3_dcb.h"
11
12 #define HNS3_SHAPER_BS_U_DEF    5
13 #define HNS3_SHAPER_BS_S_DEF    20
14 #define BW_MAX_PERCENT          100
15
16 /*
17  * hns3_shaper_para_calc: calculate ir parameter for the shaper
18  * @ir: Rate to be config, its unit is Mbps
19  * @shaper_level: the shaper level. eg: port, pg, priority, queueset
20  * @shaper_para: shaper parameter of IR shaper
21  *
22  * the formula:
23  *
24  *              IR_b * (2 ^ IR_u) * 8
25  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
26  *              Tick * (2 ^ IR_s)
27  *
28  * @return: 0: calculate sucessful, negative: fail
29  */
static int
hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
		      struct hns3_shaper_parameter *shaper_para)
{
#define SHAPER_DEFAULT_IR_B	126
#define DIVISOR_CLK		(1000 * 8)
#define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)

	/* Tick per shaper level; indexed by HNS3_SHAPER_LVL_* values. */
	const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
		6 * 256,    /* Prioriy level */
		6 * 32,     /* Prioriy group level */
		6 * 8,      /* Port level */
		6 * 256     /* Qset level */
	};
	uint8_t ir_u_calc = 0;
	uint8_t ir_s_calc = 0;
	uint32_t denominator;
	uint32_t ir_calc;
	uint32_t tick;

	/* Calc tick */
	if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
		hns3_err(hw,
			 "shaper_level(%u) is greater than HNS3_SHAPER_LVL_CNT(%d)",
			 shaper_level, HNS3_SHAPER_LVL_CNT);
		return -EINVAL;
	}

	if (ir > hw->max_tm_rate) {
		hns3_err(hw, "rate(%u) exceeds the max rate(%u) driver "
			 "supported.", ir, hw->max_tm_rate);
		return -EINVAL;
	}

	tick = tick_array[shaper_level];

	/*
	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	/* Division is rounded to nearest by adding half the divisor first. */
	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		/* Reference speed matches exactly: use the default ir_b. */
		shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		/* The "&& ir" term stops the loop from underflowing when ir == 0. */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
		}

		/* ir_b rounded to nearest: half of DIVISOR_CLK added before dividing. */
		shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				    (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/*
		 * Increasing the numerator to select ir_u value. ir_u_calc will
		 * get maximum value when ir_calc is minimum and ir is maximum.
		 * ir_calc gets minimum value when tick is the maximum value.
		 * At the same time, value of ir_u_calc can only be increased up
		 * to eight after the while loop if the value of ir is equal
		 * to hw->max_tm_rate.
		 */
		uint32_t numerator;
		do {
			ir_u_calc++;
			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		} while (ir_calc < ir);

		if (ir_calc == ir) {
			shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
		} else {
			/* Overshot by one step; back off and recompute ir_b. */
			--ir_u_calc;

			/*
			 * The maximum value of ir_u_calc in this branch is
			 * seven in all cases. Thus, value of denominator can
			 * not be zero here.
			 */
			denominator = DIVISOR_CLK * (1 << ir_u_calc);
			shaper_para->ir_b =
				(ir * tick + (denominator >> 1)) / denominator;
		}
	}

	shaper_para->ir_u = ir_u_calc;
	shaper_para->ir_s = ir_s_calc;

	return 0;
}
123
124 static int
125 hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)
126 {
127 #define HNS3_HALF_BYTE_BIT_OFFSET 4
128         uint8_t tc = hw->dcb_info.prio_tc[pri_id];
129
130         if (tc >= hw->dcb_info.num_tc)
131                 return -EINVAL;
132
133         /*
134          * The register for priority has four bytes, the first bytes includes
135          *  priority0 and priority1, the higher 4bit stands for priority1
136          *  while the lower 4bit stands for priority0, as below:
137          * first byte:  | pri_1 | pri_0 |
138          * second byte: | pri_3 | pri_2 |
139          * third byte:  | pri_5 | pri_4 |
140          * fourth byte: | pri_7 | pri_6 |
141          */
142         pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);
143
144         return 0;
145 }
146
147 static int
148 hns3_up_to_tc_map(struct hns3_hw *hw)
149 {
150         struct hns3_cmd_desc desc;
151         uint8_t *pri = (uint8_t *)desc.data;
152         uint8_t pri_id;
153         int ret;
154
155         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);
156
157         for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {
158                 ret = hns3_fill_pri_array(hw, pri, pri_id);
159                 if (ret)
160                         return ret;
161         }
162
163         return hns3_cmd_send(hw, &desc, 1);
164 }
165
166 static int
167 hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)
168 {
169         struct hns3_pg_to_pri_link_cmd *map;
170         struct hns3_cmd_desc desc;
171
172         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);
173
174         map = (struct hns3_pg_to_pri_link_cmd *)desc.data;
175
176         map->pg_id = pg_id;
177         map->pri_bit_map = pri_bit_map;
178
179         return hns3_cmd_send(hw, &desc, 1);
180 }
181
182 static int
183 hns3_pg_to_pri_map(struct hns3_hw *hw)
184 {
185         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
186         struct hns3_pf *pf = &hns->pf;
187         struct hns3_pg_info *pg_info;
188         int ret, i;
189
190         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
191                 return -EINVAL;
192
193         for (i = 0; i < hw->dcb_info.num_pg; i++) {
194                 /* Cfg pg to priority mapping */
195                 pg_info = &hw->dcb_info.pg_info[i];
196                 ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);
197                 if (ret)
198                         return ret;
199         }
200
201         return 0;
202 }
203
204 static int
205 hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)
206 {
207         struct hns3_qs_to_pri_link_cmd *map;
208         struct hns3_cmd_desc desc;
209
210         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);
211
212         map = (struct hns3_qs_to_pri_link_cmd *)desc.data;
213
214         map->qs_id = rte_cpu_to_le_16(qs_id);
215         map->priority = pri;
216         map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;
217
218         return hns3_cmd_send(hw, &desc, 1);
219 }
220
221 static int
222 hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)
223 {
224         struct hns3_qs_weight_cmd *weight;
225         struct hns3_cmd_desc desc;
226
227         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);
228
229         weight = (struct hns3_qs_weight_cmd *)desc.data;
230
231         weight->qs_id = rte_cpu_to_le_16(qs_id);
232         weight->dwrr = dwrr;
233
234         return hns3_cmd_send(hw, &desc, 1);
235 }
236
237 static int
238 hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
239 {
240 #define DEFAULT_TC_WEIGHT       1
241 #define DEFAULT_TC_OFFSET       14
242         struct hns3_ets_tc_weight_cmd *ets_weight;
243         struct hns3_cmd_desc desc;
244         uint8_t i;
245
246         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);
247         ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;
248
249         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
250                 struct hns3_pg_info *pg_info;
251
252                 ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
253
254                 if (!(hw->hw_tc_map & BIT(i)))
255                         continue;
256
257                 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
258                 ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
259         }
260
261         ets_weight->weight_offset = DEFAULT_TC_OFFSET;
262
263         return hns3_cmd_send(hw, &desc, 1);
264 }
265
266 static int
267 hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)
268 {
269         struct hns3_priority_weight_cmd *weight;
270         struct hns3_cmd_desc desc;
271
272         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);
273
274         weight = (struct hns3_priority_weight_cmd *)desc.data;
275
276         weight->pri_id = pri_id;
277         weight->dwrr = dwrr;
278
279         return hns3_cmd_send(hw, &desc, 1);
280 }
281
282 static int
283 hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)
284 {
285         struct hns3_pg_weight_cmd *weight;
286         struct hns3_cmd_desc desc;
287
288         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);
289
290         weight = (struct hns3_pg_weight_cmd *)desc.data;
291
292         weight->pg_id = pg_id;
293         weight->dwrr = dwrr;
294
295         return hns3_cmd_send(hw, &desc, 1);
296 }
297 static int
298 hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)
299 {
300         struct hns3_cmd_desc desc;
301
302         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);
303
304         if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)
305                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
306         else
307                 desc.data[1] = 0;
308
309         desc.data[0] = rte_cpu_to_le_32(pg_id);
310
311         return hns3_cmd_send(hw, &desc, 1);
312 }
313
314 static uint32_t
315 hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
316                            uint8_t bs_b, uint8_t bs_s)
317 {
318         uint32_t shapping_para = 0;
319
320         /* If ir_b is zero it means IR is 0Mbps, return zero of shapping_para */
321         if (ir_b == 0)
322                 return shapping_para;
323
324         hns3_dcb_set_field(shapping_para, IR_B, ir_b);
325         hns3_dcb_set_field(shapping_para, IR_U, ir_u);
326         hns3_dcb_set_field(shapping_para, IR_S, ir_s);
327         hns3_dcb_set_field(shapping_para, BS_B, bs_b);
328         hns3_dcb_set_field(shapping_para, BS_S, bs_s);
329
330         return shapping_para;
331 }
332
333 int
334 hns3_dcb_port_shaper_cfg(struct hns3_hw *hw)
335 {
336         struct hns3_port_shapping_cmd *shap_cfg_cmd;
337         struct hns3_shaper_parameter shaper_parameter;
338         uint32_t shapping_para;
339         uint32_t ir_u, ir_b, ir_s;
340         struct hns3_cmd_desc desc;
341         int ret;
342
343         ret = hns3_shaper_para_calc(hw, hw->mac.link_speed,
344                                     HNS3_SHAPER_LVL_PORT, &shaper_parameter);
345         if (ret) {
346                 hns3_err(hw, "calculate shaper parameter failed: %d", ret);
347                 return ret;
348         }
349
350         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);
351         shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;
352
353         ir_b = shaper_parameter.ir_b;
354         ir_u = shaper_parameter.ir_u;
355         ir_s = shaper_parameter.ir_s;
356         shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
357                                                    HNS3_SHAPER_BS_U_DEF,
358                                                    HNS3_SHAPER_BS_S_DEF);
359
360         shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);
361
362         /*
363          * Configure the port_rate and set bit HNS3_TM_RATE_VLD_B of flag
364          * field in hns3_port_shapping_cmd to require firmware to recalculate
365          * shapping parameters. And whether the parameters are recalculated
366          * depends on the firmware version. But driver still needs to
367          * calculate it and configure to firmware for better compatibility.
368          */
369         shap_cfg_cmd->port_rate = rte_cpu_to_le_32(hw->mac.link_speed);
370         hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
371
372         return hns3_cmd_send(hw, &desc, 1);
373 }
374
375 static int
376 hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
377                          uint8_t pg_id, uint32_t shapping_para, uint32_t rate)
378 {
379         struct hns3_pg_shapping_cmd *shap_cfg_cmd;
380         enum hns3_opcode_type opcode;
381         struct hns3_cmd_desc desc;
382
383         opcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :
384                  HNS3_OPC_TM_PG_C_SHAPPING;
385         hns3_cmd_setup_basic_desc(&desc, opcode, false);
386
387         shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;
388
389         shap_cfg_cmd->pg_id = pg_id;
390
391         shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);
392
393         /*
394          * Configure the pg_rate and set bit HNS3_TM_RATE_VLD_B of flag field in
395          * hns3_pg_shapping_cmd to require firmware to recalculate shapping
396          * parameters. And whether parameters are recalculated depends on
397          * the firmware version. But driver still needs to calculate it and
398          * configure to firmware for better compatibility.
399          */
400         shap_cfg_cmd->pg_rate = rte_cpu_to_le_32(rate);
401         hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
402
403         return hns3_cmd_send(hw, &desc, 1);
404 }
405
406 int
407 hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate)
408 {
409         struct hns3_shaper_parameter shaper_parameter;
410         uint32_t ir_u, ir_b, ir_s;
411         uint32_t shaper_para;
412         int ret;
413
414         /* Calc shaper para */
415         ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PG,
416                                     &shaper_parameter);
417         if (ret) {
418                 hns3_err(hw, "calculate shaper parameter fail, ret = %d.",
419                          ret);
420                 return ret;
421         }
422
423         shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
424                                                  HNS3_SHAPER_BS_U_DEF,
425                                                  HNS3_SHAPER_BS_S_DEF);
426
427         ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, pg_id,
428                                        shaper_para, rate);
429         if (ret) {
430                 hns3_err(hw, "config PG CIR shaper parameter fail, ret = %d.",
431                          ret);
432                 return ret;
433         }
434
435         ir_b = shaper_parameter.ir_b;
436         ir_u = shaper_parameter.ir_u;
437         ir_s = shaper_parameter.ir_s;
438         shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
439                                                  HNS3_SHAPER_BS_U_DEF,
440                                                  HNS3_SHAPER_BS_S_DEF);
441
442         ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, pg_id,
443                                        shaper_para, rate);
444         if (ret) {
445                 hns3_err(hw, "config PG PIR shaper parameter fail, ret = %d.",
446                          ret);
447                 return ret;
448         }
449
450         return 0;
451 }
452
453 static int
454 hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
455 {
456         struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
457         uint32_t rate;
458         uint8_t i;
459         int ret;
460
461         /* Cfg pg schd */
462         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
463                 return -EINVAL;
464
465         /* Pg to pri */
466         for (i = 0; i < hw->dcb_info.num_pg; i++) {
467                 rate = hw->dcb_info.pg_info[i].bw_limit;
468                 ret = hns3_pg_shaper_rate_cfg(hw, i, rate);
469                 if (ret)
470                         return ret;
471         }
472
473         return 0;
474 }
475
476 static int
477 hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)
478 {
479         struct hns3_cmd_desc desc;
480
481         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);
482
483         if (mode == HNS3_SCH_MODE_DWRR)
484                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
485         else
486                 desc.data[1] = 0;
487
488         desc.data[0] = rte_cpu_to_le_32(qs_id);
489
490         return hns3_cmd_send(hw, &desc, 1);
491 }
492
493 static int
494 hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)
495 {
496         struct hns3_cmd_desc desc;
497
498         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);
499
500         if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)
501                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
502         else
503                 desc.data[1] = 0;
504
505         desc.data[0] = rte_cpu_to_le_32(pri_id);
506
507         return hns3_cmd_send(hw, &desc, 1);
508 }
509
510 static int
511 hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
512                           uint8_t pri_id, uint32_t shapping_para, uint32_t rate)
513 {
514         struct hns3_pri_shapping_cmd *shap_cfg_cmd;
515         enum hns3_opcode_type opcode;
516         struct hns3_cmd_desc desc;
517
518         opcode = bucket ? HNS3_OPC_TM_PRI_P_SHAPPING :
519                  HNS3_OPC_TM_PRI_C_SHAPPING;
520
521         hns3_cmd_setup_basic_desc(&desc, opcode, false);
522
523         shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;
524
525         shap_cfg_cmd->pri_id = pri_id;
526
527         shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);
528
529         /*
530          * Configure the pri_rate and set bit HNS3_TM_RATE_VLD_B of flag
531          * field in hns3_pri_shapping_cmd to require firmware to recalculate
532          * shapping parameters. And whether the parameters are recalculated
533          * depends on the firmware version. But driver still needs to
534          * calculate it and configure to firmware for better compatibility.
535          */
536         shap_cfg_cmd->pri_rate = rte_cpu_to_le_32(rate);
537         hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
538
539         return hns3_cmd_send(hw, &desc, 1);
540 }
541
542 int
543 hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate)
544 {
545         struct hns3_shaper_parameter shaper_parameter;
546         uint32_t ir_u, ir_b, ir_s;
547         uint32_t shaper_para;
548         int ret;
549
550         ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PRI,
551                                     &shaper_parameter);
552         if (ret) {
553                 hns3_err(hw, "calculate shaper parameter failed: %d.",
554                          ret);
555                 return ret;
556         }
557
558         shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
559                                                  HNS3_SHAPER_BS_U_DEF,
560                                                  HNS3_SHAPER_BS_S_DEF);
561
562         ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, tc_no,
563                                         shaper_para, rate);
564         if (ret) {
565                 hns3_err(hw,
566                          "config priority CIR shaper parameter failed: %d.",
567                          ret);
568                 return ret;
569         }
570
571         ir_b = shaper_parameter.ir_b;
572         ir_u = shaper_parameter.ir_u;
573         ir_s = shaper_parameter.ir_s;
574         shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
575                                                  HNS3_SHAPER_BS_U_DEF,
576                                                  HNS3_SHAPER_BS_S_DEF);
577
578         ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, tc_no,
579                                         shaper_para, rate);
580         if (ret) {
581                 hns3_err(hw,
582                          "config priority PIR shaper parameter failed: %d.",
583                          ret);
584                 return ret;
585         }
586
587         return 0;
588 }
589
590 static int
591 hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
592 {
593         struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
594         uint32_t rate;
595         uint8_t i;
596         int ret;
597
598         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
599                 return -EINVAL;
600
601         for (i = 0; i < hw->dcb_info.num_tc; i++) {
602                 rate = hw->dcb_info.tc_info[i].bw_limit;
603                 ret = hns3_pri_shaper_rate_cfg(hw, i, rate);
604                 if (ret) {
605                         hns3_err(hw, "config pri shaper failed: %d.", ret);
606                         return ret;
607                 }
608         }
609
610         return 0;
611 }
612
613 static int
614 hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
615 {
616         struct hns3_rss_conf *rss_cfg = &hw->rss_info;
617         uint16_t rx_qnum_per_tc;
618         uint16_t used_rx_queues;
619         int i;
620
621         rx_qnum_per_tc = nb_rx_q / hw->num_tc;
622         if (rx_qnum_per_tc > hw->rss_size_max) {
623                 hns3_err(hw, "rx queue number of per tc (%u) is greater than "
624                          "value (%u) hardware supported.",
625                          rx_qnum_per_tc, hw->rss_size_max);
626                 return -EINVAL;
627         }
628
629         used_rx_queues = hw->num_tc * rx_qnum_per_tc;
630         if (used_rx_queues != nb_rx_q) {
631                 hns3_err(hw, "rx queue number (%u) configured must be an "
632                          "integral multiple of valid tc number (%u).",
633                          nb_rx_q, hw->num_tc);
634                 return -EINVAL;
635         }
636         hw->alloc_rss_size = rx_qnum_per_tc;
637         hw->used_rx_queues = used_rx_queues;
638
639         /*
640          * When rss size is changed, we need to update rss redirection table
641          * maintained by driver. Besides, during the entire reset process, we
642          * need to ensure that the rss table information are not overwritten
643          * and configured directly to the hardware in the RESET_STAGE_RESTORE
644          * stage of the reset process.
645          */
646         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
647                 for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
648                         rss_cfg->rss_indirection_tbl[i] =
649                                                         i % hw->alloc_rss_size;
650         }
651
652         return 0;
653 }
654
655 static int
656 hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_tx_q)
657 {
658         struct hns3_tc_queue_info *tc_queue;
659         uint16_t used_tx_queues;
660         uint16_t tx_qnum_per_tc;
661         uint8_t i;
662
663         tx_qnum_per_tc = nb_tx_q / hw->num_tc;
664         used_tx_queues = hw->num_tc * tx_qnum_per_tc;
665         if (used_tx_queues != nb_tx_q) {
666                 hns3_err(hw, "tx queue number (%u) configured must be an "
667                          "integral multiple of valid tc number (%u).",
668                          nb_tx_q, hw->num_tc);
669                 return -EINVAL;
670         }
671
672         hw->used_tx_queues = used_tx_queues;
673         hw->tx_qnum_per_tc = tx_qnum_per_tc;
674         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
675                 tc_queue = &hw->tc_queue[i];
676                 if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
677                         tc_queue->enable = true;
678                         tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
679                         tc_queue->tqp_count = hw->tx_qnum_per_tc;
680                         tc_queue->tc = i;
681                 } else {
682                         /* Set to default queue if TC is disable */
683                         tc_queue->enable = false;
684                         tc_queue->tqp_offset = 0;
685                         tc_queue->tqp_count = 0;
686                         tc_queue->tc = 0;
687                 }
688         }
689
690         return 0;
691 }
692
693 uint8_t
694 hns3_txq_mapped_tc_get(struct hns3_hw *hw, uint16_t txq_no)
695 {
696         struct hns3_tc_queue_info *tc_queue;
697         uint8_t i;
698
699         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
700                 tc_queue = &hw->tc_queue[i];
701                 if (!tc_queue->enable)
702                         continue;
703
704                 if (txq_no >= tc_queue->tqp_offset &&
705                     txq_no < tc_queue->tqp_offset + tc_queue->tqp_count)
706                         return i;
707         }
708
709         /* return TC0 in default case */
710         return 0;
711 }
712
/*
 * Map both RX and TX queues onto the enabled TCs: first size the RSS
 * distribution for RX, then split the TX queues per TC.
 * Returns 0 on success or the first failing step's negative errno.
 */
int
hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q)
{
	int ret = hns3_set_rss_size(hw, nb_rx_q);

	if (ret != 0)
		return ret;

	return hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
}
724
725 static int
726 hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
727                                  uint16_t nb_tx_q)
728 {
729         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
730         struct hns3_pf *pf = &hns->pf;
731         int ret;
732
733         hw->num_tc = hw->dcb_info.num_tc;
734         ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
735         if (ret)
736                 return ret;
737
738         if (!hns->is_vf)
739                 memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
740
741         return 0;
742 }
743
744 int
745 hns3_dcb_info_init(struct hns3_hw *hw)
746 {
747         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
748         struct hns3_pf *pf = &hns->pf;
749         int i, k;
750
751         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
752             hw->dcb_info.num_pg != 1)
753                 return -EINVAL;
754
755         /* Initializing PG information */
756         memset(hw->dcb_info.pg_info, 0,
757                sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
758         for (i = 0; i < hw->dcb_info.num_pg; i++) {
759                 hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;
760                 hw->dcb_info.pg_info[i].pg_id = i;
761                 hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;
762                 hw->dcb_info.pg_info[i].bw_limit = hw->max_tm_rate;
763
764                 if (i != 0)
765                         continue;
766
767                 hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;
768                 for (k = 0; k < hw->dcb_info.num_tc; k++)
769                         hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;
770         }
771
772         /* All UPs mapping to TC0 */
773         for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
774                 hw->dcb_info.prio_tc[i] = 0;
775
776         /* Initializing tc information */
777         memset(hw->dcb_info.tc_info, 0,
778                sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
779         for (i = 0; i < hw->dcb_info.num_tc; i++) {
780                 hw->dcb_info.tc_info[i].tc_id = i;
781                 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
782                 hw->dcb_info.tc_info[i].pgid = 0;
783                 hw->dcb_info.tc_info[i].bw_limit =
784                         hw->dcb_info.pg_info[0].bw_limit;
785         }
786
787         return 0;
788 }
789
790 static int
791 hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)
792 {
793         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
794         struct hns3_pf *pf = &hns->pf;
795         int ret, i;
796
797         /* Only being config on TC-Based scheduler mode */
798         if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)
799                 return -EINVAL;
800
801         for (i = 0; i < hw->dcb_info.num_pg; i++) {
802                 ret = hns3_dcb_pg_schd_mode_cfg(hw, i);
803                 if (ret)
804                         return ret;
805         }
806
807         return 0;
808 }
809
810 static int
811 hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)
812 {
813         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
814         struct hns3_pf *pf = &hns->pf;
815         uint8_t i;
816         int ret;
817
818         if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {
819                 for (i = 0; i < hw->dcb_info.num_tc; i++) {
820                         ret = hns3_dcb_pri_schd_mode_cfg(hw, i);
821                         if (ret)
822                                 return ret;
823
824                         ret = hns3_dcb_qs_schd_mode_cfg(hw, i,
825                                                         HNS3_SCH_MODE_DWRR);
826                         if (ret)
827                                 return ret;
828                 }
829         }
830
831         return 0;
832 }
833
/*
 * Configure the scheduling mode on all levels: PGs first (level 2), then
 * priorities and queue sets (levels 3/4). Returns 0 on success.
 */
static int
hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_lvl2_schd_mode_cfg(hw);
	if (ret != 0) {
		hns3_err(hw, "config lvl2_schd_mode failed: %d", ret);
		return ret;
	}

	ret = hns3_dcb_lvl34_schd_mode_cfg(hw);
	if (ret != 0)
		hns3_err(hw, "config lvl34_schd_mode failed: %d", ret);

	return ret;
}
851
852 static int
853 hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)
854 {
855         struct hns3_pg_info *pg_info;
856         uint8_t dwrr;
857         int ret, i;
858
859         for (i = 0; i < hw->dcb_info.num_tc; i++) {
860                 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
861                 dwrr = pg_info->tc_dwrr[i];
862
863                 ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);
864                 if (ret) {
865                         hns3_err(hw,
866                                "fail to send priority weight cmd: %d, ret = %d",
867                                i, ret);
868                         return ret;
869                 }
870
871                 ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);
872                 if (ret) {
873                         hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d",
874                                  i, ret);
875                         return ret;
876                 }
877         }
878
879         return 0;
880 }
881
882 static int
883 hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
884 {
885         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
886         struct hns3_pf *pf = &hns->pf;
887         uint32_t version;
888         int ret;
889
890         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
891                 return -EINVAL;
892
893         ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);
894         if (ret)
895                 return ret;
896
897         if (!hns3_dev_dcb_supported(hw))
898                 return 0;
899
900         ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
901         if (ret == -EOPNOTSUPP) {
902                 version = hw->fw_version;
903                 hns3_warn(hw,
904                           "fw %lu.%lu.%lu.%lu doesn't support ets tc weight cmd",
905                           hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
906                                          HNS3_FW_VERSION_BYTE3_S),
907                           hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
908                                          HNS3_FW_VERSION_BYTE2_S),
909                           hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
910                                          HNS3_FW_VERSION_BYTE1_S),
911                           hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
912                                          HNS3_FW_VERSION_BYTE0_S));
913                 ret = 0;
914         }
915
916         return ret;
917 }
918
919 static int
920 hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)
921 {
922         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
923         struct hns3_pf *pf = &hns->pf;
924         int ret, i;
925
926         /* Cfg pg schd */
927         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
928                 return -EINVAL;
929
930         /* Cfg pg to prio */
931         for (i = 0; i < hw->dcb_info.num_pg; i++) {
932                 /* Cfg dwrr */
933                 ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);
934                 if (ret)
935                         return ret;
936         }
937
938         return 0;
939 }
940
/*
 * Configure DWRR weights for both the priority-group and the priority
 * levels of the scheduler.
 * Returns 0 on success, a negative errno otherwise.
 */
static int
hns3_dcb_dwrr_cfg(struct hns3_hw *hw)
{
	int ret = hns3_dcb_pg_dwrr_cfg(hw);

	if (ret != 0) {
		hns3_err(hw, "config pg_dwrr failed: %d", ret);
		return ret;
	}

	ret = hns3_dcb_pri_dwrr_cfg(hw);
	if (ret != 0)
		hns3_err(hw, "config pri_dwrr failed: %d", ret);

	return ret;
}
958
/*
 * Configure the rate shapers at every level: port, priority group and
 * priority.
 * Returns 0 on success, a negative errno otherwise.
 */
static int
hns3_dcb_shaper_cfg(struct hns3_hw *hw)
{
	int ret = hns3_dcb_port_shaper_cfg(hw);

	if (ret != 0) {
		hns3_err(hw, "config port shaper failed: %d", ret);
		return ret;
	}

	ret = hns3_dcb_pg_shaper_cfg(hw);
	if (ret != 0) {
		hns3_err(hw, "config pg shaper failed: %d", ret);
		return ret;
	}

	/* Priority level shaper is the last stage; its ret is returned. */
	return hns3_dcb_pri_shaper_cfg(hw);
}
978
/*
 * Build and send the command linking one Tx/Rx queue (nq) to a queue set
 * (qset). @q_id is the absolute queue id, @qs_id the target qset id.
 * Returns 0 on success or the error code from the command layer.
 */
static int
hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)
{
	struct hns3_nq_to_qs_link_cmd *map;
	struct hns3_cmd_desc desc;
	uint16_t tmp_qs_id = 0;
	uint16_t qs_id_l;
	uint16_t qs_id_h;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);

	/* The command payload lives in the descriptor's data area. */
	map = (struct hns3_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = rte_cpu_to_le_16(q_id);

	/*
	 * Network engine with revision_id 0x21 uses 0~9 bit of qs_id to
	 * configure qset_id. So we need to convert qs_id to the follow
	 * format to support qset_id > 1024.
	 * qs_id: | 15 | 14 ~ 10 |  9 ~ 0   |
	 *            /         / \         \
	 *           /         /   \         \
	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
	 *          | qs_id_h | vld | qs_id_l |
	 */
	/* Split qs_id into its low and high parts ... */
	qs_id_l = hns3_get_field(qs_id, HNS3_DCB_QS_ID_L_MSK,
				 HNS3_DCB_QS_ID_L_S);
	qs_id_h = hns3_get_field(qs_id, HNS3_DCB_QS_ID_H_MSK,
				 HNS3_DCB_QS_ID_H_S);
	/* ... and reassemble them at the positions the hardware expects. */
	hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_L_MSK, HNS3_DCB_QS_ID_L_S,
		       qs_id_l);
	hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_H_EXT_MSK,
		       HNS3_DCB_QS_ID_H_EXT_S, qs_id_h);
	/* OR in the valid bit marking this nq->qs link as active. */
	map->qset_id = rte_cpu_to_le_16(tmp_qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);

	return hns3_cmd_send(hw, &desc, 1);
}
1016
1017 static int
1018 hns3_q_to_qs_map(struct hns3_hw *hw)
1019 {
1020         struct hns3_tc_queue_info *tc_queue;
1021         uint16_t q_id;
1022         uint32_t i, j;
1023         int ret;
1024
1025         for (i = 0; i < hw->num_tc; i++) {
1026                 tc_queue = &hw->tc_queue[i];
1027                 for (j = 0; j < tc_queue->tqp_count; j++) {
1028                         q_id = tc_queue->tqp_offset + j;
1029                         ret = hns3_q_to_qs_map_cfg(hw, q_id, i);
1030                         if (ret)
1031                                 return ret;
1032                 }
1033         }
1034
1035         return 0;
1036 }
1037
1038 static int
1039 hns3_pri_q_qs_cfg(struct hns3_hw *hw)
1040 {
1041         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1042         struct hns3_pf *pf = &hns->pf;
1043         uint32_t i;
1044         int ret;
1045
1046         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
1047                 return -EINVAL;
1048
1049         /* Cfg qs -> pri mapping */
1050         for (i = 0; i < hw->num_tc; i++) {
1051                 ret = hns3_qs_to_pri_map_cfg(hw, i, i);
1052                 if (ret) {
1053                         hns3_err(hw, "qs_to_pri mapping fail: %d", ret);
1054                         return ret;
1055                 }
1056         }
1057
1058         /* Cfg q -> qs mapping */
1059         ret = hns3_q_to_qs_map(hw);
1060         if (ret)
1061                 hns3_err(hw, "nq_to_qs mapping fail: %d", ret);
1062
1063         return ret;
1064 }
1065
/*
 * Program all DCB mapping tables: user priority -> TC, priority group ->
 * priority, and finally priority/queue -> qset.
 * Returns 0 on success, a negative errno otherwise.
 */
static int
hns3_dcb_map_cfg(struct hns3_hw *hw)
{
	int ret = hns3_up_to_tc_map(hw);

	if (ret != 0) {
		hns3_err(hw, "up_to_tc mapping fail: %d", ret);
		return ret;
	}

	ret = hns3_pg_to_pri_map(hw);
	if (ret != 0) {
		hns3_err(hw, "pri_to_pg mapping fail: %d", ret);
		return ret;
	}

	return hns3_pri_q_qs_cfg(hw);
}
1085
/*
 * Full scheduler bring-up: mappings, shapers, DWRR weights and finally
 * the per-level scheduling modes. Stops at the first failing stage.
 * Returns 0 on success, a negative errno otherwise.
 */
static int
hns3_dcb_schd_setup_hw(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_map_cfg(hw);
	if (ret == 0)
		ret = hns3_dcb_shaper_cfg(hw);
	if (ret == 0)
		ret = hns3_dcb_dwrr_cfg(hw);
	if (ret == 0)
		ret = hns3_dcb_schd_mode_cfg(hw);

	return ret;
}
1109
1110 static int
1111 hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,
1112                      uint8_t pause_trans_gap, uint16_t pause_trans_time)
1113 {
1114         struct hns3_cfg_pause_param_cmd *pause_param;
1115         struct hns3_cmd_desc desc;
1116
1117         pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1118
1119         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);
1120
1121         memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);
1122         memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);
1123         pause_param->pause_trans_gap = pause_trans_gap;
1124         pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);
1125
1126         return hns3_cmd_send(hw, &desc, 1);
1127 }
1128
1129 int
1130 hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)
1131 {
1132         struct hns3_cfg_pause_param_cmd *pause_param;
1133         struct hns3_cmd_desc desc;
1134         uint16_t trans_time;
1135         uint8_t trans_gap;
1136         int ret;
1137
1138         pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1139
1140         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);
1141
1142         ret = hns3_cmd_send(hw, &desc, 1);
1143         if (ret)
1144                 return ret;
1145
1146         trans_gap = pause_param->pause_trans_gap;
1147         trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);
1148
1149         return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);
1150 }
1151
1152 static int
1153 hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
1154 {
1155 #define PAUSE_TIME_DIV_BY       2
1156 #define PAUSE_TIME_MIN_VALUE    0x4
1157
1158         struct hns3_mac *mac = &hw->mac;
1159         uint8_t pause_trans_gap;
1160
1161         /*
1162          * Pause transmit gap must be less than "pause_time / 2", otherwise
1163          * the behavior of MAC is undefined.
1164          */
1165         if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1166                 pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
1167         else if (pause_time >= PAUSE_TIME_MIN_VALUE &&
1168                  pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1169                 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1170         else {
1171                 hns3_warn(hw, "pause_time(%u) is adjusted to 4", pause_time);
1172                 pause_time = PAUSE_TIME_MIN_VALUE;
1173                 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1174         }
1175
1176         return hns3_pause_param_cfg(hw, mac->mac_addr,
1177                                     pause_trans_gap, pause_time);
1178 }
1179
1180 static int
1181 hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)
1182 {
1183         struct hns3_cmd_desc desc;
1184
1185         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);
1186
1187         desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1188                 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1189
1190         return hns3_cmd_send(hw, &desc, 1);
1191 }
1192
1193 static int
1194 hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)
1195 {
1196         struct hns3_cmd_desc desc;
1197         struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;
1198
1199         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);
1200
1201         pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1202                                         (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1203
1204         pfc->pri_en_bitmap = pfc_bitmap;
1205
1206         return hns3_cmd_send(hw, &desc, 1);
1207 }
1208
1209 static int
1210 hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)
1211 {
1212         struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
1213         struct hns3_cmd_desc desc;
1214
1215         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);
1216
1217         bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;
1218
1219         bp_to_qs_map_cmd->tc_id = tc;
1220         bp_to_qs_map_cmd->qs_group_id = grp_id;
1221         bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);
1222
1223         return hns3_cmd_send(hw, &desc, 1);
1224 }
1225
1226 static void
1227 hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
1228 {
1229         switch (hw->current_mode) {
1230         case HNS3_FC_NONE:
1231                 *tx_en = false;
1232                 *rx_en = false;
1233                 break;
1234         case HNS3_FC_RX_PAUSE:
1235                 *tx_en = false;
1236                 *rx_en = true;
1237                 break;
1238         case HNS3_FC_TX_PAUSE:
1239                 *tx_en = true;
1240                 *rx_en = false;
1241                 break;
1242         case HNS3_FC_FULL:
1243                 *tx_en = true;
1244                 *rx_en = true;
1245                 break;
1246         default:
1247                 *tx_en = false;
1248                 *rx_en = false;
1249                 break;
1250         }
1251 }
1252
1253 static int
1254 hns3_mac_pause_setup_hw(struct hns3_hw *hw)
1255 {
1256         bool tx_en, rx_en;
1257
1258         if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
1259                 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1260         else {
1261                 tx_en = false;
1262                 rx_en = false;
1263         }
1264
1265         return hns3_mac_pause_en_cfg(hw, tx_en, rx_en);
1266 }
1267
1268 static int
1269 hns3_pfc_setup_hw(struct hns3_hw *hw)
1270 {
1271         bool tx_en, rx_en;
1272
1273         if (hw->current_fc_status == HNS3_FC_STATUS_PFC)
1274                 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1275         else {
1276                 tx_en = false;
1277                 rx_en = false;
1278         }
1279
1280         return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);
1281 }
1282
1283 /*
1284  * Each Tc has a 1024 queue sets to backpress, it divides to
1285  * 32 group, each group contains 32 queue sets, which can be
1286  * represented by uint32_t bitmap.
1287  */
1288 static int
1289 hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)
1290 {
1291         uint32_t qs_bitmap;
1292         int ret;
1293         int i;
1294
1295         for (i = 0; i < HNS3_BP_GRP_NUM; i++) {
1296                 uint8_t grp, sub_grp;
1297                 qs_bitmap = 0;
1298
1299                 grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
1300                 sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,
1301                                          HNS3_BP_SUB_GRP_ID_S);
1302                 if (i == grp)
1303                         qs_bitmap |= (1 << sub_grp);
1304
1305                 ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);
1306                 if (ret)
1307                         return ret;
1308         }
1309
1310         return 0;
1311 }
1312
1313 static int
1314 hns3_dcb_bp_setup(struct hns3_hw *hw)
1315 {
1316         int ret, i;
1317
1318         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1319                 ret = hns3_bp_setup_hw(hw, i);
1320                 if (ret)
1321                         return ret;
1322         }
1323
1324         return 0;
1325 }
1326
1327 static int
1328 hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
1329 {
1330         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1331         struct hns3_pf *pf = &hns->pf;
1332         int ret;
1333
1334         ret = hns3_pause_param_setup_hw(hw, pf->pause_time);
1335         if (ret) {
1336                 hns3_err(hw, "Fail to set pause parameter. ret = %d", ret);
1337                 return ret;
1338         }
1339
1340         ret = hns3_mac_pause_setup_hw(hw);
1341         if (ret) {
1342                 hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret);
1343                 return ret;
1344         }
1345
1346         /* Only DCB-supported dev supports qset back pressure and pfc cmd */
1347         if (!hns3_dev_dcb_supported(hw))
1348                 return 0;
1349
1350         ret = hns3_pfc_setup_hw(hw);
1351         if (ret) {
1352                 hns3_err(hw, "config pfc failed! ret = %d", ret);
1353                 return ret;
1354         }
1355
1356         return hns3_dcb_bp_setup(hw);
1357 }
1358
1359 static uint8_t
1360 hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)
1361 {
1362         uint8_t pfc_map = 0;
1363         uint8_t *prio_tc;
1364         uint8_t i, j;
1365
1366         prio_tc = hw->dcb_info.prio_tc;
1367         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1368                 for (j = 0; j < HNS3_MAX_USER_PRIO; j++) {
1369                         if (prio_tc[j] == i && pfc_en & BIT(j)) {
1370                                 pfc_map |= BIT(i);
1371                                 break;
1372                         }
1373                 }
1374         }
1375
1376         return pfc_map;
1377 }
1378
1379 static void
1380 hns3_dcb_cfg_validate(struct hns3_adapter *hns, uint8_t *tc, bool *changed)
1381 {
1382         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1383         struct hns3_hw *hw = &hns->hw;
1384         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1385         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1386         uint8_t max_tc = 0;
1387         uint8_t pfc_en;
1388         int i;
1389
1390         dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1391         for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1392                 if (dcb_rx_conf->dcb_tc[i] != hw->dcb_info.prio_tc[i])
1393                         *changed = true;
1394
1395                 if (dcb_rx_conf->dcb_tc[i] > max_tc)
1396                         max_tc = dcb_rx_conf->dcb_tc[i];
1397         }
1398         *tc = max_tc + 1;
1399         if (*tc != hw->dcb_info.num_tc)
1400                 *changed = true;
1401
1402         /*
1403          * We ensure that dcb information can be reconfigured
1404          * after the hns3_priority_flow_ctrl_set function called.
1405          */
1406         if (hw->current_mode != HNS3_FC_FULL)
1407                 *changed = true;
1408         pfc_en = RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1409         if (hw->dcb_info.pfc_en != pfc_en)
1410                 *changed = true;
1411
1412         /* tx/rx queue number is reconfigured. */
1413         if (nb_rx_q != hw->used_rx_queues || nb_tx_q != hw->used_tx_queues)
1414                 *changed = true;
1415 }
1416
/*
 * Refresh the software DCB database from the user supplied DCB Rx
 * configuration: a single priority group (pg0) owns all TCs, bandwidth
 * is split evenly between the valid TCs, and user priorities are mapped
 * to TCs as requested. Finally the TC-to-queue mapping is rebuilt.
 * Returns 0 on success, a negative errno otherwise.
 */
static int
hns3_dcb_info_cfg(struct hns3_adapter *hns)
{
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint8_t tc_bw, bw_rest;
	uint8_t i, j;
	int ret;

	dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
	pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
	pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;

	/* Config pg0 */
	memset(hw->dcb_info.pg_info, 0,
	       sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
	hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT;
	hw->dcb_info.pg_info[0].pg_id = 0;
	hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;
	hw->dcb_info.pg_info[0].bw_limit = hw->max_tm_rate;
	hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;

	/* Each tc has same bw for valid tc by default */
	tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;
	for (i = 0; i < hw->dcb_info.num_tc; i++)
		hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;
	/* To ensure the sum of tc_dwrr is equal to 100 */
	bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;
	/* Spread the remainder one percent at a time over the first TCs. */
	for (j = 0; j < bw_rest; j++)
		hw->dcb_info.pg_info[0].tc_dwrr[j]++;
	/*
	 * Any TC the user requested beyond num_tc gets zero weight;
	 * note 'i' deliberately continues from the previous loop.
	 */
	for (; i < dcb_rx_conf->nb_tcs; i++)
		hw->dcb_info.pg_info[0].tc_dwrr[i] = 0;

	/* All tcs map to pg0 */
	memset(hw->dcb_info.tc_info, 0,
	       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
	for (i = 0; i < hw->dcb_info.num_tc; i++) {
		hw->dcb_info.tc_info[i].tc_id = i;
		hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
		hw->dcb_info.tc_info[i].pgid = 0;
		hw->dcb_info.tc_info[i].bw_limit =
					hw->dcb_info.pg_info[0].bw_limit;
	}

	/* Copy the user priority to TC mapping as requested. */
	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
		hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];

	ret = hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
					       hw->data->nb_tx_queues);
	if (ret)
		hns3_err(hw, "update tc queue mapping failed, ret = %d.", ret);

	return ret;
}
1472
1473 static int
1474 hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
1475 {
1476         struct hns3_pf *pf = &hns->pf;
1477         struct hns3_hw *hw = &hns->hw;
1478         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1479         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1480         uint8_t bit_map = 0;
1481         uint8_t i;
1482
1483         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1484             hw->dcb_info.num_pg != 1)
1485                 return -EINVAL;
1486
1487         if (nb_rx_q < num_tc) {
1488                 hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).",
1489                          nb_rx_q, num_tc);
1490                 return -EINVAL;
1491         }
1492
1493         if (nb_tx_q < num_tc) {
1494                 hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).",
1495                          nb_tx_q, num_tc);
1496                 return -EINVAL;
1497         }
1498
1499         /* Currently not support uncontinuous tc */
1500         hw->dcb_info.num_tc = num_tc;
1501         for (i = 0; i < hw->dcb_info.num_tc; i++)
1502                 bit_map |= BIT(i);
1503
1504         if (!bit_map) {
1505                 bit_map = 1;
1506                 hw->dcb_info.num_tc = 1;
1507         }
1508         hw->hw_tc_map = bit_map;
1509
1510         return hns3_dcb_info_cfg(hns);
1511 }
1512
/*
 * Apply the soft-maintained DCB configuration to hardware: scheduler
 * first, then (when PFC is requested) the PFC enables and the packet
 * buffer layout. On PFC setup failure the previously saved flow control
 * state is restored and the buffer allocation is re-done to match it.
 * Returns 0 on success, a negative errno otherwise.
 */
static int
hns3_dcb_hw_configure(struct hns3_adapter *hns)
{
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	/* Snapshot of the current state, used for rollback on failure. */
	enum hns3_fc_status fc_status = hw->current_fc_status;
	enum hns3_fc_mode current_mode = hw->current_mode;
	uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
	int ret, status;

	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
	    pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
		return -ENOTSUP;

	ret = hns3_dcb_schd_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "dcb schdule configure failed! ret = %d", ret);
		return ret;
	}

	if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
		dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
		if (dcb_rx_conf->nb_tcs == 0)
			hw->dcb_info.pfc_en = 1; /* tc0 only */
		else
			hw->dcb_info.pfc_en =
			RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);

		hw->dcb_info.hw_pfc_map =
				hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);

		/* Re-split the packet buffer for the undroppable TCs. */
		ret = hns3_buffer_alloc(hw);
		if (ret)
			return ret;

		hw->current_fc_status = HNS3_FC_STATUS_PFC;
		hw->current_mode = HNS3_FC_FULL;
		ret = hns3_dcb_pause_setup_hw(hw);
		if (ret) {
			hns3_err(hw, "setup pfc failed! ret = %d", ret);
			goto pfc_setup_fail;
		}
	} else {
		/*
		 * Although dcb_capability_en is lack of ETH_DCB_PFC_SUPPORT
		 * flag, the DCB information is configured, such as tc numbers.
		 * Therefore, refreshing the allocation of packet buffer is
		 * necessary.
		 */
		ret = hns3_buffer_alloc(hw);
		if (ret)
			return ret;
	}

	return 0;

pfc_setup_fail:
	/* Restore the saved state and redo buffer allocation to match. */
	hw->current_mode = current_mode;
	hw->current_fc_status = fc_status;
	hw->dcb_info.hw_pfc_map = hw_pfc_map;
	status = hns3_buffer_alloc(hw);
	if (status)
		hns3_err(hw, "recover packet buffer fail! status = %d", status);

	return ret;
}
1580
1581 /*
1582  * hns3_dcb_configure - setup dcb related config
1583  * @hns: pointer to hns3 adapter
1584  * Returns 0 on success, negative value on failure.
1585  */
1586 int
1587 hns3_dcb_configure(struct hns3_adapter *hns)
1588 {
1589         struct hns3_hw *hw = &hns->hw;
1590         bool map_changed = false;
1591         uint8_t num_tc = 0;
1592         int ret;
1593
1594         hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
1595         if (map_changed ||
1596             __atomic_load_n(&hw->reset.resetting,  __ATOMIC_RELAXED)) {
1597                 ret = hns3_dcb_info_update(hns, num_tc);
1598                 if (ret) {
1599                         hns3_err(hw, "dcb info update failed: %d", ret);
1600                         return ret;
1601                 }
1602
1603                 ret = hns3_dcb_hw_configure(hns);
1604                 if (ret) {
1605                         hns3_err(hw, "dcb sw configure failed: %d", ret);
1606                         return ret;
1607                 }
1608         }
1609
1610         return 0;
1611 }
1612
/*
 * Program the scheduler and the pause/PFC configuration into hardware.
 * Returns 0 on success, a negative errno otherwise.
 */
int
hns3_dcb_init_hw(struct hns3_hw *hw)
{
	int ret = hns3_dcb_schd_setup_hw(hw);

	if (ret != 0) {
		hns3_err(hw, "dcb schedule setup failed: %d", ret);
		return ret;
	}

	ret = hns3_dcb_pause_setup_hw(hw);
	if (ret != 0)
		hns3_err(hw, "PAUSE setup failed: %d", ret);

	return ret;
}
1630
/*
 * Initialize DCB: on first driver init, seed the default flow control
 * state and the soft DCB database; on every call (init and reset
 * recovery) push the soft-maintained state to hardware.
 * Returns 0 on success, a negative errno otherwise.
 */
int
hns3_dcb_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t default_tqp_num;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/*
	 * According to the 'adapter_state' identifier, the following branch
	 * is only executed to initialize default configurations of dcb during
	 * the initializing driver process. Due to driver saving dcb-related
	 * information before reset triggered, the reinit dev stage of the
	 * reset process can not access to the branch, or those information
	 * will be changed.
	 */
	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
		/* Flow control starts fully disabled. */
		hw->requested_mode = HNS3_FC_NONE;
		hw->current_mode = hw->requested_mode;
		pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
		hw->current_fc_status = HNS3_FC_STATUS_NONE;

		ret = hns3_dcb_info_init(hw);
		if (ret) {
			hns3_err(hw, "dcb info init failed, ret = %d.", ret);
			return ret;
		}

		/*
		 * The number of queues configured by default cannot exceed
		 * the maximum number of queues for a single TC.
		 */
		default_tqp_num = RTE_MIN(hw->rss_size_max,
					  hw->tqps_num / hw->dcb_info.num_tc);
		ret = hns3_dcb_update_tc_queue_mapping(hw, default_tqp_num,
						       default_tqp_num);
		if (ret) {
			hns3_err(hw,
				 "update tc queue mapping failed, ret = %d.",
				 ret);
			return ret;
		}
	}

	/*
	 * DCB hardware will be configured by following the function during
	 * the initializing driver process and the reset process. However,
	 * driver will restore directly configurations of dcb hardware based
	 * on dcb-related information soft maintained when driver
	 * initialization has finished and reset is coming.
	 */
	ret = hns3_dcb_init_hw(hw);
	if (ret) {
		hns3_err(hw, "dcb init hardware failed, ret = %d.", ret);
		return ret;
	}

	return 0;
}
1692
1693 static int
1694 hns3_update_queue_map_configure(struct hns3_adapter *hns)
1695 {
1696         struct hns3_hw *hw = &hns->hw;
1697         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1698         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1699         int ret;
1700
1701         ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
1702         if (ret) {
1703                 hns3_err(hw, "failed to update tc queue mapping, ret = %d.",
1704                          ret);
1705                 return ret;
1706         }
1707         ret = hns3_q_to_qs_map(hw);
1708         if (ret)
1709                 hns3_err(hw, "failed to map nq to qs, ret = %d.", ret);
1710
1711         return ret;
1712 }
1713
1714 int
1715 hns3_dcb_cfg_update(struct hns3_adapter *hns)
1716 {
1717         struct hns3_hw *hw = &hns->hw;
1718         enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
1719         int ret;
1720
1721         if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
1722                 ret = hns3_dcb_configure(hns);
1723                 if (ret)
1724                         hns3_err(hw, "Failed to config dcb: %d", ret);
1725         } else {
1726                 /*
1727                  * Update queue map without PFC configuration,
1728                  * due to queues reconfigured by user.
1729                  */
1730                 ret = hns3_update_queue_map_configure(hns);
1731                 if (ret)
1732                         hns3_err(hw,
1733                                  "Failed to update queue mapping configure: %d",
1734                                  ret);
1735         }
1736
1737         return ret;
1738 }
1739
1740 /*
1741  * hns3_dcb_pfc_enable - Enable priority flow control
1742  * @dev: pointer to ethernet device
1743  *
1744  * Configures the pfc settings for one porority.
1745  */
1746 int
1747 hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
1748 {
1749         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1750         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1751         enum hns3_fc_status fc_status = hw->current_fc_status;
1752         enum hns3_fc_mode current_mode = hw->current_mode;
1753         uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1754         uint8_t pfc_en = hw->dcb_info.pfc_en;
1755         uint8_t priority = pfc_conf->priority;
1756         uint16_t pause_time = pf->pause_time;
1757         int ret, status;
1758
1759         pf->pause_time = pfc_conf->fc.pause_time;
1760         hw->current_mode = hw->requested_mode;
1761         hw->current_fc_status = HNS3_FC_STATUS_PFC;
1762         hw->dcb_info.pfc_en |= BIT(priority);
1763         hw->dcb_info.hw_pfc_map =
1764                         hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1765         ret = hns3_buffer_alloc(hw);
1766         if (ret)
1767                 goto pfc_setup_fail;
1768
1769         /*
1770          * The flow control mode of all UPs will be changed based on
1771          * current_mode coming from user.
1772          */
1773         ret = hns3_dcb_pause_setup_hw(hw);
1774         if (ret) {
1775                 hns3_err(hw, "enable pfc failed! ret = %d", ret);
1776                 goto pfc_setup_fail;
1777         }
1778
1779         return 0;
1780
1781 pfc_setup_fail:
1782         hw->current_mode = current_mode;
1783         hw->current_fc_status = fc_status;
1784         pf->pause_time = pause_time;
1785         hw->dcb_info.pfc_en = pfc_en;
1786         hw->dcb_info.hw_pfc_map = hw_pfc_map;
1787         status = hns3_buffer_alloc(hw);
1788         if (status)
1789                 hns3_err(hw, "recover packet buffer fail: %d", status);
1790
1791         return ret;
1792 }
1793
1794 /*
1795  * hns3_fc_enable - Enable MAC pause
1796  * @dev: pointer to ethernet device
1797  *
1798  * Configures the MAC pause settings.
1799  */
1800 int
1801 hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1802 {
1803         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1804         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1805         enum hns3_fc_status fc_status = hw->current_fc_status;
1806         enum hns3_fc_mode current_mode = hw->current_mode;
1807         uint16_t pause_time = pf->pause_time;
1808         int ret;
1809
1810         pf->pause_time = fc_conf->pause_time;
1811         hw->current_mode = hw->requested_mode;
1812
1813         /*
1814          * In fact, current_fc_status is HNS3_FC_STATUS_NONE when mode
1815          * of flow control is configured to be HNS3_FC_NONE.
1816          */
1817         if (hw->current_mode == HNS3_FC_NONE)
1818                 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1819         else
1820                 hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;
1821
1822         ret = hns3_dcb_pause_setup_hw(hw);
1823         if (ret) {
1824                 hns3_err(hw, "enable MAC Pause failed! ret = %d", ret);
1825                 goto setup_fc_fail;
1826         }
1827
1828         return 0;
1829
1830 setup_fc_fail:
1831         hw->current_mode = current_mode;
1832         hw->current_fc_status = fc_status;
1833         pf->pause_time = pause_time;
1834
1835         return ret;
1836 }