net/hns3: fix DCB configuration
[dpdk.git] / drivers / net / hns3 / hns3_dcb.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4
5 #include <rte_io.h>
6 #include <rte_ethdev.h>
7
8 #include "hns3_logs.h"
9 #include "hns3_ethdev.h"
10 #include "hns3_dcb.h"
11
12 #define HNS3_SHAPER_BS_U_DEF    5
13 #define HNS3_SHAPER_BS_S_DEF    20
14 #define BW_MAX_PERCENT          100
15
16 /*
17  * hns3_shaper_para_calc: calculate ir parameter for the shaper
18  * @ir: Rate to be config, its unit is Mbps
19  * @shaper_level: the shaper level. eg: port, pg, priority, queueset
20  * @shaper_para: shaper parameter of IR shaper
21  *
22  * the formula:
23  *
24  *              IR_b * (2 ^ IR_u) * 8
25  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
26  *              Tick * (2 ^ IR_s)
27  *
28  * @return: 0: calculate successful, negative: fail
29  */
static int
hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
                      struct hns3_shaper_parameter *shaper_para)
{
#define SHAPER_DEFAULT_IR_B     126
#define DIVISOR_CLK             (1000 * 8)
#define DIVISOR_IR_B_126        (126 * DIVISOR_CLK)

	/* Per-level tick value used as the divisor in the IR formula above. */
	const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
		6 * 256,    /* Priority level */
		6 * 32,     /* Priority group level */
		6 * 8,      /* Port level */
		6 * 256     /* Qset level */
	};
	uint8_t ir_u_calc = 0;
	uint8_t ir_s_calc = 0;
	uint32_t denominator;
	uint32_t ir_calc;
	uint32_t tick;

	/* Calc tick */
	if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
		hns3_err(hw,
			 "shaper_level(%u) is greater than HNS3_SHAPER_LVL_CNT(%d)",
			 shaper_level, HNS3_SHAPER_LVL_CNT);
		return -EINVAL;
	}

	if (ir > hw->max_tm_rate) {
		hns3_err(hw, "rate(%u) exceeds the max rate(%u) driver "
			 "supported.", ir, hw->max_tm_rate);
		return -EINVAL;
	}

	tick = tick_array[shaper_level];

	/*
	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick; /* rounded up */

	if (ir_calc == ir) {
		/* Default parameters already produce the requested rate. */
		shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
		}

		/* ir_b is rounded to nearest to best approximate 'ir'. */
		shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				    (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/*
		 * Increasing the numerator to select ir_u value. ir_u_calc will
		 * get maximum value when ir_calc is minimum and ir is maximum.
		 * ir_calc gets minimum value when tick is the maximum value.
		 * At the same time, value of ir_u_calc can only be increased up
		 * to eight after the while loop if the value of ir is equal
		 * to hw->max_tm_rate.
		 */
		uint32_t numerator;
		do {
			ir_u_calc++;
			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		} while (ir_calc < ir);

		if (ir_calc == ir) {
			shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
		} else {
			/* Overshot: step back one and recompute ir_b. */
			--ir_u_calc;

			/*
			 * The maximum value of ir_u_calc in this branch is
			 * seven in all cases. Thus, value of denominator can
			 * not be zero here.
			 */
			denominator = DIVISOR_CLK * (1 << ir_u_calc);
			shaper_para->ir_b =
				(ir * tick + (denominator >> 1)) / denominator;
		}
	}

	shaper_para->ir_u = ir_u_calc;
	shaper_para->ir_s = ir_s_calc;

	return 0;
}
123
124 static int
125 hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)
126 {
127 #define HNS3_HALF_BYTE_BIT_OFFSET 4
128         uint8_t tc = hw->dcb_info.prio_tc[pri_id];
129
130         if (tc >= hw->dcb_info.num_tc)
131                 return -EINVAL;
132
133         /*
134          * The register for priority has four bytes, the first bytes includes
135          *  priority0 and priority1, the higher 4bit stands for priority1
136          *  while the lower 4bit stands for priority0, as below:
137          * first byte:  | pri_1 | pri_0 |
138          * second byte: | pri_3 | pri_2 |
139          * third byte:  | pri_5 | pri_4 |
140          * fourth byte: | pri_7 | pri_6 |
141          */
142         pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);
143
144         return 0;
145 }
146
147 static int
148 hns3_up_to_tc_map(struct hns3_hw *hw)
149 {
150         struct hns3_cmd_desc desc;
151         uint8_t *pri = (uint8_t *)desc.data;
152         uint8_t pri_id;
153         int ret;
154
155         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);
156
157         for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {
158                 ret = hns3_fill_pri_array(hw, pri, pri_id);
159                 if (ret)
160                         return ret;
161         }
162
163         return hns3_cmd_send(hw, &desc, 1);
164 }
165
166 static int
167 hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)
168 {
169         struct hns3_pg_to_pri_link_cmd *map;
170         struct hns3_cmd_desc desc;
171
172         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);
173
174         map = (struct hns3_pg_to_pri_link_cmd *)desc.data;
175
176         map->pg_id = pg_id;
177         map->pri_bit_map = pri_bit_map;
178
179         return hns3_cmd_send(hw, &desc, 1);
180 }
181
182 static int
183 hns3_pg_to_pri_map(struct hns3_hw *hw)
184 {
185         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
186         struct hns3_pf *pf = &hns->pf;
187         struct hns3_pg_info *pg_info;
188         int ret, i;
189
190         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
191                 return -EINVAL;
192
193         for (i = 0; i < hw->dcb_info.num_pg; i++) {
194                 /* Cfg pg to priority mapping */
195                 pg_info = &hw->dcb_info.pg_info[i];
196                 ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);
197                 if (ret)
198                         return ret;
199         }
200
201         return 0;
202 }
203
204 static int
205 hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)
206 {
207         struct hns3_qs_to_pri_link_cmd *map;
208         struct hns3_cmd_desc desc;
209
210         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);
211
212         map = (struct hns3_qs_to_pri_link_cmd *)desc.data;
213
214         map->qs_id = rte_cpu_to_le_16(qs_id);
215         map->priority = pri;
216         map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;
217
218         return hns3_cmd_send(hw, &desc, 1);
219 }
220
221 static int
222 hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)
223 {
224         struct hns3_qs_weight_cmd *weight;
225         struct hns3_cmd_desc desc;
226
227         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);
228
229         weight = (struct hns3_qs_weight_cmd *)desc.data;
230
231         weight->qs_id = rte_cpu_to_le_16(qs_id);
232         weight->dwrr = dwrr;
233
234         return hns3_cmd_send(hw, &desc, 1);
235 }
236
237 static int
238 hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
239 {
240 #define DEFAULT_TC_WEIGHT       1
241 #define DEFAULT_TC_OFFSET       14
242         struct hns3_ets_tc_weight_cmd *ets_weight;
243         struct hns3_cmd_desc desc;
244         uint8_t i;
245
246         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);
247         ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;
248
249         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
250                 struct hns3_pg_info *pg_info;
251
252                 ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
253
254                 if (!(hw->hw_tc_map & BIT(i)))
255                         continue;
256
257                 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
258                 ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
259         }
260
261         ets_weight->weight_offset = DEFAULT_TC_OFFSET;
262
263         return hns3_cmd_send(hw, &desc, 1);
264 }
265
266 static int
267 hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)
268 {
269         struct hns3_priority_weight_cmd *weight;
270         struct hns3_cmd_desc desc;
271
272         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);
273
274         weight = (struct hns3_priority_weight_cmd *)desc.data;
275
276         weight->pri_id = pri_id;
277         weight->dwrr = dwrr;
278
279         return hns3_cmd_send(hw, &desc, 1);
280 }
281
282 static int
283 hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)
284 {
285         struct hns3_pg_weight_cmd *weight;
286         struct hns3_cmd_desc desc;
287
288         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);
289
290         weight = (struct hns3_pg_weight_cmd *)desc.data;
291
292         weight->pg_id = pg_id;
293         weight->dwrr = dwrr;
294
295         return hns3_cmd_send(hw, &desc, 1);
296 }
297 static int
298 hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)
299 {
300         struct hns3_cmd_desc desc;
301
302         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);
303
304         if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)
305                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
306         else
307                 desc.data[1] = 0;
308
309         desc.data[0] = rte_cpu_to_le_32(pg_id);
310
311         return hns3_cmd_send(hw, &desc, 1);
312 }
313
314 static uint32_t
315 hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
316                            uint8_t bs_b, uint8_t bs_s)
317 {
318         uint32_t shapping_para = 0;
319
320         /* If ir_b is zero it means IR is 0Mbps, return zero of shapping_para */
321         if (ir_b == 0)
322                 return shapping_para;
323
324         hns3_dcb_set_field(shapping_para, IR_B, ir_b);
325         hns3_dcb_set_field(shapping_para, IR_U, ir_u);
326         hns3_dcb_set_field(shapping_para, IR_S, ir_s);
327         hns3_dcb_set_field(shapping_para, BS_B, bs_b);
328         hns3_dcb_set_field(shapping_para, BS_S, bs_s);
329
330         return shapping_para;
331 }
332
333 static int
334 hns3_dcb_port_shaper_cfg(struct hns3_hw *hw, uint32_t speed)
335 {
336         struct hns3_port_shapping_cmd *shap_cfg_cmd;
337         struct hns3_shaper_parameter shaper_parameter;
338         uint32_t shapping_para;
339         uint32_t ir_u, ir_b, ir_s;
340         struct hns3_cmd_desc desc;
341         int ret;
342
343         ret = hns3_shaper_para_calc(hw, speed,
344                                     HNS3_SHAPER_LVL_PORT, &shaper_parameter);
345         if (ret) {
346                 hns3_err(hw, "calculate shaper parameter failed: %d", ret);
347                 return ret;
348         }
349
350         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);
351         shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;
352
353         ir_b = shaper_parameter.ir_b;
354         ir_u = shaper_parameter.ir_u;
355         ir_s = shaper_parameter.ir_s;
356         shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
357                                                    HNS3_SHAPER_BS_U_DEF,
358                                                    HNS3_SHAPER_BS_S_DEF);
359
360         shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);
361
362         /*
363          * Configure the port_rate and set bit HNS3_TM_RATE_VLD_B of flag
364          * field in hns3_port_shapping_cmd to require firmware to recalculate
365          * shapping parameters. And whether the parameters are recalculated
366          * depends on the firmware version. But driver still needs to
367          * calculate it and configure to firmware for better compatibility.
368          */
369         shap_cfg_cmd->port_rate = rte_cpu_to_le_32(speed);
370         hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
371
372         return hns3_cmd_send(hw, &desc, 1);
373 }
374
375 int
376 hns3_port_shaper_update(struct hns3_hw *hw, uint32_t speed)
377 {
378         int ret;
379
380         ret = hns3_dcb_port_shaper_cfg(hw, speed);
381         if (ret)
382                 hns3_err(hw, "configure port shappering failed: ret = %d", ret);
383
384         return ret;
385 }
386
387 static int
388 hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
389                          uint8_t pg_id, uint32_t shapping_para, uint32_t rate)
390 {
391         struct hns3_pg_shapping_cmd *shap_cfg_cmd;
392         enum hns3_opcode_type opcode;
393         struct hns3_cmd_desc desc;
394
395         opcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :
396                  HNS3_OPC_TM_PG_C_SHAPPING;
397         hns3_cmd_setup_basic_desc(&desc, opcode, false);
398
399         shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;
400
401         shap_cfg_cmd->pg_id = pg_id;
402
403         shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);
404
405         /*
406          * Configure the pg_rate and set bit HNS3_TM_RATE_VLD_B of flag field in
407          * hns3_pg_shapping_cmd to require firmware to recalculate shapping
408          * parameters. And whether parameters are recalculated depends on
409          * the firmware version. But driver still needs to calculate it and
410          * configure to firmware for better compatibility.
411          */
412         shap_cfg_cmd->pg_rate = rte_cpu_to_le_32(rate);
413         hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
414
415         return hns3_cmd_send(hw, &desc, 1);
416 }
417
/*
 * Configure both shaper buckets (CIR and PIR) of one priority group with
 * the given rate (Mbps).
 */
int
hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate)
{
	struct hns3_shaper_parameter shaper_parameter;
	uint32_t ir_u, ir_b, ir_s;
	uint32_t shaper_para;
	int ret;

	/* Calc shaper para */
	ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PG,
				    &shaper_parameter);
	if (ret) {
		hns3_err(hw, "calculate shaper parameter fail, ret = %d.",
			 ret);
		return ret;
	}

	/*
	 * ir_b is passed as 0 here, so hns3_dcb_get_shapping_para() returns
	 * an all-zero word for the CIR bucket (presumably leaving committed-
	 * rate shaping inactive -- NOTE(review): confirm against hw spec);
	 * only the PIR bucket below carries the computed rate.
	 */
	shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
						 HNS3_SHAPER_BS_U_DEF,
						 HNS3_SHAPER_BS_S_DEF);

	ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, pg_id,
				       shaper_para, rate);
	if (ret) {
		hns3_err(hw, "config PG CIR shaper parameter fail, ret = %d.",
			 ret);
		return ret;
	}

	/* PIR bucket: program the parameters computed above. */
	ir_b = shaper_parameter.ir_b;
	ir_u = shaper_parameter.ir_u;
	ir_s = shaper_parameter.ir_s;
	shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
						 HNS3_SHAPER_BS_U_DEF,
						 HNS3_SHAPER_BS_S_DEF);

	ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, pg_id,
				       shaper_para, rate);
	if (ret) {
		hns3_err(hw, "config PG PIR shaper parameter fail, ret = %d.",
			 ret);
		return ret;
	}

	return 0;
}
464
465 static int
466 hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
467 {
468         struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
469         uint32_t rate;
470         uint8_t i;
471         int ret;
472
473         /* Cfg pg schd */
474         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
475                 return -EINVAL;
476
477         /* Pg to pri */
478         for (i = 0; i < hw->dcb_info.num_pg; i++) {
479                 rate = hw->dcb_info.pg_info[i].bw_limit;
480                 ret = hns3_pg_shaper_rate_cfg(hw, i, rate);
481                 if (ret)
482                         return ret;
483         }
484
485         return 0;
486 }
487
488 static int
489 hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)
490 {
491         struct hns3_cmd_desc desc;
492
493         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);
494
495         if (mode == HNS3_SCH_MODE_DWRR)
496                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
497         else
498                 desc.data[1] = 0;
499
500         desc.data[0] = rte_cpu_to_le_32(qs_id);
501
502         return hns3_cmd_send(hw, &desc, 1);
503 }
504
505 static int
506 hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)
507 {
508         struct hns3_cmd_desc desc;
509
510         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);
511
512         if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)
513                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
514         else
515                 desc.data[1] = 0;
516
517         desc.data[0] = rte_cpu_to_le_32(pri_id);
518
519         return hns3_cmd_send(hw, &desc, 1);
520 }
521
522 static int
523 hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
524                           uint8_t pri_id, uint32_t shapping_para, uint32_t rate)
525 {
526         struct hns3_pri_shapping_cmd *shap_cfg_cmd;
527         enum hns3_opcode_type opcode;
528         struct hns3_cmd_desc desc;
529
530         opcode = bucket ? HNS3_OPC_TM_PRI_P_SHAPPING :
531                  HNS3_OPC_TM_PRI_C_SHAPPING;
532
533         hns3_cmd_setup_basic_desc(&desc, opcode, false);
534
535         shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;
536
537         shap_cfg_cmd->pri_id = pri_id;
538
539         shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);
540
541         /*
542          * Configure the pri_rate and set bit HNS3_TM_RATE_VLD_B of flag
543          * field in hns3_pri_shapping_cmd to require firmware to recalculate
544          * shapping parameters. And whether the parameters are recalculated
545          * depends on the firmware version. But driver still needs to
546          * calculate it and configure to firmware for better compatibility.
547          */
548         shap_cfg_cmd->pri_rate = rte_cpu_to_le_32(rate);
549         hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
550
551         return hns3_cmd_send(hw, &desc, 1);
552 }
553
/*
 * Configure both shaper buckets (CIR and PIR) of one TC's priority with
 * the given rate (Mbps).
 */
int
hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate)
{
	struct hns3_shaper_parameter shaper_parameter;
	uint32_t ir_u, ir_b, ir_s;
	uint32_t shaper_para;
	int ret;

	ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PRI,
				    &shaper_parameter);
	if (ret) {
		hns3_err(hw, "calculate shaper parameter failed: %d.",
			 ret);
		return ret;
	}

	/*
	 * ir_b is passed as 0 here, so hns3_dcb_get_shapping_para() returns
	 * an all-zero word for the CIR bucket (presumably leaving committed-
	 * rate shaping inactive -- NOTE(review): confirm against hw spec);
	 * only the PIR bucket below carries the computed rate.
	 */
	shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
						 HNS3_SHAPER_BS_U_DEF,
						 HNS3_SHAPER_BS_S_DEF);

	ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, tc_no,
					shaper_para, rate);
	if (ret) {
		hns3_err(hw,
			 "config priority CIR shaper parameter failed: %d.",
			 ret);
		return ret;
	}

	/* PIR bucket: program the parameters computed above. */
	ir_b = shaper_parameter.ir_b;
	ir_u = shaper_parameter.ir_u;
	ir_s = shaper_parameter.ir_s;
	shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
						 HNS3_SHAPER_BS_U_DEF,
						 HNS3_SHAPER_BS_S_DEF);

	ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, tc_no,
					shaper_para, rate);
	if (ret) {
		hns3_err(hw,
			 "config priority PIR shaper parameter failed: %d.",
			 ret);
		return ret;
	}

	return 0;
}
601
602 static int
603 hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
604 {
605         struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
606         uint32_t rate;
607         uint8_t i;
608         int ret;
609
610         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
611                 return -EINVAL;
612
613         for (i = 0; i < hw->dcb_info.num_tc; i++) {
614                 rate = hw->dcb_info.tc_info[i].bw_limit;
615                 ret = hns3_pri_shaper_rate_cfg(hw, i, rate);
616                 if (ret) {
617                         hns3_err(hw, "config pri shaper failed: %d.", ret);
618                         return ret;
619                 }
620         }
621
622         return 0;
623 }
624
/*
 * Distribute nb_rx_q Rx queues evenly across the enabled TCs and record
 * the resulting per-TC RSS size. nb_rx_q must be an integral multiple of
 * hw->num_tc, and the per-TC share must not exceed hw->rss_size_max.
 */
static int
hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
{
	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
	uint16_t rx_qnum_per_tc;
	uint16_t used_rx_queues;
	int i;

	/* NOTE: callers are expected to guarantee hw->num_tc != 0. */
	rx_qnum_per_tc = nb_rx_q / hw->num_tc;
	if (rx_qnum_per_tc > hw->rss_size_max) {
		hns3_err(hw, "rx queue number of per tc (%u) is greater than "
			 "value (%u) hardware supported.",
			 rx_qnum_per_tc, hw->rss_size_max);
		return -EINVAL;
	}

	used_rx_queues = hw->num_tc * rx_qnum_per_tc;
	if (used_rx_queues != nb_rx_q) {
		hns3_err(hw, "rx queue number (%u) configured must be an "
			 "integral multiple of valid tc number (%u).",
			 nb_rx_q, hw->num_tc);
		return -EINVAL;
	}
	hw->alloc_rss_size = rx_qnum_per_tc;
	hw->used_rx_queues = used_rx_queues;

	/*
	 * When rss size is changed, we need to update rss redirection table
	 * maintained by driver. Besides, during the entire reset process, we
	 * need to ensure that the rss table information are not overwritten
	 * and configured directly to the hardware in the RESET_STAGE_RESTORE
	 * stage of the reset process.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		/* Round-robin fill so each TC's queues appear equally often. */
		for (i = 0; i < hw->rss_ind_tbl_size; i++)
			rss_cfg->rss_indirection_tbl[i] =
							i % hw->alloc_rss_size;
	}

	return 0;
}
666
667 static int
668 hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_tx_q)
669 {
670         struct hns3_tc_queue_info *tc_queue;
671         uint16_t used_tx_queues;
672         uint16_t tx_qnum_per_tc;
673         uint8_t i;
674
675         tx_qnum_per_tc = nb_tx_q / hw->num_tc;
676         used_tx_queues = hw->num_tc * tx_qnum_per_tc;
677         if (used_tx_queues != nb_tx_q) {
678                 hns3_err(hw, "tx queue number (%u) configured must be an "
679                          "integral multiple of valid tc number (%u).",
680                          nb_tx_q, hw->num_tc);
681                 return -EINVAL;
682         }
683
684         hw->used_tx_queues = used_tx_queues;
685         hw->tx_qnum_per_tc = tx_qnum_per_tc;
686         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
687                 tc_queue = &hw->tc_queue[i];
688                 if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
689                         tc_queue->enable = true;
690                         tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
691                         tc_queue->tqp_count = hw->tx_qnum_per_tc;
692                         tc_queue->tc = i;
693                 } else {
694                         /* Set to default queue if TC is disable */
695                         tc_queue->enable = false;
696                         tc_queue->tqp_offset = 0;
697                         tc_queue->tqp_count = 0;
698                         tc_queue->tc = 0;
699                 }
700         }
701
702         return 0;
703 }
704
705 uint8_t
706 hns3_txq_mapped_tc_get(struct hns3_hw *hw, uint16_t txq_no)
707 {
708         struct hns3_tc_queue_info *tc_queue;
709         uint8_t i;
710
711         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
712                 tc_queue = &hw->tc_queue[i];
713                 if (!tc_queue->enable)
714                         continue;
715
716                 if (txq_no >= tc_queue->tqp_offset &&
717                     txq_no < tc_queue->tqp_offset + tc_queue->tqp_count)
718                         return i;
719         }
720
721         /* return TC0 in default case */
722         return 0;
723 }
724
725 int
726 hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q)
727 {
728         int ret;
729
730         if (nb_rx_q < hw->num_tc) {
731                 hns3_err(hw, "number of Rx queues(%u) is less than number of TC(%u).",
732                          nb_rx_q, hw->num_tc);
733                 return -EINVAL;
734         }
735
736         if (nb_tx_q < hw->num_tc) {
737                 hns3_err(hw, "number of Tx queues(%u) is less than number of TC(%u).",
738                          nb_tx_q, hw->num_tc);
739                 return -EINVAL;
740         }
741
742         ret = hns3_set_rss_size(hw, nb_rx_q);
743         if (ret)
744                 return ret;
745
746         return hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
747 }
748
749 static int
750 hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
751                                  uint16_t nb_tx_q)
752 {
753         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
754         struct hns3_pf *pf = &hns->pf;
755         int ret;
756
757         hw->num_tc = hw->dcb_info.num_tc;
758         ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
759         if (ret)
760                 return ret;
761
762         if (!hns->is_vf)
763                 memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
764
765         return 0;
766 }
767
/*
 * Initialize default DCB bookkeeping: PG0 owns every enabled TC with
 * 100% DWRR weight, all user priorities map to TC0, and every PG/TC is
 * limited only by the maximum TM rate.
 */
int
hns3_dcb_info_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int i, k;

	/* Non-TC-based scheduling supports exactly one PG. */
	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
	    hw->dcb_info.num_pg != 1)
		return -EINVAL;

	/* Initializing PG information */
	memset(hw->dcb_info.pg_info, 0,
	       sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
	for (i = 0; i < hw->dcb_info.num_pg; i++) {
		/* PG0 gets the full bandwidth share; other PGs get none. */
		hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;
		hw->dcb_info.pg_info[i].pg_id = i;
		hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;
		hw->dcb_info.pg_info[i].bw_limit = hw->max_tm_rate;

		if (i != 0)
			continue;

		/* Only PG0 carries the TC bitmap and per-TC DWRR weights. */
		hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;
		for (k = 0; k < hw->dcb_info.num_tc; k++)
			hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;
	}

	/* All UPs mapping to TC0 */
	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
		hw->dcb_info.prio_tc[i] = 0;

	/* Initializing tc information */
	memset(hw->dcb_info.tc_info, 0,
	       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
	for (i = 0; i < hw->dcb_info.num_tc; i++) {
		hw->dcb_info.tc_info[i].tc_id = i;
		hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
		hw->dcb_info.tc_info[i].pgid = 0;
		hw->dcb_info.tc_info[i].bw_limit =
			hw->dcb_info.pg_info[0].bw_limit;
	}

	return 0;
}
813
814 static int
815 hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)
816 {
817         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
818         struct hns3_pf *pf = &hns->pf;
819         int ret, i;
820
821         /* Only being config on TC-Based scheduler mode */
822         if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)
823                 return -EINVAL;
824
825         for (i = 0; i < hw->dcb_info.num_pg; i++) {
826                 ret = hns3_dcb_pg_schd_mode_cfg(hw, i);
827                 if (ret)
828                         return ret;
829         }
830
831         return 0;
832 }
833
834 static int
835 hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)
836 {
837         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
838         struct hns3_pf *pf = &hns->pf;
839         uint8_t i;
840         int ret;
841
842         if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {
843                 for (i = 0; i < hw->dcb_info.num_tc; i++) {
844                         ret = hns3_dcb_pri_schd_mode_cfg(hw, i);
845                         if (ret)
846                                 return ret;
847
848                         ret = hns3_dcb_qs_schd_mode_cfg(hw, i,
849                                                         HNS3_SCH_MODE_DWRR);
850                         if (ret)
851                                 return ret;
852                 }
853         }
854
855         return 0;
856 }
857
static int
hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)
{
	int ret = hns3_dcb_lvl2_schd_mode_cfg(hw);

	/* Configure level 2 first, then levels 3/4 only on success. */
	if (ret != 0) {
		hns3_err(hw, "config lvl2_schd_mode failed: %d", ret);
	} else {
		ret = hns3_dcb_lvl34_schd_mode_cfg(hw);
		if (ret != 0)
			hns3_err(hw, "config lvl34_schd_mode failed: %d", ret);
	}

	return ret;
}
875
876 static int
877 hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)
878 {
879         struct hns3_pg_info *pg_info;
880         uint8_t dwrr;
881         int ret, i;
882
883         for (i = 0; i < hw->dcb_info.num_tc; i++) {
884                 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
885                 dwrr = pg_info->tc_dwrr[i];
886
887                 ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);
888                 if (ret) {
889                         hns3_err(hw,
890                                "fail to send priority weight cmd: %d, ret = %d",
891                                i, ret);
892                         return ret;
893                 }
894
895                 ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);
896                 if (ret) {
897                         hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d",
898                                  i, ret);
899                         return ret;
900                 }
901         }
902
903         return 0;
904 }
905
906 static int
907 hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
908 {
909         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
910         struct hns3_pf *pf = &hns->pf;
911         uint32_t version;
912         int ret;
913
914         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
915                 return -EINVAL;
916
917         ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);
918         if (ret)
919                 return ret;
920
921         if (!hns3_dev_dcb_supported(hw))
922                 return 0;
923
924         ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
925         if (ret == -EOPNOTSUPP) {
926                 version = hw->fw_version;
927                 hns3_warn(hw,
928                           "fw %lu.%lu.%lu.%lu doesn't support ets tc weight cmd",
929                           hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
930                                          HNS3_FW_VERSION_BYTE3_S),
931                           hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
932                                          HNS3_FW_VERSION_BYTE2_S),
933                           hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
934                                          HNS3_FW_VERSION_BYTE1_S),
935                           hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
936                                          HNS3_FW_VERSION_BYTE0_S));
937                 ret = 0;
938         }
939
940         return ret;
941 }
942
943 static int
944 hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)
945 {
946         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
947         struct hns3_pf *pf = &hns->pf;
948         int ret, i;
949
950         /* Cfg pg schd */
951         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
952                 return -EINVAL;
953
954         /* Cfg pg to prio */
955         for (i = 0; i < hw->dcb_info.num_pg; i++) {
956                 /* Cfg dwrr */
957                 ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);
958                 if (ret)
959                         return ret;
960         }
961
962         return 0;
963 }
964
static int
hns3_dcb_dwrr_cfg(struct hns3_hw *hw)
{
	int ret;

	/* PG-level weights first, then priority-level weights. */
	ret = hns3_dcb_pg_dwrr_cfg(hw);
	if (ret != 0) {
		hns3_err(hw, "config pg_dwrr failed: %d", ret);
		return ret;
	}

	ret = hns3_dcb_pri_dwrr_cfg(hw);
	if (ret != 0)
		hns3_err(hw, "config pri_dwrr failed: %d", ret);

	return ret;
}
982
983 static int
984 hns3_dcb_shaper_cfg(struct hns3_hw *hw)
985 {
986         int ret;
987
988         ret = hns3_dcb_port_shaper_cfg(hw, hw->mac.link_speed);
989         if (ret) {
990                 hns3_err(hw, "config port shaper failed: %d", ret);
991                 return ret;
992         }
993
994         ret = hns3_dcb_pg_shaper_cfg(hw);
995         if (ret) {
996                 hns3_err(hw, "config pg shaper failed: %d", ret);
997                 return ret;
998         }
999
1000         return hns3_dcb_pri_shaper_cfg(hw);
1001 }
1002
/*
 * Link one NIC queue (nq) to a queue set (qs) via the TM_NQ_TO_QS_LINK
 * command. The qset id is re-packed into the hardware's split format
 * (see the diagram below) with the link-valid bit set.
 * Returns 0 on success, negative errno otherwise.
 */
static int
hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)
{
	struct hns3_nq_to_qs_link_cmd *map;
	struct hns3_cmd_desc desc;
	uint16_t tmp_qs_id = 0;
	uint16_t qs_id_l;
	uint16_t qs_id_h;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);

	/* The command payload lives directly in the descriptor data area. */
	map = (struct hns3_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = rte_cpu_to_le_16(q_id);

	/*
	 * Network engine with revision_id 0x21 uses 0~9 bit of qs_id to
	 * configure qset_id. So we need to convert qs_id to the follow
	 * format to support qset_id > 1024.
	 * qs_id: | 15 | 14 ~ 10 |  9 ~ 0   |
	 *            /         / \         \
	 *           /         /   \         \
	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
	 *          | qs_id_h | vld | qs_id_l |
	 */
	qs_id_l = hns3_get_field(qs_id, HNS3_DCB_QS_ID_L_MSK,
				 HNS3_DCB_QS_ID_L_S);
	qs_id_h = hns3_get_field(qs_id, HNS3_DCB_QS_ID_H_MSK,
				 HNS3_DCB_QS_ID_H_S);
	/* Reassemble: low bits stay put, high bits move above the vld bit. */
	hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_L_MSK, HNS3_DCB_QS_ID_L_S,
		       qs_id_l);
	hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_H_EXT_MSK,
		       HNS3_DCB_QS_ID_H_EXT_S, qs_id_h);
	/* Bit 10 (vld) marks the nq->qs link as valid. */
	map->qset_id = rte_cpu_to_le_16(tmp_qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);

	return hns3_cmd_send(hw, &desc, 1);
}
1040
1041 static int
1042 hns3_q_to_qs_map(struct hns3_hw *hw)
1043 {
1044         struct hns3_tc_queue_info *tc_queue;
1045         uint16_t q_id;
1046         uint32_t i, j;
1047         int ret;
1048
1049         for (i = 0; i < hw->num_tc; i++) {
1050                 tc_queue = &hw->tc_queue[i];
1051                 for (j = 0; j < tc_queue->tqp_count; j++) {
1052                         q_id = tc_queue->tqp_offset + j;
1053                         ret = hns3_q_to_qs_map_cfg(hw, q_id, i);
1054                         if (ret)
1055                                 return ret;
1056                 }
1057         }
1058
1059         return 0;
1060 }
1061
1062 static int
1063 hns3_pri_q_qs_cfg(struct hns3_hw *hw)
1064 {
1065         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1066         struct hns3_pf *pf = &hns->pf;
1067         uint32_t i;
1068         int ret;
1069
1070         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
1071                 return -EINVAL;
1072
1073         /* Cfg qs -> pri mapping */
1074         for (i = 0; i < hw->num_tc; i++) {
1075                 ret = hns3_qs_to_pri_map_cfg(hw, i, i);
1076                 if (ret) {
1077                         hns3_err(hw, "qs_to_pri mapping fail: %d", ret);
1078                         return ret;
1079                 }
1080         }
1081
1082         /* Cfg q -> qs mapping */
1083         ret = hns3_q_to_qs_map(hw);
1084         if (ret)
1085                 hns3_err(hw, "nq_to_qs mapping fail: %d", ret);
1086
1087         return ret;
1088 }
1089
static int
hns3_dcb_map_cfg(struct hns3_hw *hw)
{
	int ret;

	/* user priority -> TC mapping. */
	ret = hns3_up_to_tc_map(hw);
	if (ret != 0) {
		hns3_err(hw, "up_to_tc mapping fail: %d", ret);
		return ret;
	}

	/* PG -> priority mapping. */
	ret = hns3_pg_to_pri_map(hw);
	if (ret != 0) {
		hns3_err(hw, "pri_to_pg mapping fail: %d", ret);
		return ret;
	}

	/* priority/qset/queue mappings. */
	return hns3_pri_q_qs_cfg(hw);
}
1109
/*
 * Program the whole scheduler tree: mappings, shapers, DWRR weights and
 * per-level scheduling modes, in that order.
 */
static int
hns3_dcb_schd_setup_hw(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_map_cfg(hw);
	if (ret != 0)
		return ret;

	ret = hns3_dcb_shaper_cfg(hw);
	if (ret != 0)
		return ret;

	ret = hns3_dcb_dwrr_cfg(hw);
	if (ret != 0)
		return ret;

	return hns3_dcb_schd_mode_cfg(hw);
}
1133
1134 static int
1135 hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,
1136                      uint8_t pause_trans_gap, uint16_t pause_trans_time)
1137 {
1138         struct hns3_cfg_pause_param_cmd *pause_param;
1139         struct hns3_cmd_desc desc;
1140
1141         pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1142
1143         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);
1144
1145         memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);
1146         memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);
1147         pause_param->pause_trans_gap = pause_trans_gap;
1148         pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);
1149
1150         return hns3_cmd_send(hw, &desc, 1);
1151 }
1152
/*
 * Update the MAC address carried in pause frames while preserving the
 * currently configured transmit gap and time: first query the current
 * MAC pause parameters from firmware, then write them back together with
 * the new address.
 * Returns 0 on success, negative errno otherwise.
 */
int
hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)
{
	struct hns3_cfg_pause_param_cmd *pause_param;
	struct hns3_cmd_desc desc;
	uint16_t trans_time;
	uint8_t trans_gap;
	int ret;

	pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;

	/* "true" marks a read command: firmware fills desc.data on return. */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	/* Extract the current settings from the command response. */
	trans_gap = pause_param->pause_trans_gap;
	trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);

	return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);
}
1175
1176 static int
1177 hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
1178 {
1179 #define PAUSE_TIME_DIV_BY       2
1180 #define PAUSE_TIME_MIN_VALUE    0x4
1181
1182         struct hns3_mac *mac = &hw->mac;
1183         uint8_t pause_trans_gap;
1184
1185         /*
1186          * Pause transmit gap must be less than "pause_time / 2", otherwise
1187          * the behavior of MAC is undefined.
1188          */
1189         if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1190                 pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
1191         else if (pause_time >= PAUSE_TIME_MIN_VALUE &&
1192                  pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1193                 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1194         else {
1195                 hns3_warn(hw, "pause_time(%u) is adjusted to 4", pause_time);
1196                 pause_time = PAUSE_TIME_MIN_VALUE;
1197                 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1198         }
1199
1200         return hns3_pause_param_cfg(hw, mac->mac_addr,
1201                                     pause_trans_gap, pause_time);
1202 }
1203
1204 static int
1205 hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)
1206 {
1207         struct hns3_cmd_desc desc;
1208
1209         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);
1210
1211         desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1212                 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1213
1214         return hns3_cmd_send(hw, &desc, 1);
1215 }
1216
1217 static int
1218 hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)
1219 {
1220         struct hns3_cmd_desc desc;
1221         struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;
1222
1223         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);
1224
1225         pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1226                                         (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1227
1228         pfc->pri_en_bitmap = pfc_bitmap;
1229
1230         return hns3_cmd_send(hw, &desc, 1);
1231 }
1232
1233 static int
1234 hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)
1235 {
1236         struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
1237         struct hns3_cmd_desc desc;
1238
1239         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);
1240
1241         bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;
1242
1243         bp_to_qs_map_cmd->tc_id = tc;
1244         bp_to_qs_map_cmd->qs_group_id = grp_id;
1245         bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);
1246
1247         return hns3_cmd_send(hw, &desc, 1);
1248 }
1249
1250 static void
1251 hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
1252 {
1253         switch (hw->requested_fc_mode) {
1254         case HNS3_FC_NONE:
1255                 *tx_en = false;
1256                 *rx_en = false;
1257                 break;
1258         case HNS3_FC_RX_PAUSE:
1259                 *tx_en = false;
1260                 *rx_en = true;
1261                 break;
1262         case HNS3_FC_TX_PAUSE:
1263                 *tx_en = true;
1264                 *rx_en = false;
1265                 break;
1266         case HNS3_FC_FULL:
1267                 *tx_en = true;
1268                 *rx_en = true;
1269                 break;
1270         default:
1271                 *tx_en = false;
1272                 *rx_en = false;
1273                 break;
1274         }
1275 }
1276
1277 static int
1278 hns3_mac_pause_setup_hw(struct hns3_hw *hw)
1279 {
1280         bool tx_en, rx_en;
1281
1282         if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
1283                 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1284         else {
1285                 tx_en = false;
1286                 rx_en = false;
1287         }
1288
1289         return hns3_mac_pause_en_cfg(hw, tx_en, rx_en);
1290 }
1291
1292 static int
1293 hns3_pfc_setup_hw(struct hns3_hw *hw)
1294 {
1295         bool tx_en, rx_en;
1296
1297         if (hw->current_fc_status == HNS3_FC_STATUS_PFC)
1298                 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1299         else {
1300                 tx_en = false;
1301                 rx_en = false;
1302         }
1303
1304         return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);
1305 }
1306
1307 /*
1308  * Each Tc has a 1024 queue sets to backpress, it divides to
1309  * 32 group, each group contains 32 queue sets, which can be
1310  * represented by uint32_t bitmap.
1311  */
1312 static int
1313 hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)
1314 {
1315         uint32_t qs_bitmap;
1316         int ret;
1317         int i;
1318
1319         for (i = 0; i < HNS3_BP_GRP_NUM; i++) {
1320                 uint8_t grp, sub_grp;
1321                 qs_bitmap = 0;
1322
1323                 grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
1324                 sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,
1325                                          HNS3_BP_SUB_GRP_ID_S);
1326                 if (i == grp)
1327                         qs_bitmap |= (1 << sub_grp);
1328
1329                 ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);
1330                 if (ret)
1331                         return ret;
1332         }
1333
1334         return 0;
1335 }
1336
1337 static int
1338 hns3_dcb_bp_setup(struct hns3_hw *hw)
1339 {
1340         int ret, i;
1341
1342         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1343                 ret = hns3_bp_setup_hw(hw, i);
1344                 if (ret)
1345                         return ret;
1346         }
1347
1348         return 0;
1349 }
1350
1351 static int
1352 hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
1353 {
1354         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1355         struct hns3_pf *pf = &hns->pf;
1356         int ret;
1357
1358         ret = hns3_pause_param_setup_hw(hw, pf->pause_time);
1359         if (ret) {
1360                 hns3_err(hw, "Fail to set pause parameter. ret = %d", ret);
1361                 return ret;
1362         }
1363
1364         ret = hns3_mac_pause_setup_hw(hw);
1365         if (ret) {
1366                 hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret);
1367                 return ret;
1368         }
1369
1370         /* Only DCB-supported dev supports qset back pressure and pfc cmd */
1371         if (!hns3_dev_dcb_supported(hw))
1372                 return 0;
1373
1374         ret = hns3_pfc_setup_hw(hw);
1375         if (ret) {
1376                 hns3_err(hw, "config pfc failed! ret = %d", ret);
1377                 return ret;
1378         }
1379
1380         return hns3_dcb_bp_setup(hw);
1381 }
1382
1383 static uint8_t
1384 hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)
1385 {
1386         uint8_t pfc_map = 0;
1387         uint8_t *prio_tc;
1388         uint8_t i, j;
1389
1390         prio_tc = hw->dcb_info.prio_tc;
1391         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1392                 for (j = 0; j < HNS3_MAX_USER_PRIO; j++) {
1393                         if (prio_tc[j] == i && pfc_en & BIT(j)) {
1394                                 pfc_map |= BIT(i);
1395                                 break;
1396                         }
1397                 }
1398         }
1399
1400         return pfc_map;
1401 }
1402
1403 static void
1404 hns3_dcb_cfg_validate(struct hns3_adapter *hns, uint8_t *tc, bool *changed)
1405 {
1406         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1407         struct hns3_hw *hw = &hns->hw;
1408         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1409         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1410         uint8_t max_tc = 0;
1411         uint8_t pfc_en;
1412         int i;
1413
1414         dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1415         for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1416                 if (dcb_rx_conf->dcb_tc[i] != hw->dcb_info.prio_tc[i])
1417                         *changed = true;
1418
1419                 if (dcb_rx_conf->dcb_tc[i] > max_tc)
1420                         max_tc = dcb_rx_conf->dcb_tc[i];
1421         }
1422         *tc = max_tc + 1;
1423         if (*tc != hw->dcb_info.num_tc)
1424                 *changed = true;
1425
1426         /*
1427          * We ensure that dcb information can be reconfigured
1428          * after the hns3_priority_flow_ctrl_set function called.
1429          */
1430         if (hw->requested_fc_mode != HNS3_FC_FULL)
1431                 *changed = true;
1432         pfc_en = RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1433         if (hw->dcb_info.pfc_en != pfc_en)
1434                 *changed = true;
1435
1436         /* tx/rx queue number is reconfigured. */
1437         if (nb_rx_q != hw->used_rx_queues || nb_tx_q != hw->used_tx_queues)
1438                 *changed = true;
1439 }
1440
/*
 * Build the driver's soft DCB configuration from the user-supplied
 * rte_eth_dcb_rx_conf: all TCs are placed into PG0, PG0 gets the full
 * bandwidth, and the per-TC DWRR weights split that bandwidth evenly.
 * Finally the TC/queue mapping is refreshed.
 * Returns 0 on success, negative errno otherwise.
 */
static int
hns3_dcb_info_cfg(struct hns3_adapter *hns)
{
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint8_t tc_bw, bw_rest;
	uint8_t i, j;
	int ret;

	dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
	pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
	pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;

	/* Config pg0: DWRR schedule, full bandwidth, every valid TC mapped. */
	memset(hw->dcb_info.pg_info, 0,
	       sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
	hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT;
	hw->dcb_info.pg_info[0].pg_id = 0;
	hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;
	hw->dcb_info.pg_info[0].bw_limit = hw->max_tm_rate;
	hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;

	/* Each tc has same bw for valid tc by default */
	tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;
	for (i = 0; i < hw->dcb_info.num_tc; i++)
		hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;
	/* To ensure the sum of tc_dwrr is equal to 100 */
	bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;
	for (j = 0; j < bw_rest; j++)
		hw->dcb_info.pg_info[0].tc_dwrr[j]++;
	/* "i" carries over from above: zero weights of TCs beyond num_tc. */
	for (; i < dcb_rx_conf->nb_tcs; i++)
		hw->dcb_info.pg_info[0].tc_dwrr[i] = 0;

	/* All tcs map to pg0 */
	memset(hw->dcb_info.tc_info, 0,
	       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
	for (i = 0; i < hw->dcb_info.num_tc; i++) {
		hw->dcb_info.tc_info[i].tc_id = i;
		hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
		hw->dcb_info.tc_info[i].pgid = 0;
		hw->dcb_info.tc_info[i].bw_limit =
					hw->dcb_info.pg_info[0].bw_limit;
	}

	/* User priority (0..7) to TC mapping comes straight from the config. */
	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
		hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];

	ret = hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
					       hw->data->nb_tx_queues);
	if (ret)
		hns3_err(hw, "update tc queue mapping failed, ret = %d.", ret);

	return ret;
}
1496
1497 static int
1498 hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
1499 {
1500         struct hns3_pf *pf = &hns->pf;
1501         struct hns3_hw *hw = &hns->hw;
1502         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1503         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1504         uint8_t bit_map = 0;
1505         uint8_t i;
1506
1507         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1508             hw->dcb_info.num_pg != 1)
1509                 return -EINVAL;
1510
1511         if (nb_rx_q < num_tc) {
1512                 hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).",
1513                          nb_rx_q, num_tc);
1514                 return -EINVAL;
1515         }
1516
1517         if (nb_tx_q < num_tc) {
1518                 hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).",
1519                          nb_tx_q, num_tc);
1520                 return -EINVAL;
1521         }
1522
1523         /* Currently not support uncontinuous tc */
1524         hw->dcb_info.num_tc = num_tc;
1525         for (i = 0; i < hw->dcb_info.num_tc; i++)
1526                 bit_map |= BIT(i);
1527
1528         if (!bit_map) {
1529                 bit_map = 1;
1530                 hw->dcb_info.num_tc = 1;
1531         }
1532         hw->hw_tc_map = bit_map;
1533
1534         return hns3_dcb_info_cfg(hns);
1535 }
1536
/*
 * Apply the soft DCB configuration to hardware: program the scheduler
 * tree, then (if PFC is requested) the buffer allocation and pause/PFC
 * settings. On a PFC setup failure the previously saved flow-control
 * state is rolled back so software and hardware stay consistent.
 * Returns 0 on success, negative errno otherwise.
 */
static int
hns3_dcb_hw_configure(struct hns3_adapter *hns)
{
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	/* Snapshot current FC state for rollback on failure below. */
	enum hns3_fc_status fc_status = hw->current_fc_status;
	enum hns3_fc_mode requested_fc_mode = hw->requested_fc_mode;
	uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
	int ret;

	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
	    pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
		return -ENOTSUP;

	ret = hns3_dcb_schd_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "dcb schdule configure failed! ret = %d", ret);
		return ret;
	}

	if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
		dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
		if (dcb_rx_conf->nb_tcs == 0)
			hw->dcb_info.pfc_en = 1; /* tc0 only */
		else
			hw->dcb_info.pfc_en =
			RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);

		/* Derive the no-drop TC bitmap from the enabled priorities. */
		hw->dcb_info.hw_pfc_map =
				hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);

		/* Buffer allocation depends on hw_pfc_map set just above. */
		ret = hns3_buffer_alloc(hw);
		if (ret)
			goto buffer_alloc_fail;

		hw->current_fc_status = HNS3_FC_STATUS_PFC;
		hw->requested_fc_mode = HNS3_FC_FULL;
		ret = hns3_dcb_pause_setup_hw(hw);
		if (ret) {
			hns3_err(hw, "setup pfc failed! ret = %d", ret);
			goto pfc_setup_fail;
		}
	} else {
		/*
		 * Although dcb_capability_en is lack of ETH_DCB_PFC_SUPPORT
		 * flag, the DCB information is configured, such as tc numbers.
		 * Therefore, refreshing the allocation of packet buffer is
		 * necessary.
		 */
		ret = hns3_buffer_alloc(hw);
		if (ret)
			return ret;
	}

	return 0;

pfc_setup_fail:
	/* Roll back the FC state changed after buffer allocation succeeded. */
	hw->requested_fc_mode = requested_fc_mode;
	hw->current_fc_status = fc_status;

buffer_alloc_fail:
	hw->dcb_info.hw_pfc_map = hw_pfc_map;

	return ret;
}
1603
1604 /*
1605  * hns3_dcb_configure - setup dcb related config
1606  * @hns: pointer to hns3 adapter
1607  * Returns 0 on success, negative value on failure.
1608  */
1609 int
1610 hns3_dcb_configure(struct hns3_adapter *hns)
1611 {
1612         struct hns3_hw *hw = &hns->hw;
1613         bool map_changed = false;
1614         uint8_t num_tc = 0;
1615         int ret;
1616
1617         hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
1618         if (map_changed) {
1619                 ret = hns3_dcb_info_update(hns, num_tc);
1620                 if (ret) {
1621                         hns3_err(hw, "dcb info update failed: %d", ret);
1622                         return ret;
1623                 }
1624
1625                 ret = hns3_dcb_hw_configure(hns);
1626                 if (ret) {
1627                         hns3_err(hw, "dcb sw configure failed: %d", ret);
1628                         return ret;
1629                 }
1630         }
1631
1632         return 0;
1633 }
1634
int
hns3_dcb_init_hw(struct hns3_hw *hw)
{
	int ret;

	/* Scheduler tree first: mappings, shapers, weights, modes. */
	ret = hns3_dcb_schd_setup_hw(hw);
	if (ret != 0) {
		hns3_err(hw, "dcb schedule setup failed: %d", ret);
		return ret;
	}

	/* Then flow control: pause parameters, MAC pause, PFC, BP. */
	ret = hns3_dcb_pause_setup_hw(hw);
	if (ret != 0)
		hns3_err(hw, "PAUSE setup failed: %d", ret);

	return ret;
}
1652
/*
 * Initialize DCB: default soft configuration (first init only) plus the
 * hardware programming that both driver init and reset recovery need.
 * Returns 0 on success, negative errno otherwise.
 */
int
hns3_dcb_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t default_tqp_num;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/*
	 * According to the 'adapter_state' identifier, the following branch
	 * is only executed to initialize default configurations of dcb during
	 * the initializing driver process. Due to driver saving dcb-related
	 * information before reset triggered, the reinit dev stage of the
	 * reset process can not access to the branch, or those information
	 * will be changed.
	 */
	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
		/* Flow control starts fully disabled. */
		hw->requested_fc_mode = HNS3_FC_NONE;
		pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
		hw->current_fc_status = HNS3_FC_STATUS_NONE;

		ret = hns3_dcb_info_init(hw);
		if (ret) {
			hns3_err(hw, "dcb info init failed, ret = %d.", ret);
			return ret;
		}

		/*
		 * The number of queues configured by default cannot exceed
		 * the maximum number of queues for a single TC.
		 */
		default_tqp_num = RTE_MIN(hw->rss_size_max,
					  hw->tqps_num / hw->dcb_info.num_tc);
		ret = hns3_dcb_update_tc_queue_mapping(hw, default_tqp_num,
						       default_tqp_num);
		if (ret) {
			hns3_err(hw,
				 "update tc queue mapping failed, ret = %d.",
				 ret);
			return ret;
		}
	}

	/*
	 * DCB hardware will be configured by following the function during
	 * the initializing driver process and the reset process. However,
	 * driver will restore directly configurations of dcb hardware based
	 * on dcb-related information soft maintained when driver
	 * initialization has finished and reset is coming.
	 */
	ret = hns3_dcb_init_hw(hw);
	if (ret) {
		hns3_err(hw, "dcb init hardware failed, ret = %d.", ret);
		return ret;
	}

	return 0;
}
1713
1714 int
1715 hns3_update_queue_map_configure(struct hns3_adapter *hns)
1716 {
1717         struct hns3_hw *hw = &hns->hw;
1718         enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
1719         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1720         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1721         int ret;
1722
1723         if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
1724                 return 0;
1725
1726         ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
1727         if (ret) {
1728                 hns3_err(hw, "failed to update tc queue mapping, ret = %d.",
1729                          ret);
1730                 return ret;
1731         }
1732         ret = hns3_q_to_qs_map(hw);
1733         if (ret)
1734                 hns3_err(hw, "failed to map nq to qs, ret = %d.", ret);
1735
1736         return ret;
1737 }
1738
1739 static void
1740 hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
1741 {
1742         switch (mode) {
1743         case RTE_FC_NONE:
1744                 hw->requested_fc_mode = HNS3_FC_NONE;
1745                 break;
1746         case RTE_FC_RX_PAUSE:
1747                 hw->requested_fc_mode = HNS3_FC_RX_PAUSE;
1748                 break;
1749         case RTE_FC_TX_PAUSE:
1750                 hw->requested_fc_mode = HNS3_FC_TX_PAUSE;
1751                 break;
1752         case RTE_FC_FULL:
1753                 hw->requested_fc_mode = HNS3_FC_FULL;
1754                 break;
1755         default:
1756                 hw->requested_fc_mode = HNS3_FC_NONE;
1757                 hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
1758                           "configured to RTE_FC_NONE", mode);
1759                 break;
1760         }
1761 }
1762
1763 /*
1764  * hns3_dcb_pfc_enable - Enable priority flow control
1765  * @dev: pointer to ethernet device
1766  *
1767  * Configures the pfc settings for one porority.
1768  */
1769 int
1770 hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
1771 {
1772         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1773         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1774         enum hns3_fc_status fc_status = hw->current_fc_status;
1775         enum hns3_fc_mode old_fc_mode = hw->requested_fc_mode;
1776         uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1777         uint8_t pfc_en = hw->dcb_info.pfc_en;
1778         uint8_t priority = pfc_conf->priority;
1779         uint16_t pause_time = pf->pause_time;
1780         int ret;
1781
1782         pf->pause_time = pfc_conf->fc.pause_time;
1783         hns3_get_fc_mode(hw, pfc_conf->fc.mode);
1784         hw->current_fc_status = HNS3_FC_STATUS_PFC;
1785         hw->dcb_info.pfc_en |= BIT(priority);
1786         hw->dcb_info.hw_pfc_map =
1787                         hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1788         ret = hns3_buffer_alloc(hw);
1789         if (ret)
1790                 goto pfc_setup_fail;
1791
1792         /*
1793          * The flow control mode of all UPs will be changed based on
1794          * requested_fc_mode coming from user.
1795          */
1796         ret = hns3_dcb_pause_setup_hw(hw);
1797         if (ret) {
1798                 hns3_err(hw, "enable pfc failed! ret = %d", ret);
1799                 goto pfc_setup_fail;
1800         }
1801
1802         return 0;
1803
1804 pfc_setup_fail:
1805         hw->requested_fc_mode = old_fc_mode;
1806         hw->current_fc_status = fc_status;
1807         pf->pause_time = pause_time;
1808         hw->dcb_info.pfc_en = pfc_en;
1809         hw->dcb_info.hw_pfc_map = hw_pfc_map;
1810
1811         return ret;
1812 }
1813
1814 /*
1815  * hns3_fc_enable - Enable MAC pause
1816  * @dev: pointer to ethernet device
1817  *
1818  * Configures the MAC pause settings.
1819  */
1820 int
1821 hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1822 {
1823         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1824         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1825         enum hns3_fc_mode old_fc_mode = hw->requested_fc_mode;
1826         enum hns3_fc_status fc_status = hw->current_fc_status;
1827         uint16_t pause_time = pf->pause_time;
1828         int ret;
1829
1830         pf->pause_time = fc_conf->pause_time;
1831         hns3_get_fc_mode(hw, fc_conf->mode);
1832
1833         /*
1834          * In fact, current_fc_status is HNS3_FC_STATUS_NONE when mode
1835          * of flow control is configured to be HNS3_FC_NONE.
1836          */
1837         if (hw->requested_fc_mode == HNS3_FC_NONE)
1838                 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1839         else
1840                 hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;
1841
1842         ret = hns3_dcb_pause_setup_hw(hw);
1843         if (ret) {
1844                 hns3_err(hw, "enable MAC Pause failed! ret = %d", ret);
1845                 goto setup_fc_fail;
1846         }
1847
1848         return 0;
1849
1850 setup_fc_fail:
1851         hw->requested_fc_mode = old_fc_mode;
1852         hw->current_fc_status = fc_status;
1853         pf->pause_time = pause_time;
1854
1855         return ret;
1856 }