net/hns3: add error code to some logs
[dpdk.git] / drivers / net / hns3 / hns3_dcb.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4
5 #include <errno.h>
6 #include <inttypes.h>
7 #include <stdbool.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include <rte_io.h>
11 #include <rte_common.h>
12 #include <rte_ethdev.h>
13
14 #include "hns3_logs.h"
15 #include "hns3_regs.h"
16 #include "hns3_ethdev.h"
17 #include "hns3_dcb.h"
18
19 #define HNS3_SHAPER_BS_U_DEF    5
20 #define HNS3_SHAPER_BS_S_DEF    20
21 #define BW_MAX_PERCENT          100
22 #define HNS3_ETHER_MAX_RATE     100000
23
24 /*
25  * hns3_shaper_para_calc: calculate ir parameter for the shaper
26  * @ir: rate to be configured, in Mbps
27  * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
28  * @shaper_para: shaper parameter of IR shaper
29  *
30  * the formula:
31  *
32  *              IR_b * (2 ^ IR_u) * 8
33  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
34  *              Tick * (2 ^ IR_s)
35  *
36  * @return: 0 on successful calculation, negative on failure
37  */
38 static int
39 hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
40                       struct hns3_shaper_parameter *shaper_para)
41 {
42 #define SHAPER_DEFAULT_IR_B     126
43 #define DIVISOR_CLK             (1000 * 8)
44 #define DIVISOR_IR_B_126        (126 * DIVISOR_CLK)
45
46         const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
47                 6 * 256,    /* Priority level */
48                 6 * 32,     /* Priority group level */
49                 6 * 8,      /* Port level */
50                 6 * 256     /* Qset level */
51         };
52         uint8_t ir_u_calc = 0;
53         uint8_t ir_s_calc = 0;
54         uint32_t denominator;
55         uint32_t ir_calc;
56         uint32_t tick;
57
58         /* Calc tick */
59         if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
60                 hns3_err(hw,
61                          "shaper_level(%d) is greater than HNS3_SHAPER_LVL_CNT(%d)",
62                          shaper_level, HNS3_SHAPER_LVL_CNT);
63                 return -EINVAL;
64         }
65
66         if (ir > HNS3_ETHER_MAX_RATE) {
67                 hns3_err(hw, "rate(%d) exceeds the maximum rate(%d) "
68                          "supported by the driver", ir, HNS3_ETHER_MAX_RATE);
69                 return -EINVAL;
70         }
71
72         tick = tick_array[shaper_level];
73
74         /*
75          * Calculate the speed when ir_b = 126, ir_u = 0 and ir_s = 0;
76          * the formula then reduces to:
77          *              126 * 1 * 8
78          * ir_calc = ---------------- * 1000
79          *              tick * 1
80          */
81         ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
82
83         if (ir_calc == ir) {
84                 shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
85         } else if (ir_calc > ir) {
86                 /* Increasing the denominator to select ir_s value */
87                 do {
88                         ir_s_calc++;
89                         ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
90                 } while (ir_calc > ir);
91
92                 if (ir_calc == ir)
93                         shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
94                 else
95                         shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
96                                  (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
97         } else {
98                 /*
99                  * Increasing the numerator to select ir_u value. ir_u_calc will
100                  * get maximum value when ir_calc is minimum and ir is maximum.
101                  * ir_calc gets minimum value when tick is the maximum value.
102                  * At the same time, value of ir_u_calc can only be increased up
103                  * to eight after the while loop if the value of ir is equal
104                  * to HNS3_ETHER_MAX_RATE.
105                  */
106                 uint32_t numerator;
107                 do {
108                         ir_u_calc++;
109                         numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
110                         ir_calc = (numerator + (tick >> 1)) / tick;
111                 } while (ir_calc < ir);
112
113                 if (ir_calc == ir) {
114                         shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
115                 } else {
116                         --ir_u_calc;
117
118                         /*
119                          * The maximum value of ir_u_calc in this branch is
120                          * seven in all cases. Thus, value of denominator can
121                          * not be zero here.
122                          */
123                         denominator = DIVISOR_CLK * (1 << ir_u_calc);
124                         shaper_para->ir_b =
125                                 (ir * tick + (denominator >> 1)) / denominator;
126                 }
127         }
128
129         shaper_para->ir_u = ir_u_calc;
130         shaper_para->ir_s = ir_s_calc;
131
132         return 0;
133 }
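/*
 * Worked example of hns3_shaper_para_calc() above (illustrative values, not
 * taken from any hardware documentation): for the port level, tick = 6 * 8 =
 * 48. Requesting ir = 10000 Mbps, the default ir_b = 126, ir_u = 0, ir_s = 0
 * yields ir_calc = 1008000 / 48 = 21000 Mbps > ir, so ir_s is increased until
 * ir_calc <= ir: ir_s = 2 gives ir_calc = 1008000 / (48 * 4) = 5250. Since
 * ir_calc != ir, ir_b is recomputed as
 * (10000 * 48 * 4 + 4000) / 8000 = 240 (integer division). Checking the IR
 * formula: 240 * 2^0 * 8 / (48 * 2^2) * 1000 = 10000 Mbps, the requested rate.
 */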
134
135 static int
136 hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)
137 {
138 #define HNS3_HALF_BYTE_BIT_OFFSET 4
139         uint8_t tc = hw->dcb_info.prio_tc[pri_id];
140
141         if (tc >= hw->dcb_info.num_tc)
142                 return -EINVAL;
143
144         /*
145          * The priority register has four bytes; the first byte holds
146          * priority0 and priority1, with the upper 4 bits standing for
147          * priority1 and the lower 4 bits for priority0, as below:
148          * first byte:  | pri_1 | pri_0 |
149          * second byte: | pri_3 | pri_2 |
150          * third byte:  | pri_5 | pri_4 |
151          * fourth byte: | pri_7 | pri_6 |
152          */
153         pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);
154
155         return 0;
156 }
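/*
 * Illustrative example of hns3_fill_pri_array() above (assumed configuration):
 * with num_tc = 4 and prio_tc = {0, 1, 2, 3, 0, 1, 2, 3}, calling it for
 * pri_id = 0..7 packs the mapping as pri[] = {0x10, 0x32, 0x10, 0x32}, i.e.
 * byte 0 carries pri_1:pri_0 = 1:0, byte 1 carries pri_3:pri_2 = 3:2, and so
 * on for the upper priorities.
 */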
157
158 static int
159 hns3_up_to_tc_map(struct hns3_hw *hw)
160 {
161         struct hns3_cmd_desc desc;
162         uint8_t *pri = (uint8_t *)desc.data;
163         uint8_t pri_id;
164         int ret;
165
166         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);
167
168         for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {
169                 ret = hns3_fill_pri_array(hw, pri, pri_id);
170                 if (ret)
171                         return ret;
172         }
173
174         return hns3_cmd_send(hw, &desc, 1);
175 }
176
177 static int
178 hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)
179 {
180         struct hns3_pg_to_pri_link_cmd *map;
181         struct hns3_cmd_desc desc;
182
183         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);
184
185         map = (struct hns3_pg_to_pri_link_cmd *)desc.data;
186
187         map->pg_id = pg_id;
188         map->pri_bit_map = pri_bit_map;
189
190         return hns3_cmd_send(hw, &desc, 1);
191 }
192
193 static int
194 hns3_pg_to_pri_map(struct hns3_hw *hw)
195 {
196         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
197         struct hns3_pf *pf = &hns->pf;
198         struct hns3_pg_info *pg_info;
199         int ret, i;
200
201         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
202                 return -EINVAL;
203
204         for (i = 0; i < hw->dcb_info.num_pg; i++) {
205                 /* Cfg pg to priority mapping */
206                 pg_info = &hw->dcb_info.pg_info[i];
207                 ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);
208                 if (ret)
209                         return ret;
210         }
211
212         return 0;
213 }
214
215 static int
216 hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)
217 {
218         struct hns3_qs_to_pri_link_cmd *map;
219         struct hns3_cmd_desc desc;
220
221         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);
222
223         map = (struct hns3_qs_to_pri_link_cmd *)desc.data;
224
225         map->qs_id = rte_cpu_to_le_16(qs_id);
226         map->priority = pri;
227         map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;
228
229         return hns3_cmd_send(hw, &desc, 1);
230 }
231
232 static int
233 hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)
234 {
235         struct hns3_qs_weight_cmd *weight;
236         struct hns3_cmd_desc desc;
237
238         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);
239
240         weight = (struct hns3_qs_weight_cmd *)desc.data;
241
242         weight->qs_id = rte_cpu_to_le_16(qs_id);
243         weight->dwrr = dwrr;
244
245         return hns3_cmd_send(hw, &desc, 1);
246 }
247
248 static int
249 hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
250 {
251 #define DEFAULT_TC_WEIGHT       1
252 #define DEFAULT_TC_OFFSET       14
253         struct hns3_ets_tc_weight_cmd *ets_weight;
254         struct hns3_cmd_desc desc;
255         uint8_t i;
256
257         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);
258         ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;
259
260         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
261                 struct hns3_pg_info *pg_info;
262
263                 ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
264
265                 if (!(hw->hw_tc_map & BIT(i)))
266                         continue;
267
268                 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
269                 ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
270         }
271
272         ets_weight->weight_offset = DEFAULT_TC_OFFSET;
273
274         return hns3_cmd_send(hw, &desc, 1);
275 }
276
277 static int
278 hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)
279 {
280         struct hns3_priority_weight_cmd *weight;
281         struct hns3_cmd_desc desc;
282
283         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);
284
285         weight = (struct hns3_priority_weight_cmd *)desc.data;
286
287         weight->pri_id = pri_id;
288         weight->dwrr = dwrr;
289
290         return hns3_cmd_send(hw, &desc, 1);
291 }
292
293 static int
294 hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)
295 {
296         struct hns3_pg_weight_cmd *weight;
297         struct hns3_cmd_desc desc;
298
299         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);
300
301         weight = (struct hns3_pg_weight_cmd *)desc.data;
302
303         weight->pg_id = pg_id;
304         weight->dwrr = dwrr;
305
306         return hns3_cmd_send(hw, &desc, 1);
307 }
308 static int
309 hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)
310 {
311         struct hns3_cmd_desc desc;
312
313         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);
314
315         if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)
316                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
317         else
318                 desc.data[1] = 0;
319
320         desc.data[0] = rte_cpu_to_le_32(pg_id);
321
322         return hns3_cmd_send(hw, &desc, 1);
323 }
324
325 static uint32_t
326 hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
327                            uint8_t bs_b, uint8_t bs_s)
328 {
329         uint32_t shapping_para = 0;
330
331         hns3_dcb_set_field(shapping_para, IR_B, ir_b);
332         hns3_dcb_set_field(shapping_para, IR_U, ir_u);
333         hns3_dcb_set_field(shapping_para, IR_S, ir_s);
334         hns3_dcb_set_field(shapping_para, BS_B, bs_b);
335         hns3_dcb_set_field(shapping_para, BS_S, bs_s);
336
337         return shapping_para;
338 }
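/*
 * In hns3_dcb_get_shapping_para() above, the bit positions of the five fields
 * come from the HNS3_DCB_SHAP_*_MSK/LSH definitions expanded by
 * hns3_dcb_set_field(); the helper only packs the rate fields IR_B/IR_U/IR_S
 * and the bucket size fields BS_B/BS_S into the single 32-bit shaping
 * parameter word sent to the firmware.
 */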
339
340 static int
341 hns3_dcb_port_shaper_cfg(struct hns3_hw *hw)
342 {
343         struct hns3_port_shapping_cmd *shap_cfg_cmd;
344         struct hns3_shaper_parameter shaper_parameter;
345         uint32_t shapping_para;
346         uint32_t ir_u, ir_b, ir_s;
347         struct hns3_cmd_desc desc;
348         int ret;
349
350         ret = hns3_shaper_para_calc(hw, hw->mac.link_speed,
351                                     HNS3_SHAPER_LVL_PORT, &shaper_parameter);
352         if (ret) {
353                 hns3_err(hw, "calculate shaper parameter failed: %d", ret);
354                 return ret;
355         }
356
357         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);
358         shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;
359
360         ir_b = shaper_parameter.ir_b;
361         ir_u = shaper_parameter.ir_u;
362         ir_s = shaper_parameter.ir_s;
363         shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
364                                                    HNS3_SHAPER_BS_U_DEF,
365                                                    HNS3_SHAPER_BS_S_DEF);
366
367         shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);
368
369         return hns3_cmd_send(hw, &desc, 1);
370 }
371
372 static int
373 hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
374                          uint8_t pg_id, uint32_t shapping_para)
375 {
376         struct hns3_pg_shapping_cmd *shap_cfg_cmd;
377         enum hns3_opcode_type opcode;
378         struct hns3_cmd_desc desc;
379
380         opcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :
381                  HNS3_OPC_TM_PG_C_SHAPPING;
382         hns3_cmd_setup_basic_desc(&desc, opcode, false);
383
384         shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;
385
386         shap_cfg_cmd->pg_id = pg_id;
387
388         shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);
389
390         return hns3_cmd_send(hw, &desc, 1);
391 }
392
393 static int
394 hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
395 {
396         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
397         struct hns3_shaper_parameter shaper_parameter;
398         struct hns3_pf *pf = &hns->pf;
399         uint32_t ir_u, ir_b, ir_s;
400         uint32_t shaper_para;
401         uint8_t i;
402         int ret;
403
404         /* Cfg pg schd */
405         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
406                 return -EINVAL;
407
408         /* Cfg shaping parameters for each pg */
409         for (i = 0; i < hw->dcb_info.num_pg; i++) {
410                 /* Calc shaper para */
411                 ret = hns3_shaper_para_calc(hw,
412                                             hw->dcb_info.pg_info[i].bw_limit,
413                                             HNS3_SHAPER_LVL_PG,
414                                             &shaper_parameter);
415                 if (ret) {
416                         hns3_err(hw, "calculate shaper parameter failed: %d",
417                                  ret);
418                         return ret;
419                 }
420
421                 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
422                                                          HNS3_SHAPER_BS_U_DEF,
423                                                          HNS3_SHAPER_BS_S_DEF);
424
425                 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
426                                                shaper_para);
427                 if (ret) {
428                         hns3_err(hw,
429                                  "config PG CIR shaper parameter failed: %d",
430                                  ret);
431                         return ret;
432                 }
433
434                 ir_b = shaper_parameter.ir_b;
435                 ir_u = shaper_parameter.ir_u;
436                 ir_s = shaper_parameter.ir_s;
437                 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
438                                                          HNS3_SHAPER_BS_U_DEF,
439                                                          HNS3_SHAPER_BS_S_DEF);
440
441                 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
442                                                shaper_para);
443                 if (ret) {
444                         hns3_err(hw,
445                                  "config PG PIR shaper parameter failed: %d",
446                                  ret);
447                         return ret;
448                 }
449         }
450
451         return 0;
452 }
453
454 static int
455 hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)
456 {
457         struct hns3_cmd_desc desc;
458
459         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);
460
461         if (mode == HNS3_SCH_MODE_DWRR)
462                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
463         else
464                 desc.data[1] = 0;
465
466         desc.data[0] = rte_cpu_to_le_32(qs_id);
467
468         return hns3_cmd_send(hw, &desc, 1);
469 }
470
471 static int
472 hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)
473 {
474         struct hns3_cmd_desc desc;
475
476         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);
477
478         if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)
479                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
480         else
481                 desc.data[1] = 0;
482
483         desc.data[0] = rte_cpu_to_le_32(pri_id);
484
485         return hns3_cmd_send(hw, &desc, 1);
486 }
487
488 static int
489 hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
490                           uint8_t pri_id, uint32_t shapping_para)
491 {
492         struct hns3_pri_shapping_cmd *shap_cfg_cmd;
493         enum hns3_opcode_type opcode;
494         struct hns3_cmd_desc desc;
495
496         opcode = bucket ? HNS3_OPC_TM_PRI_P_SHAPPING :
497                  HNS3_OPC_TM_PRI_C_SHAPPING;
498
499         hns3_cmd_setup_basic_desc(&desc, opcode, false);
500
501         shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;
502
503         shap_cfg_cmd->pri_id = pri_id;
504
505         shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);
506
507         return hns3_cmd_send(hw, &desc, 1);
508 }
509
510 static int
511 hns3_dcb_pri_tc_base_shaper_cfg(struct hns3_hw *hw)
512 {
513         struct hns3_shaper_parameter shaper_parameter;
514         uint32_t ir_u, ir_b, ir_s;
515         uint32_t shaper_para;
516         int ret, i;
517
518         for (i = 0; i < hw->dcb_info.num_tc; i++) {
519                 ret = hns3_shaper_para_calc(hw,
520                                             hw->dcb_info.tc_info[i].bw_limit,
521                                             HNS3_SHAPER_LVL_PRI,
522                                             &shaper_parameter);
523                 if (ret) {
524                         hns3_err(hw, "calculate shaper parameter failed: %d",
525                                  ret);
526                         return ret;
527                 }
528
529                 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
530                                                          HNS3_SHAPER_BS_U_DEF,
531                                                          HNS3_SHAPER_BS_S_DEF);
532
533                 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
534                                                 shaper_para);
535                 if (ret) {
536                         hns3_err(hw,
537                                  "config priority CIR shaper parameter failed: %d",
538                                  ret);
539                         return ret;
540                 }
541
542                 ir_b = shaper_parameter.ir_b;
543                 ir_u = shaper_parameter.ir_u;
544                 ir_s = shaper_parameter.ir_s;
545                 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
546                                                          HNS3_SHAPER_BS_U_DEF,
547                                                          HNS3_SHAPER_BS_S_DEF);
548
549                 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
550                                                 shaper_para);
551                 if (ret) {
552                         hns3_err(hw,
553                                  "config priority PIR shaper parameter failed: %d",
554                                  ret);
555                         return ret;
556                 }
557         }
558
559         return 0;
560 }
561
562
563 static int
564 hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
565 {
566         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
567         struct hns3_pf *pf = &hns->pf;
568         int ret;
569
570         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
571                 return -EINVAL;
572
573         ret = hns3_dcb_pri_tc_base_shaper_cfg(hw);
574         if (ret)
575                 hns3_err(hw, "config port shaper failed: %d", ret);
576
577         return ret;
578 }
579
580 void
581 hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
582 {
583         struct hns3_rss_conf *rss_cfg = &hw->rss_info;
584         uint16_t rx_qnum_per_tc;
585         int i;
586
587         rx_qnum_per_tc = nb_rx_q / hw->num_tc;
588         rx_qnum_per_tc = RTE_MIN(hw->rss_size_max, rx_qnum_per_tc);
589         if (hw->alloc_rss_size != rx_qnum_per_tc) {
590                 hns3_info(hw, "rss size changes from %u to %u",
591                           hw->alloc_rss_size, rx_qnum_per_tc);
592                 hw->alloc_rss_size = rx_qnum_per_tc;
593         }
594         hw->used_rx_queues = hw->num_tc * hw->alloc_rss_size;
595
596         /*
597          * When the rss size is changed, we need to update the rss
598          * redirection table maintained by the driver. Besides, during the
599          * entire reset process, we must ensure the rss table information is
600          * not overwritten, and that it is configured directly to the
601          * hardware in the RESET_STAGE_RESTORE stage of the reset process.
602          */
603         if (rte_atomic16_read(&hw->reset.resetting) == 0) {
604                 for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
605                         rss_cfg->rss_indirection_tbl[i] =
606                                                         i % hw->alloc_rss_size;
607         }
608 }
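/*
 * Illustrative example of hns3_set_rss_size() above (assumed configuration):
 * with num_tc = 4, nb_rx_q = 16 and rss_size_max >= 4, alloc_rss_size becomes
 * 4 and used_rx_queues 16; outside of a reset, the redirection table is
 * refilled round-robin, i.e. rss_indirection_tbl[i] = i % 4.
 */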
609
610 void
611 hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_queue)
612 {
613         struct hns3_tc_queue_info *tc_queue;
614         uint8_t i;
615
616         hw->tx_qnum_per_tc = nb_queue / hw->num_tc;
617         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
618                 tc_queue = &hw->tc_queue[i];
619                 if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
620                         tc_queue->enable = true;
621                         tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
622                         tc_queue->tqp_count = hw->tx_qnum_per_tc;
623                         tc_queue->tc = i;
624                 } else {
625                         /* Set to the default queue if the TC is disabled */
626                         tc_queue->enable = false;
627                         tc_queue->tqp_offset = 0;
628                         tc_queue->tqp_count = 0;
629                         tc_queue->tc = 0;
630                 }
631         }
632         hw->used_tx_queues = hw->num_tc * hw->tx_qnum_per_tc;
633 }
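/*
 * Illustrative example of hns3_tc_queue_mapping_cfg() above (assumed
 * configuration): with num_tc = 4 and nb_queue = 16, tx_qnum_per_tc is 4 and
 * an enabled TC i owns Tx queues 4 * i .. 4 * i + 3; a disabled TC falls back
 * to offset 0 with a count of 0, and used_tx_queues ends up as 16.
 */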
634
635 static void
636 hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
637                                  uint16_t nb_tx_q)
638 {
639         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
640         struct hns3_pf *pf = &hns->pf;
641
642         hw->num_tc = hw->dcb_info.num_tc;
643         hns3_set_rss_size(hw, nb_rx_q);
644         hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
645
646         if (!hns->is_vf)
647                 memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
648 }
649
650 int
651 hns3_dcb_info_init(struct hns3_hw *hw)
652 {
653         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
654         struct hns3_pf *pf = &hns->pf;
655         int i, k;
656
657         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
658             hw->dcb_info.num_pg != 1)
659                 return -EINVAL;
660
661         /* Initializing PG information */
662         memset(hw->dcb_info.pg_info, 0,
663                sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
664         for (i = 0; i < hw->dcb_info.num_pg; i++) {
665                 hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;
666                 hw->dcb_info.pg_info[i].pg_id = i;
667                 hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;
668                 hw->dcb_info.pg_info[i].bw_limit = HNS3_ETHER_MAX_RATE;
669
670                 if (i != 0)
671                         continue;
672
673                 hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;
674                 for (k = 0; k < hw->dcb_info.num_tc; k++)
675                         hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;
676         }
677
678         /* All UPs mapping to TC0 */
679         for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
680                 hw->dcb_info.prio_tc[i] = 0;
681
682         /* Initializing tc information */
683         memset(hw->dcb_info.tc_info, 0,
684                sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
685         for (i = 0; i < hw->dcb_info.num_tc; i++) {
686                 hw->dcb_info.tc_info[i].tc_id = i;
687                 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
688                 hw->dcb_info.tc_info[i].pgid = 0;
689                 hw->dcb_info.tc_info[i].bw_limit =
690                         hw->dcb_info.pg_info[0].bw_limit;
691         }
692
693         return 0;
694 }
695
696 static int
697 hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)
698 {
699         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
700         struct hns3_pf *pf = &hns->pf;
701         int ret, i;
702
703         /* Only configured in TC-based scheduler mode */
704         if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)
705                 return -EINVAL;
706
707         for (i = 0; i < hw->dcb_info.num_pg; i++) {
708                 ret = hns3_dcb_pg_schd_mode_cfg(hw, i);
709                 if (ret)
710                         return ret;
711         }
712
713         return 0;
714 }
715
716 static int
717 hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)
718 {
719         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
720         struct hns3_pf *pf = &hns->pf;
721         uint8_t i;
722         int ret;
723
724         if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {
725                 for (i = 0; i < hw->dcb_info.num_tc; i++) {
726                         ret = hns3_dcb_pri_schd_mode_cfg(hw, i);
727                         if (ret)
728                                 return ret;
729
730                         ret = hns3_dcb_qs_schd_mode_cfg(hw, i,
731                                                         HNS3_SCH_MODE_DWRR);
732                         if (ret)
733                                 return ret;
734                 }
735         }
736
737         return 0;
738 }
739
740 static int
741 hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)
742 {
743         int ret;
744
745         ret = hns3_dcb_lvl2_schd_mode_cfg(hw);
746         if (ret) {
747                 hns3_err(hw, "config lvl2_schd_mode failed: %d", ret);
748                 return ret;
749         }
750
751         ret = hns3_dcb_lvl34_schd_mode_cfg(hw);
752         if (ret)
753                 hns3_err(hw, "config lvl34_schd_mode failed: %d", ret);
754
755         return ret;
756 }
757
758 static int
759 hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)
760 {
761         struct hns3_pg_info *pg_info;
762         uint8_t dwrr;
763         int ret, i;
764
765         for (i = 0; i < hw->dcb_info.num_tc; i++) {
766                 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
767                 dwrr = pg_info->tc_dwrr[i];
768
769                 ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);
770                 if (ret) {
771                         hns3_err(hw,
772                                "fail to send priority weight cmd: %d, ret = %d",
773                                i, ret);
774                         return ret;
775                 }
776
777                 ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);
778                 if (ret) {
779                         hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d",
780                                  i, ret);
781                         return ret;
782                 }
783         }
784
785         return 0;
786 }
787
788 static int
789 hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
790 {
791         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
792         struct hns3_pf *pf = &hns->pf;
793         uint32_t version;
794         int ret;
795
796         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
797                 return -EINVAL;
798
799         ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);
800         if (ret)
801                 return ret;
802
803         if (!hns3_dev_dcb_supported(hw))
804                 return 0;
805
806         ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
807         if (ret == -EOPNOTSUPP) {
808                 version = hw->fw_version;
809                 hns3_warn(hw,
810                           "fw %u.%u.%u.%u doesn't support ets tc weight cmd",
811                           hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
812                                          HNS3_FW_VERSION_BYTE3_S),
813                           hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
814                                          HNS3_FW_VERSION_BYTE2_S),
815                           hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
816                                          HNS3_FW_VERSION_BYTE1_S),
817                           hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
818                                          HNS3_FW_VERSION_BYTE0_S));
819                 ret = 0;
820         }
821
822         return ret;
823 }
824
825 static int
826 hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)
827 {
828         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
829         struct hns3_pf *pf = &hns->pf;
830         int ret, i;
831
832         /* Cfg pg schd */
833         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
834                 return -EINVAL;
835
836         /* Cfg dwrr for each pg */
837         for (i = 0; i < hw->dcb_info.num_pg; i++) {
838                 /* Cfg dwrr */
839                 ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);
840                 if (ret)
841                         return ret;
842         }
843
844         return 0;
845 }
846
847 static int
848 hns3_dcb_dwrr_cfg(struct hns3_hw *hw)
849 {
850         int ret;
851
852         ret = hns3_dcb_pg_dwrr_cfg(hw);
853         if (ret) {
854                 hns3_err(hw, "config pg_dwrr failed: %d", ret);
855                 return ret;
856         }
857
858         ret = hns3_dcb_pri_dwrr_cfg(hw);
859         if (ret)
860                 hns3_err(hw, "config pri_dwrr failed: %d", ret);
861
862         return ret;
863 }
864
865 static int
866 hns3_dcb_shaper_cfg(struct hns3_hw *hw)
867 {
868         int ret;
869
870         ret = hns3_dcb_port_shaper_cfg(hw);
871         if (ret) {
872                 hns3_err(hw, "config port shaper failed: %d", ret);
873                 return ret;
874         }
875
876         ret = hns3_dcb_pg_shaper_cfg(hw);
877         if (ret) {
878                 hns3_err(hw, "config pg shaper failed: %d", ret);
879                 return ret;
880         }
881
882         return hns3_dcb_pri_shaper_cfg(hw);
883 }
884
885 static int
886 hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)
887 {
888         struct hns3_nq_to_qs_link_cmd *map;
889         struct hns3_cmd_desc desc;
890
891         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);
892
893         map = (struct hns3_nq_to_qs_link_cmd *)desc.data;
894
895         map->nq_id = rte_cpu_to_le_16(q_id);
896         map->qset_id = rte_cpu_to_le_16(qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);
897
898         return hns3_cmd_send(hw, &desc, 1);
899 }
900
901 static int
902 hns3_q_to_qs_map(struct hns3_hw *hw)
903 {
904         struct hns3_tc_queue_info *tc_queue;
905         uint16_t q_id;
906         uint32_t i, j;
907         int ret;
908
909         for (i = 0; i < hw->num_tc; i++) {
910                 tc_queue = &hw->tc_queue[i];
911                 for (j = 0; j < tc_queue->tqp_count; j++) {
912                         q_id = tc_queue->tqp_offset + j;
913                         ret = hns3_q_to_qs_map_cfg(hw, q_id, i);
914                         if (ret)
915                                 return ret;
916                 }
917         }
918
919         return 0;
920 }
921
922 static int
923 hns3_pri_q_qs_cfg(struct hns3_hw *hw)
924 {
925         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
926         struct hns3_pf *pf = &hns->pf;
927         uint32_t i;
928         int ret;
929
930         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
931                 return -EINVAL;
932
933         /* Cfg qs -> pri mapping */
934         for (i = 0; i < hw->num_tc; i++) {
935                 ret = hns3_qs_to_pri_map_cfg(hw, i, i);
936                 if (ret) {
937                         hns3_err(hw, "qs_to_pri mapping fail: %d", ret);
938                         return ret;
939                 }
940         }
941
942         /* Cfg q -> qs mapping */
943         ret = hns3_q_to_qs_map(hw);
944         if (ret)
945                 hns3_err(hw, "nq_to_qs mapping fail: %d", ret);
946
947         return ret;
948 }
949
950 static int
951 hns3_dcb_map_cfg(struct hns3_hw *hw)
952 {
953         int ret;
954
955         ret = hns3_up_to_tc_map(hw);
956         if (ret) {
957                 hns3_err(hw, "up_to_tc mapping fail: %d", ret);
958                 return ret;
959         }
960
961         ret = hns3_pg_to_pri_map(hw);
962         if (ret) {
963                 hns3_err(hw, "pri_to_pg mapping fail: %d", ret);
964                 return ret;
965         }
966
967         return hns3_pri_q_qs_cfg(hw);
968 }
969
970 static int
971 hns3_dcb_schd_setup_hw(struct hns3_hw *hw)
972 {
973         int ret;
974
975         /* Cfg dcb mapping  */
976         ret = hns3_dcb_map_cfg(hw);
977         if (ret)
978                 return ret;
979
980         /* Cfg dcb shaper */
981         ret = hns3_dcb_shaper_cfg(hw);
982         if (ret)
983                 return ret;
984
985         /* Cfg dwrr */
986         ret = hns3_dcb_dwrr_cfg(hw);
987         if (ret)
988                 return ret;
989
990         /* Cfg schd mode for each level schd */
991         return hns3_dcb_schd_mode_cfg(hw);
992 }
993
994 static int
995 hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,
996                      uint8_t pause_trans_gap, uint16_t pause_trans_time)
997 {
998         struct hns3_cfg_pause_param_cmd *pause_param;
999         struct hns3_cmd_desc desc;
1000
1001         pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1002
1003         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);
1004
1005         memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);
1006         memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);
1007         pause_param->pause_trans_gap = pause_trans_gap;
1008         pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);
1009
1010         return hns3_cmd_send(hw, &desc, 1);
1011 }
1012
1013 int
1014 hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)
1015 {
1016         struct hns3_cfg_pause_param_cmd *pause_param;
1017         struct hns3_cmd_desc desc;
1018         uint16_t trans_time;
1019         uint8_t trans_gap;
1020         int ret;
1021
1022         pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1023
1024         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);
1025
1026         ret = hns3_cmd_send(hw, &desc, 1);
1027         if (ret)
1028                 return ret;
1029
1030         trans_gap = pause_param->pause_trans_gap;
1031         trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);
1032
1033         return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);
1034 }
1035
1036 static int
1037 hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
1038 {
1039 #define PAUSE_TIME_DIV_BY       2
1040 #define PAUSE_TIME_MIN_VALUE    0x4
1041
1042         struct hns3_mac *mac = &hw->mac;
1043         uint8_t pause_trans_gap;
1044
1045         /*
1046          * Pause transmit gap must be less than "pause_time / 2", otherwise
1047          * the behavior of MAC is undefined.
1048          */
1049         if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1050                 pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
1051         else if (pause_time >= PAUSE_TIME_MIN_VALUE &&
1052                  pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1053                 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1054         else {
1055                 hns3_warn(hw, "pause_time(%d) is adjusted to 4", pause_time);
1056                 pause_time = PAUSE_TIME_MIN_VALUE;
1057                 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1058         }
1059
1060         return hns3_pause_param_cfg(hw, mac->mac_addr,
1061                                     pause_trans_gap, pause_time);
1062 }
1063
1064 static int
1065 hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)
1066 {
1067         struct hns3_cmd_desc desc;
1068
1069         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);
1070
1071         desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1072                 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1073
1074         return hns3_cmd_send(hw, &desc, 1);
1075 }
1076
1077 static int
1078 hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)
1079 {
1080         struct hns3_cmd_desc desc;
1081         struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;
1082
1083         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);
1084
1085         pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1086                                         (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1087
1088         pfc->pri_en_bitmap = pfc_bitmap;
1089
1090         return hns3_cmd_send(hw, &desc, 1);
1091 }
1092
1093 static int
1094 hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)
1095 {
1096         struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
1097         struct hns3_cmd_desc desc;
1098
1099         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);
1100
1101         bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;
1102
1103         bp_to_qs_map_cmd->tc_id = tc;
1104         bp_to_qs_map_cmd->qs_group_id = grp_id;
1105         bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);
1106
1107         return hns3_cmd_send(hw, &desc, 1);
1108 }
1109
1110 static void
1111 hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
1112 {
1113         switch (hw->current_mode) {
1114         case HNS3_FC_NONE:
1115                 *tx_en = false;
1116                 *rx_en = false;
1117                 break;
1118         case HNS3_FC_RX_PAUSE:
1119                 *tx_en = false;
1120                 *rx_en = true;
1121                 break;
1122         case HNS3_FC_TX_PAUSE:
1123                 *tx_en = true;
1124                 *rx_en = false;
1125                 break;
1126         case HNS3_FC_FULL:
1127                 *tx_en = true;
1128                 *rx_en = true;
1129                 break;
1130         default:
1131                 *tx_en = false;
1132                 *rx_en = false;
1133                 break;
1134         }
1135 }
1136
1137 static int
1138 hns3_mac_pause_setup_hw(struct hns3_hw *hw)
1139 {
1140         bool tx_en, rx_en;
1141
1142         if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
1143                 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1144         else {
1145                 tx_en = false;
1146                 rx_en = false;
1147         }
1148
1149         return hns3_mac_pause_en_cfg(hw, tx_en, rx_en);
1150 }
1151
1152 static int
1153 hns3_pfc_setup_hw(struct hns3_hw *hw)
1154 {
1155         bool tx_en, rx_en;
1156
1157         if (hw->current_fc_status == HNS3_FC_STATUS_PFC)
1158                 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1159         else {
1160                 tx_en = false;
1161                 rx_en = false;
1162         }
1163
1164         return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);
1165 }
1166
1167 /*
1168  * Each TC has 1024 queue sets for back pressure; they are divided into
1169  * 32 groups, each group containing 32 queue sets, which can be
1170  * represented by a uint32_t bitmap.
1171  */
1172 static int
1173 hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)
1174 {
1175         uint32_t qs_bitmap;
1176         int ret;
1177         int i;
1178
1179         for (i = 0; i < HNS3_BP_GRP_NUM; i++) {
1180                 uint8_t grp, sub_grp;
1181                 qs_bitmap = 0;
1182
1183                 grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
1184                 sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,
1185                                          HNS3_BP_SUB_GRP_ID_S);
1186                 if (i == grp)
1187                         qs_bitmap |= (1 << sub_grp);
1188
1189                 ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);
1190                 if (ret)
1191                         return ret;
1192         }
1193
1194         return 0;
1195 }
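/*
 * Illustrative example of hns3_bp_setup_hw() above, assuming
 * HNS3_BP_GRP_ID_M/S select the upper bits of the qset id and
 * HNS3_BP_SUB_GRP_ID_M/S the low five bits: for tc = 3 the qset falls into
 * group 0 at sub-position 3, so group 0 is programmed with bitmap 0x8 while
 * the remaining 31 groups are written as 0.
 */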
1196
1197 static int
1198 hns3_dcb_bp_setup(struct hns3_hw *hw)
1199 {
1200         int ret, i;
1201
1202         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1203                 ret = hns3_bp_setup_hw(hw, i);
1204                 if (ret)
1205                         return ret;
1206         }
1207
1208         return 0;
1209 }
1210
1211 static int
1212 hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
1213 {
1214         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1215         struct hns3_pf *pf = &hns->pf;
1216         int ret;
1217
1218         ret = hns3_pause_param_setup_hw(hw, pf->pause_time);
1219         if (ret) {
1220                 hns3_err(hw, "Fail to set pause parameter. ret = %d", ret);
1221                 return ret;
1222         }
1223
1224         ret = hns3_mac_pause_setup_hw(hw);
1225         if (ret) {
1226                 hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret);
1227                 return ret;
1228         }
1229
1230         /* Only DCB-supported dev supports qset back pressure and pfc cmd */
1231         if (!hns3_dev_dcb_supported(hw))
1232                 return 0;
1233
1234         ret = hns3_pfc_setup_hw(hw);
1235         if (ret) {
1236                 hns3_err(hw, "config pfc failed! ret = %d", ret);
1237                 return ret;
1238         }
1239
1240         return hns3_dcb_bp_setup(hw);
1241 }
1242
1243 static uint8_t
1244 hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)
1245 {
1246         uint8_t pfc_map = 0;
1247         uint8_t *prio_tc;
1248         uint8_t i, j;
1249
1250         prio_tc = hw->dcb_info.prio_tc;
1251         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1252                 for (j = 0; j < HNS3_MAX_USER_PRIO; j++) {
1253                         if (prio_tc[j] == i && pfc_en & BIT(j)) {
1254                                 pfc_map |= BIT(i);
1255                                 break;
1256                         }
1257                 }
1258         }
1259
1260         return pfc_map;
1261 }
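/*
 * Illustrative example of hns3_dcb_undrop_tc_map() above (assumed
 * configuration): with prio_tc = {0, 0, 1, 1, 2, 2, 3, 3} and pfc_en = 0x0C
 * (user priorities 2 and 3 enabled), both enabled priorities map to TC1, so
 * the returned undrop map is BIT(1) = 0x02.
 */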
1262
1263 static void
1264 hns3_dcb_cfg_validate(struct hns3_adapter *hns, uint8_t *tc, bool *changed)
1265 {
1266         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1267         struct hns3_hw *hw = &hns->hw;
1268         uint8_t max_tc = 0;
1269         uint8_t pfc_en;
1270         int i;
1271
1272         dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1273         for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1274                 if (dcb_rx_conf->dcb_tc[i] != hw->dcb_info.prio_tc[i])
1275                         *changed = true;
1276
1277                 if (dcb_rx_conf->dcb_tc[i] > max_tc)
1278                         max_tc = dcb_rx_conf->dcb_tc[i];
1279         }
1280         *tc = max_tc + 1;
1281         if (*tc != hw->dcb_info.num_tc)
1282                 *changed = true;
1283
1284         /*
1285          * We ensure that dcb information can be reconfigured
1286          * after the hns3_priority_flow_ctrl_set function is called.
1287          */
1288         if (hw->current_mode != HNS3_FC_FULL)
1289                 *changed = true;
1290         pfc_en = RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1291         if (hw->dcb_info.pfc_en != pfc_en)
1292                 *changed = true;
1293 }
1294
1295 static void
1296 hns3_dcb_info_cfg(struct hns3_adapter *hns)
1297 {
1298         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1299         struct hns3_pf *pf = &hns->pf;
1300         struct hns3_hw *hw = &hns->hw;
1301         uint8_t tc_bw, bw_rest;
1302         uint8_t i, j;
1303
1304         dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1305         pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
1306         pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;
1307
1308         /* Config pg0 */
1309         memset(hw->dcb_info.pg_info, 0,
1310                sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
1311         hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT;
1312         hw->dcb_info.pg_info[0].pg_id = 0;
1313         hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;
1314         hw->dcb_info.pg_info[0].bw_limit = HNS3_ETHER_MAX_RATE;
1315         hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;
1316
1317         /* Each valid TC gets the same bandwidth by default */
1318         tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;
1319         for (i = 0; i < hw->dcb_info.num_tc; i++)
1320                 hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;
1321         /* To ensure the sum of tc_dwrr is equal to 100 */
1322         bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;
1323         for (j = 0; j < bw_rest; j++)
1324                 hw->dcb_info.pg_info[0].tc_dwrr[j]++;
1325         for (; i < dcb_rx_conf->nb_tcs; i++)
1326                 hw->dcb_info.pg_info[0].tc_dwrr[i] = 0;
1327
1328         /* All tcs map to pg0 */
1329         memset(hw->dcb_info.tc_info, 0,
1330                sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
1331         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1332                 hw->dcb_info.tc_info[i].tc_id = i;
1333                 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
1334                 hw->dcb_info.tc_info[i].pgid = 0;
1335                 hw->dcb_info.tc_info[i].bw_limit =
1336                                         hw->dcb_info.pg_info[0].bw_limit;
1337         }
1338
1339         for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
1340                 hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];
1341
1342         hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
1343                                          hw->data->nb_tx_queues);
1344 }
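/*
 * Illustrative example of the weight split in hns3_dcb_info_cfg() above: with
 * num_tc = 3, tc_bw = 100 / 3 = 33 (integer division) with a remainder of 1,
 * which is spread from TC0 upwards, giving tc_dwrr = {34, 33, 33} so that the
 * weights still sum to 100 percent.
 */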
1345
1346 static int
1347 hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
1348 {
1349         struct hns3_pf *pf = &hns->pf;
1350         struct hns3_hw *hw = &hns->hw;
1351         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1352         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1353         uint8_t bit_map = 0;
1354         uint8_t i;
1355
1356         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1357             hw->dcb_info.num_pg != 1)
1358                 return -EINVAL;
1359
1360         if (nb_rx_q < num_tc) {
1361                 hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
1362                          nb_rx_q, num_tc);
1363                 return -EINVAL;
1364         }
1365
1366         if (nb_tx_q < num_tc) {
1367                 hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
1368                          nb_tx_q, num_tc);
1369                 return -EINVAL;
1370         }
1371
1372         /* Discontinuous TCs are currently not supported */
1373         hw->dcb_info.num_tc = num_tc;
1374         for (i = 0; i < hw->dcb_info.num_tc; i++)
1375                 bit_map |= BIT(i);
1376
1377         if (!bit_map) {
1378                 bit_map = 1;
1379                 hw->dcb_info.num_tc = 1;
1380         }
1381         hw->hw_tc_map = bit_map;
1382         hns3_dcb_info_cfg(hns);
1383
1384         return 0;
1385 }
1386
1387 static int
1388 hns3_dcb_hw_configure(struct hns3_adapter *hns)
1389 {
1390         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1391         struct hns3_pf *pf = &hns->pf;
1392         struct hns3_hw *hw = &hns->hw;
1393         enum hns3_fc_status fc_status = hw->current_fc_status;
1394         enum hns3_fc_mode current_mode = hw->current_mode;
1395         uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1396         int ret, status;
1397
1398         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1399             pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
1400                 return -ENOTSUP;
1401
1402         ret = hns3_dcb_schd_setup_hw(hw);
1403         if (ret) {
1404                 hns3_err(hw, "dcb schedule configure failed! ret = %d", ret);
1405                 return ret;
1406         }
1407
1408         if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
1409                 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1410                 if (dcb_rx_conf->nb_tcs == 0)
1411                         hw->dcb_info.pfc_en = 1; /* tc0 only */
1412                 else
1413                         hw->dcb_info.pfc_en =
1414                         RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1415
1416                 hw->dcb_info.hw_pfc_map =
1417                                 hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1418
1419                 ret = hns3_buffer_alloc(hw);
1420                 if (ret)
1421                         return ret;
1422
1423                 hw->current_fc_status = HNS3_FC_STATUS_PFC;
1424                 hw->current_mode = HNS3_FC_FULL;
1425                 ret = hns3_dcb_pause_setup_hw(hw);
1426                 if (ret) {
1427                         hns3_err(hw, "setup pfc failed! ret = %d", ret);
1428                         goto pfc_setup_fail;
1429                 }
1430         } else {
1431                 /*
1432                  * Although dcb_capability_en lacks the ETH_DCB_PFC_SUPPORT
1433                  * flag, the DCB information, such as the number of TCs, is
1434                  * still configured. Therefore, the packet buffer allocation
1435                  * must be refreshed.
1436                  */
1437                 ret = hns3_buffer_alloc(hw);
1438                 if (ret)
1439                         return ret;
1440         }
1441
1442         return 0;
1443
1444 pfc_setup_fail:
1445         hw->current_mode = current_mode;
1446         hw->current_fc_status = fc_status;
1447         hw->dcb_info.hw_pfc_map = hw_pfc_map;
1448         status = hns3_buffer_alloc(hw);
1449         if (status)
1450                 hns3_err(hw, "recover packet buffer fail! status = %d", status);
1451
1452         return ret;
1453 }
1454
1455 /*
1456  * hns3_dcb_configure - setup dcb related config
1457  * @hns: pointer to hns3 adapter
1458  * Returns 0 on success, negative value on failure.
1459  */
1460 int
1461 hns3_dcb_configure(struct hns3_adapter *hns)
1462 {
1463         struct hns3_hw *hw = &hns->hw;
1464         bool map_changed = false;
1465         uint8_t num_tc = 0;
1466         int ret;
1467
1468         hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
1469         if (map_changed || rte_atomic16_read(&hw->reset.resetting)) {
1470                 ret = hns3_dcb_info_update(hns, num_tc);
1471                 if (ret) {
1472                         hns3_err(hw, "dcb info update failed: %d", ret);
1473                         return ret;
1474                 }
1475
1476                 ret = hns3_dcb_hw_configure(hns);
1477                 if (ret) {
1478                         hns3_err(hw, "dcb hw configure failed: %d", ret);
1479                         return ret;
1480                 }
1481         }
1482
1483         return 0;
1484 }
1485
1486 int
1487 hns3_dcb_init_hw(struct hns3_hw *hw)
1488 {
1489         int ret;
1490
1491         ret = hns3_dcb_schd_setup_hw(hw);
1492         if (ret) {
1493                 hns3_err(hw, "dcb schedule setup failed: %d", ret);
1494                 return ret;
1495         }
1496
1497         ret = hns3_dcb_pause_setup_hw(hw);
1498         if (ret)
1499                 hns3_err(hw, "PAUSE setup failed: %d", ret);
1500
1501         return ret;
1502 }
1503
1504 int
1505 hns3_dcb_init(struct hns3_hw *hw)
1506 {
1507         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1508         struct hns3_pf *pf = &hns->pf;
1509         int ret;
1510
1511         PMD_INIT_FUNC_TRACE();
1512
1513         /*
1514          * According to the 'adapter_state' identifier, the following branch
1515          * is only executed to initialize the default DCB configuration
1516          * during driver initialization. Because the driver saves DCB-related
1517          * information before a reset is triggered, the re-init device stage
1518          * of the reset process does not enter this branch, otherwise that
1519          * information would be changed.
1520          */
1521         if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
1522                 hw->requested_mode = HNS3_FC_NONE;
1523                 hw->current_mode = hw->requested_mode;
1524                 pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
1525                 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1526
1527                 ret = hns3_dcb_info_init(hw);
1528                 if (ret) {
1529                         hns3_err(hw, "dcb info init failed: %d", ret);
1530                         return ret;
1531                 }
1532                 hns3_dcb_update_tc_queue_mapping(hw, hw->tqps_num,
1533                                                  hw->tqps_num);
1534         }
1535
1536         /*
1537          * The DCB hardware is configured by the following function during
1538          * both driver initialization and the reset process. However, once
1539          * driver initialization has finished and a reset is coming, the
1540          * driver restores the DCB hardware configuration directly from the
1541          * DCB-related information maintained in software.
1542          */
1543         ret = hns3_dcb_init_hw(hw);
1544         if (ret) {
1545                 hns3_err(hw, "dcb init hardware failed: %d", ret);
1546                 return ret;
1547         }
1548
1549         return 0;
1550 }
1551
1552 static int
1553 hns3_update_queue_map_configure(struct hns3_adapter *hns)
1554 {
1555         struct hns3_hw *hw = &hns->hw;
1556         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1557         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1558         int ret;
1559
1560         hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
1561         ret = hns3_q_to_qs_map(hw);
1562         if (ret)
1563                 hns3_err(hw, "failed to map nq to qs! ret = %d", ret);
1564
1565         return ret;
1566 }
1567
1568 int
1569 hns3_dcb_cfg_update(struct hns3_adapter *hns)
1570 {
1571         struct hns3_hw *hw = &hns->hw;
1572         enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
1573         int ret;
1574
1575         if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
1576                 ret = hns3_dcb_configure(hns);
1577                 if (ret)
1578                         hns3_err(hw, "Failed to config dcb: %d", ret);
1579         } else {
1580                 /*
1581                  * Update the queue mapping without PFC configuration,
1582                  * because the queues were reconfigured by the user.
1583                  */
1584                 ret = hns3_update_queue_map_configure(hns);
1585                 if (ret)
1586                         hns3_err(hw,
1587                                  "Failed to update queue mapping configure: %d",
1588                                  ret);
1589         }
1590
1591         return ret;
1592 }
1593
1594 /*
1595  * hns3_dcb_pfc_enable - Enable priority flow control
1596  * @dev: pointer to ethernet device
1597  *
1598  * Configures the pfc settings for one priority.
1599  */
1600 int
1601 hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
1602 {
1603         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1604         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1605         enum hns3_fc_status fc_status = hw->current_fc_status;
1606         enum hns3_fc_mode current_mode = hw->current_mode;
1607         uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1608         uint8_t pfc_en = hw->dcb_info.pfc_en;
1609         uint8_t priority = pfc_conf->priority;
1610         uint16_t pause_time = pf->pause_time;
1611         int ret, status;
1612
1613         pf->pause_time = pfc_conf->fc.pause_time;
1614         hw->current_mode = hw->requested_mode;
1615         hw->current_fc_status = HNS3_FC_STATUS_PFC;
1616         hw->dcb_info.pfc_en |= BIT(priority);
1617         hw->dcb_info.hw_pfc_map =
1618                         hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1619         ret = hns3_buffer_alloc(hw);
1620         if (ret)
1621                 goto pfc_setup_fail;
1622
1623         /*
1624          * The flow control mode of all UPs will be changed based on
1625          * the current_mode requested by the user.
1626          */
1627         ret = hns3_dcb_pause_setup_hw(hw);
1628         if (ret) {
1629                 hns3_err(hw, "enable pfc failed! ret = %d", ret);
1630                 goto pfc_setup_fail;
1631         }
1632
1633         return 0;
1634
1635 pfc_setup_fail:
1636         hw->current_mode = current_mode;
1637         hw->current_fc_status = fc_status;
1638         pf->pause_time = pause_time;
1639         hw->dcb_info.pfc_en = pfc_en;
1640         hw->dcb_info.hw_pfc_map = hw_pfc_map;
1641         status = hns3_buffer_alloc(hw);
1642         if (status)
1643                 hns3_err(hw, "recover packet buffer fail: %d", status);
1644
1645         return ret;
1646 }
1647
1648 /*
1649  * hns3_fc_enable - Enable MAC pause
1650  * @dev: pointer to ethernet device
1651  *
1652  * Configures the MAC pause settings.
1653  */
1654 int
1655 hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1656 {
1657         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1658         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1659         enum hns3_fc_status fc_status = hw->current_fc_status;
1660         enum hns3_fc_mode current_mode = hw->current_mode;
1661         uint16_t pause_time = pf->pause_time;
1662         int ret;
1663
1664         pf->pause_time = fc_conf->pause_time;
1665         hw->current_mode = hw->requested_mode;
1666
1667         /*
1668          * In fact, current_fc_status is HNS3_FC_STATUS_NONE when mode
1669          * In fact, current_fc_status is HNS3_FC_STATUS_NONE when the flow
1670          * control mode is configured to HNS3_FC_NONE.
1671         if (hw->current_mode == HNS3_FC_NONE)
1672                 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1673         else
1674                 hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;
1675
1676         ret = hns3_dcb_pause_setup_hw(hw);
1677         if (ret) {
1678                 hns3_err(hw, "enable MAC Pause failed! ret = %d", ret);
1679                 goto setup_fc_fail;
1680         }
1681
1682         return 0;
1683
1684 setup_fc_fail:
1685         hw->current_mode = current_mode;
1686         hw->current_fc_status = fc_status;
1687         pf->pause_time = pause_time;
1688
1689         return ret;
1690 }