net/hns3: get device specifications from firmware
[dpdk.git] / drivers / net / hns3 / hns3_dcb.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4
5 #include <errno.h>
6 #include <inttypes.h>
7 #include <stdbool.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include <rte_io.h>
11 #include <rte_common.h>
12 #include <rte_ethdev.h>
13
14 #include "hns3_logs.h"
15 #include "hns3_regs.h"
16 #include "hns3_ethdev.h"
17 #include "hns3_dcb.h"
18
19 #define HNS3_SHAPER_BS_U_DEF    5
20 #define HNS3_SHAPER_BS_S_DEF    20
21 #define BW_MAX_PERCENT          100
22
23 /*
24  * hns3_shaper_para_calc: calculate ir parameter for the shaper
25  * @ir: rate to be configured, in Mbps
26  * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
27  * @shaper_para: shaper parameter of IR shaper
28  *
29  * the formula:
30  *
31  *              IR_b * (2 ^ IR_u) * 8
32  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
33  *              Tick * (2 ^ IR_s)
34  *
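 * For example (a worked check of the formula, using the port-level Tick
 * of 48 defined in the function below): ir_b = 240, ir_u = 0, ir_s = 2
 * gives IR = 240 * 1 * 8 / (48 * 4) * 1000 = 10000 Mbps.
 *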
35  * @return: 0: calculation successful, negative: failure
36  */
37 static int
38 hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
39                       struct hns3_shaper_parameter *shaper_para)
40 {
41 #define SHAPER_DEFAULT_IR_B     126
42 #define DIVISOR_CLK             (1000 * 8)
43 #define DIVISOR_IR_B_126        (126 * DIVISOR_CLK)
44
45         const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
46                 6 * 256,    /* Priority level */
47                 6 * 32,     /* Priority group level */
48                 6 * 8,      /* Port level */
49                 6 * 256     /* Qset level */
50         };
51         uint8_t ir_u_calc = 0;
52         uint8_t ir_s_calc = 0;
53         uint32_t denominator;
54         uint32_t ir_calc;
55         uint32_t tick;
56
57         /* Calc tick */
58         if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
59                 hns3_err(hw,
60                          "shaper_level(%d) is greater than HNS3_SHAPER_LVL_CNT(%d)",
61                          shaper_level, HNS3_SHAPER_LVL_CNT);
62                 return -EINVAL;
63         }
64
65         if (ir > HNS3_ETHER_MAX_RATE) {
66                 hns3_err(hw, "rate(%d) exceeds the maximum rate supported by the driver, "
67                          "HNS3_ETHER_MAX_RATE(%d)", ir, HNS3_ETHER_MAX_RATE);
68                 return -EINVAL;
69         }
70
71         tick = tick_array[shaper_level];
72
73         /*
74          * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
75          * the formula is changed to:
76          *              126 * 1 * 8
77          * ir_calc = ---------------- * 1000
78          *              tick * 1
79          */
80         ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
81
82         if (ir_calc == ir) {
83                 shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
84         } else if (ir_calc > ir) {
85                 /* Increasing the denominator to select ir_s value */
86                 do {
87                         ir_s_calc++;
88                         ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
89                 } while (ir_calc > ir);
90
91                 if (ir_calc == ir)
92                         shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
93                 else
94                         shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
95                                  (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
96         } else {
97                 /*
98                  * Increasing the numerator to select ir_u value. ir_u_calc will
99                  * get maximum value when ir_calc is minimum and ir is maximum.
100                  * ir_calc gets minimum value when tick is the maximum value.
101                  * At the same time, value of ir_u_calc can only be increased up
102                  * to eight after the while loop if the value of ir is equal
103                  * to HNS3_ETHER_MAX_RATE.
104                  */
105                 uint32_t numerator;
106                 do {
107                         ir_u_calc++;
108                         numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
109                         ir_calc = (numerator + (tick >> 1)) / tick;
110                 } while (ir_calc < ir);
111
112                 if (ir_calc == ir) {
113                         shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
114                 } else {
115                         --ir_u_calc;
116
117                         /*
118                          * The maximum value of ir_u_calc in this branch is
119                          * seven in all cases. Thus, value of denominator can
120                          * not be zero here.
121                          */
122                         denominator = DIVISOR_CLK * (1 << ir_u_calc);
123                         shaper_para->ir_b =
124                                 (ir * tick + (denominator >> 1)) / denominator;
125                 }
126         }
127
128         shaper_para->ir_u = ir_u_calc;
129         shaper_para->ir_s = ir_s_calc;
130
131         return 0;
132 }
133
134 static int
135 hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)
136 {
137 #define HNS3_HALF_BYTE_BIT_OFFSET 4
138         uint8_t tc = hw->dcb_info.prio_tc[pri_id];
139
140         if (tc >= hw->dcb_info.num_tc)
141                 return -EINVAL;
142
143         /*
144          * The register for priority has four bytes, the first byte includes
145          *  priority0 and priority1, the higher 4 bits stand for priority1
146          *  while the lower 4 bits stand for priority0, as below:
147          * first byte:  | pri_1 | pri_0 |
148          * second byte: | pri_3 | pri_2 |
149          * third byte:  | pri_5 | pri_4 |
150          * fourth byte: | pri_7 | pri_6 |
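         * e.g. pri_id 3 mapped to tc 2 sets the high nibble of the
         * second byte: pri[1] |= 2 << 4.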
151          */
152         pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);
153
154         return 0;
155 }
156
157 static int
158 hns3_up_to_tc_map(struct hns3_hw *hw)
159 {
160         struct hns3_cmd_desc desc;
161         uint8_t *pri = (uint8_t *)desc.data;
162         uint8_t pri_id;
163         int ret;
164
165         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);
166
167         for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {
168                 ret = hns3_fill_pri_array(hw, pri, pri_id);
169                 if (ret)
170                         return ret;
171         }
172
173         return hns3_cmd_send(hw, &desc, 1);
174 }
175
176 static int
177 hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)
178 {
179         struct hns3_pg_to_pri_link_cmd *map;
180         struct hns3_cmd_desc desc;
181
182         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);
183
184         map = (struct hns3_pg_to_pri_link_cmd *)desc.data;
185
186         map->pg_id = pg_id;
187         map->pri_bit_map = pri_bit_map;
188
189         return hns3_cmd_send(hw, &desc, 1);
190 }
191
192 static int
193 hns3_pg_to_pri_map(struct hns3_hw *hw)
194 {
195         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
196         struct hns3_pf *pf = &hns->pf;
197         struct hns3_pg_info *pg_info;
198         int ret, i;
199
200         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
201                 return -EINVAL;
202
203         for (i = 0; i < hw->dcb_info.num_pg; i++) {
204                 /* Cfg pg to priority mapping */
205                 pg_info = &hw->dcb_info.pg_info[i];
206                 ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);
207                 if (ret)
208                         return ret;
209         }
210
211         return 0;
212 }
213
214 static int
215 hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)
216 {
217         struct hns3_qs_to_pri_link_cmd *map;
218         struct hns3_cmd_desc desc;
219
220         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);
221
222         map = (struct hns3_qs_to_pri_link_cmd *)desc.data;
223
224         map->qs_id = rte_cpu_to_le_16(qs_id);
225         map->priority = pri;
226         map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;
227
228         return hns3_cmd_send(hw, &desc, 1);
229 }
230
231 static int
232 hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)
233 {
234         struct hns3_qs_weight_cmd *weight;
235         struct hns3_cmd_desc desc;
236
237         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);
238
239         weight = (struct hns3_qs_weight_cmd *)desc.data;
240
241         weight->qs_id = rte_cpu_to_le_16(qs_id);
242         weight->dwrr = dwrr;
243
244         return hns3_cmd_send(hw, &desc, 1);
245 }
246
247 static int
248 hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
249 {
250 #define DEFAULT_TC_WEIGHT       1
251 #define DEFAULT_TC_OFFSET       14
252         struct hns3_ets_tc_weight_cmd *ets_weight;
253         struct hns3_cmd_desc desc;
254         uint8_t i;
255
256         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);
257         ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;
258
259         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
260                 struct hns3_pg_info *pg_info;
261
262                 ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
263
264                 if (!(hw->hw_tc_map & BIT(i)))
265                         continue;
266
267                 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
268                 ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
269         }
270
271         ets_weight->weight_offset = DEFAULT_TC_OFFSET;
272
273         return hns3_cmd_send(hw, &desc, 1);
274 }
275
276 static int
277 hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)
278 {
279         struct hns3_priority_weight_cmd *weight;
280         struct hns3_cmd_desc desc;
281
282         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);
283
284         weight = (struct hns3_priority_weight_cmd *)desc.data;
285
286         weight->pri_id = pri_id;
287         weight->dwrr = dwrr;
288
289         return hns3_cmd_send(hw, &desc, 1);
290 }
291
292 static int
293 hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)
294 {
295         struct hns3_pg_weight_cmd *weight;
296         struct hns3_cmd_desc desc;
297
298         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);
299
300         weight = (struct hns3_pg_weight_cmd *)desc.data;
301
302         weight->pg_id = pg_id;
303         weight->dwrr = dwrr;
304
305         return hns3_cmd_send(hw, &desc, 1);
306 }
307 static int
308 hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)
309 {
310         struct hns3_cmd_desc desc;
311
312         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);
313
314         if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)
315                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
316         else
317                 desc.data[1] = 0;
318
319         desc.data[0] = rte_cpu_to_le_32(pg_id);
320
321         return hns3_cmd_send(hw, &desc, 1);
322 }
323
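/*
 * Pack the IR (ir_b/ir_u/ir_s) and bucket size (bs_b/bs_s) fields into the
 * single 32-bit shaping parameter consumed by the shaping commands.
 */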
324 static uint32_t
325 hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
326                            uint8_t bs_b, uint8_t bs_s)
327 {
328         uint32_t shapping_para = 0;
329
330         hns3_dcb_set_field(shapping_para, IR_B, ir_b);
331         hns3_dcb_set_field(shapping_para, IR_U, ir_u);
332         hns3_dcb_set_field(shapping_para, IR_S, ir_s);
333         hns3_dcb_set_field(shapping_para, BS_B, bs_b);
334         hns3_dcb_set_field(shapping_para, BS_S, bs_s);
335
336         return shapping_para;
337 }
338
339 static int
340 hns3_dcb_port_shaper_cfg(struct hns3_hw *hw)
341 {
342         struct hns3_port_shapping_cmd *shap_cfg_cmd;
343         struct hns3_shaper_parameter shaper_parameter;
344         uint32_t shapping_para;
345         uint32_t ir_u, ir_b, ir_s;
346         struct hns3_cmd_desc desc;
347         int ret;
348
349         ret = hns3_shaper_para_calc(hw, hw->mac.link_speed,
350                                     HNS3_SHAPER_LVL_PORT, &shaper_parameter);
351         if (ret) {
352                 hns3_err(hw, "calculate shaper parameter failed: %d", ret);
353                 return ret;
354         }
355
356         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);
357         shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;
358
359         ir_b = shaper_parameter.ir_b;
360         ir_u = shaper_parameter.ir_u;
361         ir_s = shaper_parameter.ir_s;
362         shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
363                                                    HNS3_SHAPER_BS_U_DEF,
364                                                    HNS3_SHAPER_BS_S_DEF);
365
366         shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);
367
368         return hns3_cmd_send(hw, &desc, 1);
369 }
370
371 static int
372 hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
373                          uint8_t pg_id, uint32_t shapping_para)
374 {
375         struct hns3_pg_shapping_cmd *shap_cfg_cmd;
376         enum hns3_opcode_type opcode;
377         struct hns3_cmd_desc desc;
378
379         opcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :
380                  HNS3_OPC_TM_PG_C_SHAPPING;
381         hns3_cmd_setup_basic_desc(&desc, opcode, false);
382
383         shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;
384
385         shap_cfg_cmd->pg_id = pg_id;
386
387         shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);
388
389         return hns3_cmd_send(hw, &desc, 1);
390 }
391
392 static int
393 hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
394 {
395         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
396         struct hns3_shaper_parameter shaper_parameter;
397         struct hns3_pf *pf = &hns->pf;
398         uint32_t ir_u, ir_b, ir_s;
399         uint32_t shaper_para;
400         uint8_t i;
401         int ret;
402
403         /* Cfg pg schd */
404         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
405                 return -EINVAL;
406
407         /* Cfg the shaper for each pg */
408         for (i = 0; i < hw->dcb_info.num_pg; i++) {
409                 /* Calc shaper para */
410                 ret = hns3_shaper_para_calc(hw,
411                                             hw->dcb_info.pg_info[i].bw_limit,
412                                             HNS3_SHAPER_LVL_PG,
413                                             &shaper_parameter);
414                 if (ret) {
415                         hns3_err(hw, "calculate shaper parameter failed: %d",
416                                  ret);
417                         return ret;
418                 }
419
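                /*
                 * Program the C (CIR) bucket with zero IR parameters; only the
                 * P (PIR) bucket below carries the rate calculated above.
                 */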
420                 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
421                                                          HNS3_SHAPER_BS_U_DEF,
422                                                          HNS3_SHAPER_BS_S_DEF);
423
424                 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
425                                                shaper_para);
426                 if (ret) {
427                         hns3_err(hw,
428                                  "config PG CIR shaper parameter failed: %d",
429                                  ret);
430                         return ret;
431                 }
432
433                 ir_b = shaper_parameter.ir_b;
434                 ir_u = shaper_parameter.ir_u;
435                 ir_s = shaper_parameter.ir_s;
436                 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
437                                                          HNS3_SHAPER_BS_U_DEF,
438                                                          HNS3_SHAPER_BS_S_DEF);
439
440                 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
441                                                shaper_para);
442                 if (ret) {
443                         hns3_err(hw,
444                                  "config PG PIR shaper parameter failed: %d",
445                                  ret);
446                         return ret;
447                 }
448         }
449
450         return 0;
451 }
452
453 static int
454 hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)
455 {
456         struct hns3_cmd_desc desc;
457
458         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);
459
460         if (mode == HNS3_SCH_MODE_DWRR)
461                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
462         else
463                 desc.data[1] = 0;
464
465         desc.data[0] = rte_cpu_to_le_32(qs_id);
466
467         return hns3_cmd_send(hw, &desc, 1);
468 }
469
470 static int
471 hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)
472 {
473         struct hns3_cmd_desc desc;
474
475         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);
476
477         if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)
478                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
479         else
480                 desc.data[1] = 0;
481
482         desc.data[0] = rte_cpu_to_le_32(pri_id);
483
484         return hns3_cmd_send(hw, &desc, 1);
485 }
486
487 static int
488 hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
489                           uint8_t pri_id, uint32_t shapping_para)
490 {
491         struct hns3_pri_shapping_cmd *shap_cfg_cmd;
492         enum hns3_opcode_type opcode;
493         struct hns3_cmd_desc desc;
494
495         opcode = bucket ? HNS3_OPC_TM_PRI_P_SHAPPING :
496                  HNS3_OPC_TM_PRI_C_SHAPPING;
497
498         hns3_cmd_setup_basic_desc(&desc, opcode, false);
499
500         shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;
501
502         shap_cfg_cmd->pri_id = pri_id;
503
504         shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);
505
506         return hns3_cmd_send(hw, &desc, 1);
507 }
508
509 static int
510 hns3_dcb_pri_tc_base_shaper_cfg(struct hns3_hw *hw)
511 {
512         struct hns3_shaper_parameter shaper_parameter;
513         uint32_t ir_u, ir_b, ir_s;
514         uint32_t shaper_para;
515         int ret, i;
516
517         for (i = 0; i < hw->dcb_info.num_tc; i++) {
518                 ret = hns3_shaper_para_calc(hw,
519                                             hw->dcb_info.tc_info[i].bw_limit,
520                                             HNS3_SHAPER_LVL_PRI,
521                                             &shaper_parameter);
522                 if (ret) {
523                         hns3_err(hw, "calculate shaper parameter failed: %d",
524                                  ret);
525                         return ret;
526                 }
527
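                /*
                 * As with the PG shapers: zero-IR C (CIR) bucket, calculated
                 * rate on the P (PIR) bucket.
                 */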
528                 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
529                                                          HNS3_SHAPER_BS_U_DEF,
530                                                          HNS3_SHAPER_BS_S_DEF);
531
532                 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
533                                                 shaper_para);
534                 if (ret) {
535                         hns3_err(hw,
536                                  "config priority CIR shaper parameter failed: %d",
537                                  ret);
538                         return ret;
539                 }
540
541                 ir_b = shaper_parameter.ir_b;
542                 ir_u = shaper_parameter.ir_u;
543                 ir_s = shaper_parameter.ir_s;
544                 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
545                                                          HNS3_SHAPER_BS_U_DEF,
546                                                          HNS3_SHAPER_BS_S_DEF);
547
548                 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
549                                                 shaper_para);
550                 if (ret) {
551                         hns3_err(hw,
552                                  "config priority PIR shaper parameter failed: %d",
553                                  ret);
554                         return ret;
555                 }
556         }
557
558         return 0;
559 }
560
561
562 static int
563 hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
564 {
565         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
566         struct hns3_pf *pf = &hns->pf;
567         int ret;
568
569         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
570                 return -EINVAL;
571
572         ret = hns3_dcb_pri_tc_base_shaper_cfg(hw);
573         if (ret)
574                 hns3_err(hw, "config pri shaper failed: %d", ret);
575
576         return ret;
577 }
578
579 void
580 hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
581 {
582         struct hns3_rss_conf *rss_cfg = &hw->rss_info;
583         uint16_t rx_qnum_per_tc;
584         int i;
585
586         rx_qnum_per_tc = nb_rx_q / hw->num_tc;
587         rx_qnum_per_tc = RTE_MIN(hw->rss_size_max, rx_qnum_per_tc);
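        /*
         * e.g. nb_rx_q = 16 with num_tc = 4 gives 4 Rx queues per TC,
         * subject to the rss_size_max cap applied above.
         */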
588         if (hw->alloc_rss_size != rx_qnum_per_tc) {
589                 hns3_info(hw, "rss size changes from %u to %u",
590                           hw->alloc_rss_size, rx_qnum_per_tc);
591                 hw->alloc_rss_size = rx_qnum_per_tc;
592         }
593         hw->used_rx_queues = hw->num_tc * hw->alloc_rss_size;
594
595         /*
596          * When the rss size is changed, we need to update the rss redirection
597          * table maintained by the driver. Besides, during the entire reset
598          * process, we need to ensure that the rss table information is not
599          * overwritten and is configured directly to the hardware in the
600          * RESET_STAGE_RESTORE stage of the reset process.
601          */
602         if (rte_atomic16_read(&hw->reset.resetting) == 0) {
603                 for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
604                         rss_cfg->rss_indirection_tbl[i] =
605                                                         i % hw->alloc_rss_size;
606         }
607 }
608
609 void
610 hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_queue)
611 {
612         struct hns3_tc_queue_info *tc_queue;
613         uint8_t i;
614
615         hw->tx_qnum_per_tc = nb_queue / hw->num_tc;
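        /*
         * e.g. nb_queue = 16 with num_tc = 4 gives tx_qnum_per_tc = 4,
         * so TC i owns Tx queues [4 * i, 4 * i + 3].
         */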
616         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
617                 tc_queue = &hw->tc_queue[i];
618                 if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
619                         tc_queue->enable = true;
620                         tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
621                         tc_queue->tqp_count = hw->tx_qnum_per_tc;
622                         tc_queue->tc = i;
623                 } else {
624                         /* Set to default queue if TC is disabled */
625                         tc_queue->enable = false;
626                         tc_queue->tqp_offset = 0;
627                         tc_queue->tqp_count = 0;
628                         tc_queue->tc = 0;
629                 }
630         }
631         hw->used_tx_queues = hw->num_tc * hw->tx_qnum_per_tc;
632 }
633
634 static void
635 hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
636                                  uint16_t nb_tx_q)
637 {
638         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
639         struct hns3_pf *pf = &hns->pf;
640
641         hw->num_tc = hw->dcb_info.num_tc;
642         hns3_set_rss_size(hw, nb_rx_q);
643         hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
644
645         if (!hns->is_vf)
646                 memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
647 }
648
649 int
650 hns3_dcb_info_init(struct hns3_hw *hw)
651 {
652         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
653         struct hns3_pf *pf = &hns->pf;
654         int i, k;
655
656         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
657             hw->dcb_info.num_pg != 1)
658                 return -EINVAL;
659
660         /* Initializing PG information */
661         memset(hw->dcb_info.pg_info, 0,
662                sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
663         for (i = 0; i < hw->dcb_info.num_pg; i++) {
664                 hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;
665                 hw->dcb_info.pg_info[i].pg_id = i;
666                 hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;
667                 hw->dcb_info.pg_info[i].bw_limit = HNS3_ETHER_MAX_RATE;
668
669                 if (i != 0)
670                         continue;
671
672                 hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;
673                 for (k = 0; k < hw->dcb_info.num_tc; k++)
674                         hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;
675         }
676
677         /* All UPs mapping to TC0 */
678         for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
679                 hw->dcb_info.prio_tc[i] = 0;
680
681         /* Initializing tc information */
682         memset(hw->dcb_info.tc_info, 0,
683                sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
684         for (i = 0; i < hw->dcb_info.num_tc; i++) {
685                 hw->dcb_info.tc_info[i].tc_id = i;
686                 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
687                 hw->dcb_info.tc_info[i].pgid = 0;
688                 hw->dcb_info.tc_info[i].bw_limit =
689                         hw->dcb_info.pg_info[0].bw_limit;
690         }
691
692         return 0;
693 }
694
695 static int
696 hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)
697 {
698         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
699         struct hns3_pf *pf = &hns->pf;
700         int ret, i;
701
702         /* Only configured in TC-based scheduler mode */
703         if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)
704                 return -EINVAL;
705
706         for (i = 0; i < hw->dcb_info.num_pg; i++) {
707                 ret = hns3_dcb_pg_schd_mode_cfg(hw, i);
708                 if (ret)
709                         return ret;
710         }
711
712         return 0;
713 }
714
715 static int
716 hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)
717 {
718         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
719         struct hns3_pf *pf = &hns->pf;
720         uint8_t i;
721         int ret;
722
723         if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {
724                 for (i = 0; i < hw->dcb_info.num_tc; i++) {
725                         ret = hns3_dcb_pri_schd_mode_cfg(hw, i);
726                         if (ret)
727                                 return ret;
728
729                         ret = hns3_dcb_qs_schd_mode_cfg(hw, i,
730                                                         HNS3_SCH_MODE_DWRR);
731                         if (ret)
732                                 return ret;
733                 }
734         }
735
736         return 0;
737 }
738
739 static int
740 hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)
741 {
742         int ret;
743
744         ret = hns3_dcb_lvl2_schd_mode_cfg(hw);
745         if (ret) {
746                 hns3_err(hw, "config lvl2_schd_mode failed: %d", ret);
747                 return ret;
748         }
749
750         ret = hns3_dcb_lvl34_schd_mode_cfg(hw);
751         if (ret)
752                 hns3_err(hw, "config lvl34_schd_mode failed: %d", ret);
753
754         return ret;
755 }
756
757 static int
758 hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)
759 {
760         struct hns3_pg_info *pg_info;
761         uint8_t dwrr;
762         int ret, i;
763
764         for (i = 0; i < hw->dcb_info.num_tc; i++) {
765                 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
766                 dwrr = pg_info->tc_dwrr[i];
767
768                 ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);
769                 if (ret) {
770                         hns3_err(hw,
771                                "fail to send priority weight cmd: %d, ret = %d",
772                                i, ret);
773                         return ret;
774                 }
775
776                 ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);
777                 if (ret) {
778                         hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d",
779                                  i, ret);
780                         return ret;
781                 }
782         }
783
784         return 0;
785 }
786
787 static int
788 hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
789 {
790         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
791         struct hns3_pf *pf = &hns->pf;
792         uint32_t version;
793         int ret;
794
795         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
796                 return -EINVAL;
797
798         ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);
799         if (ret)
800                 return ret;
801
802         if (!hns3_dev_dcb_supported(hw))
803                 return 0;
804
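        /*
         * Older firmware may not implement the ETS TC weight command; treat
         * -EOPNOTSUPP as non-fatal and only log a warning.
         */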
805         ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
806         if (ret == -EOPNOTSUPP) {
807                 version = hw->fw_version;
808                 hns3_warn(hw,
809                           "fw %u.%u.%u.%u doesn't support ets tc weight cmd",
810                           hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
811                                          HNS3_FW_VERSION_BYTE3_S),
812                           hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
813                                          HNS3_FW_VERSION_BYTE2_S),
814                           hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
815                                          HNS3_FW_VERSION_BYTE1_S),
816                           hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
817                                          HNS3_FW_VERSION_BYTE0_S));
818                 ret = 0;
819         }
820
821         return ret;
822 }
823
824 static int
825 hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)
826 {
827         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
828         struct hns3_pf *pf = &hns->pf;
829         int ret, i;
830
831         /* Cfg pg schd */
832         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
833                 return -EINVAL;
834
835         /* Cfg dwrr for each pg */
836         for (i = 0; i < hw->dcb_info.num_pg; i++) {
837                 /* Cfg dwrr */
838                 ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);
839                 if (ret)
840                         return ret;
841         }
842
843         return 0;
844 }
845
846 static int
847 hns3_dcb_dwrr_cfg(struct hns3_hw *hw)
848 {
849         int ret;
850
851         ret = hns3_dcb_pg_dwrr_cfg(hw);
852         if (ret) {
853                 hns3_err(hw, "config pg_dwrr failed: %d", ret);
854                 return ret;
855         }
856
857         ret = hns3_dcb_pri_dwrr_cfg(hw);
858         if (ret)
859                 hns3_err(hw, "config pri_dwrr failed: %d", ret);
860
861         return ret;
862 }
863
864 static int
865 hns3_dcb_shaper_cfg(struct hns3_hw *hw)
866 {
867         int ret;
868
869         ret = hns3_dcb_port_shaper_cfg(hw);
870         if (ret) {
871                 hns3_err(hw, "config port shaper failed: %d", ret);
872                 return ret;
873         }
874
875         ret = hns3_dcb_pg_shaper_cfg(hw);
876         if (ret) {
877                 hns3_err(hw, "config pg shaper failed: %d", ret);
878                 return ret;
879         }
880
881         return hns3_dcb_pri_shaper_cfg(hw);
882 }
883
884 static int
885 hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)
886 {
887         struct hns3_nq_to_qs_link_cmd *map;
888         struct hns3_cmd_desc desc;
889
890         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);
891
892         map = (struct hns3_nq_to_qs_link_cmd *)desc.data;
893
894         map->nq_id = rte_cpu_to_le_16(q_id);
895         map->qset_id = rte_cpu_to_le_16(qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);
896
897         return hns3_cmd_send(hw, &desc, 1);
898 }
899
900 static int
901 hns3_q_to_qs_map(struct hns3_hw *hw)
902 {
903         struct hns3_tc_queue_info *tc_queue;
904         uint16_t q_id;
905         uint32_t i, j;
906         int ret;
907
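        /* Link every Tx queue of TC i to qset i (qsets map 1:1 to TCs here). */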
908         for (i = 0; i < hw->num_tc; i++) {
909                 tc_queue = &hw->tc_queue[i];
910                 for (j = 0; j < tc_queue->tqp_count; j++) {
911                         q_id = tc_queue->tqp_offset + j;
912                         ret = hns3_q_to_qs_map_cfg(hw, q_id, i);
913                         if (ret)
914                                 return ret;
915                 }
916         }
917
918         return 0;
919 }
920
921 static int
922 hns3_pri_q_qs_cfg(struct hns3_hw *hw)
923 {
924         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
925         struct hns3_pf *pf = &hns->pf;
926         uint32_t i;
927         int ret;
928
929         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
930                 return -EINVAL;
931
932         /* Cfg qs -> pri mapping */
933         for (i = 0; i < hw->num_tc; i++) {
934                 ret = hns3_qs_to_pri_map_cfg(hw, i, i);
935                 if (ret) {
936                         hns3_err(hw, "qs_to_pri mapping fail: %d", ret);
937                         return ret;
938                 }
939         }
940
941         /* Cfg q -> qs mapping */
942         ret = hns3_q_to_qs_map(hw);
943         if (ret)
944                 hns3_err(hw, "nq_to_qs mapping fail: %d", ret);
945
946         return ret;
947 }
948
949 static int
950 hns3_dcb_map_cfg(struct hns3_hw *hw)
951 {
952         int ret;
953
954         ret = hns3_up_to_tc_map(hw);
955         if (ret) {
956                 hns3_err(hw, "up_to_tc mapping fail: %d", ret);
957                 return ret;
958         }
959
960         ret = hns3_pg_to_pri_map(hw);
961         if (ret) {
962                 hns3_err(hw, "pri_to_pg mapping fail: %d", ret);
963                 return ret;
964         }
965
966         return hns3_pri_q_qs_cfg(hw);
967 }
968
969 static int
970 hns3_dcb_schd_setup_hw(struct hns3_hw *hw)
971 {
972         int ret;
973
974         /* Cfg dcb mapping  */
975         ret = hns3_dcb_map_cfg(hw);
976         if (ret)
977                 return ret;
978
979         /* Cfg dcb shaper */
980         ret = hns3_dcb_shaper_cfg(hw);
981         if (ret)
982                 return ret;
983
984         /* Cfg dwrr */
985         ret = hns3_dcb_dwrr_cfg(hw);
986         if (ret)
987                 return ret;
988
989         /* Cfg schd mode for each level schd */
990         return hns3_dcb_schd_mode_cfg(hw);
991 }
992
993 static int
994 hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,
995                      uint8_t pause_trans_gap, uint16_t pause_trans_time)
996 {
997         struct hns3_cfg_pause_param_cmd *pause_param;
998         struct hns3_cmd_desc desc;
999
1000         pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1001
1002         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);
1003
1004         memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);
1005         memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);
1006         pause_param->pause_trans_gap = pause_trans_gap;
1007         pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);
1008
1009         return hns3_cmd_send(hw, &desc, 1);
1010 }
1011
1012 int
1013 hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)
1014 {
1015         struct hns3_cfg_pause_param_cmd *pause_param;
1016         struct hns3_cmd_desc desc;
1017         uint16_t trans_time;
1018         uint8_t trans_gap;
1019         int ret;
1020
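        /*
         * Read back the current pause parameters first so that only the MAC
         * address is updated below.
         */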
1021         pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1022
1023         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);
1024
1025         ret = hns3_cmd_send(hw, &desc, 1);
1026         if (ret)
1027                 return ret;
1028
1029         trans_gap = pause_param->pause_trans_gap;
1030         trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);
1031
1032         return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);
1033 }
1034
1035 static int
1036 hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
1037 {
1038 #define PAUSE_TIME_DIV_BY       2
1039 #define PAUSE_TIME_MIN_VALUE    0x4
1040
1041         struct hns3_mac *mac = &hw->mac;
1042         uint8_t pause_trans_gap;
1043
1044         /*
1045          * Pause transmit gap must be less than "pause_time / 2", otherwise
1046          * the behavior of MAC is undefined.
1047          */
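        /*
         * e.g. a pause_time of 6 (assuming it does not exceed twice the
         * default gap) takes the middle branch: pause_trans_gap = 6 / 2 - 1 = 2.
         */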
1048         if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1049                 pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
1050         else if (pause_time >= PAUSE_TIME_MIN_VALUE &&
1051                  pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1052                 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1053         else {
1054                 hns3_warn(hw, "pause_time(%d) is adjusted to 4", pause_time);
1055                 pause_time = PAUSE_TIME_MIN_VALUE;
1056                 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1057         }
1058
1059         return hns3_pause_param_cfg(hw, mac->mac_addr,
1060                                     pause_trans_gap, pause_time);
1061 }
1062
1063 static int
1064 hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)
1065 {
1066         struct hns3_cmd_desc desc;
1067
1068         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);
1069
1070         desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1071                 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1072
1073         return hns3_cmd_send(hw, &desc, 1);
1074 }
1075
1076 static int
1077 hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)
1078 {
1079         struct hns3_cmd_desc desc;
1080         struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;
1081
1082         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);
1083
1084         pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1085                                         (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1086
1087         pfc->pri_en_bitmap = pfc_bitmap;
1088
1089         return hns3_cmd_send(hw, &desc, 1);
1090 }
1091
1092 static int
1093 hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)
1094 {
1095         struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
1096         struct hns3_cmd_desc desc;
1097
1098         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);
1099
1100         bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;
1101
1102         bp_to_qs_map_cmd->tc_id = tc;
1103         bp_to_qs_map_cmd->qs_group_id = grp_id;
1104         bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);
1105
1106         return hns3_cmd_send(hw, &desc, 1);
1107 }
1108
1109 static void
1110 hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
1111 {
1112         switch (hw->current_mode) {
1113         case HNS3_FC_NONE:
1114                 *tx_en = false;
1115                 *rx_en = false;
1116                 break;
1117         case HNS3_FC_RX_PAUSE:
1118                 *tx_en = false;
1119                 *rx_en = true;
1120                 break;
1121         case HNS3_FC_TX_PAUSE:
1122                 *tx_en = true;
1123                 *rx_en = false;
1124                 break;
1125         case HNS3_FC_FULL:
1126                 *tx_en = true;
1127                 *rx_en = true;
1128                 break;
1129         default:
1130                 *tx_en = false;
1131                 *rx_en = false;
1132                 break;
1133         }
1134 }
1135
1136 static int
1137 hns3_mac_pause_setup_hw(struct hns3_hw *hw)
1138 {
1139         bool tx_en, rx_en;
1140
1141         if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
1142                 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1143         else {
1144                 tx_en = false;
1145                 rx_en = false;
1146         }
1147
1148         return hns3_mac_pause_en_cfg(hw, tx_en, rx_en);
1149 }
1150
1151 static int
1152 hns3_pfc_setup_hw(struct hns3_hw *hw)
1153 {
1154         bool tx_en, rx_en;
1155
1156         if (hw->current_fc_status == HNS3_FC_STATUS_PFC)
1157                 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1158         else {
1159                 tx_en = false;
1160                 rx_en = false;
1161         }
1162
1163         return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);
1164 }
1165
1166 /*
1167  * Each TC has 1024 queue sets for back pressure. They are divided into
1168  * 32 groups, and each group contains 32 queue sets, which can be
1169  * represented by a uint32_t bitmap.
1170  */
1171 static int
1172 hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)
1173 {
1174         uint32_t qs_bitmap;
1175         int ret;
1176         int i;
1177
1178         for (i = 0; i < HNS3_BP_GRP_NUM; i++) {
1179                 uint8_t grp, sub_grp;
1180                 qs_bitmap = 0;
1181
1182                 grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
1183                 sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,
1184                                          HNS3_BP_SUB_GRP_ID_S);
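                /*
                 * Assuming the group field selects the bits above the low five
                 * bits of the qset id, a TC/qset id below 32 always lands in
                 * group 0 with sub_grp equal to the id itself.
                 */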
1185                 if (i == grp)
1186                         qs_bitmap |= (1 << sub_grp);
1187
1188                 ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);
1189                 if (ret)
1190                         return ret;
1191         }
1192
1193         return 0;
1194 }
1195
1196 static int
1197 hns3_dcb_bp_setup(struct hns3_hw *hw)
1198 {
1199         int ret, i;
1200
1201         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1202                 ret = hns3_bp_setup_hw(hw, i);
1203                 if (ret)
1204                         return ret;
1205         }
1206
1207         return 0;
1208 }
1209
1210 static int
1211 hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
1212 {
1213         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1214         struct hns3_pf *pf = &hns->pf;
1215         int ret;
1216
1217         ret = hns3_pause_param_setup_hw(hw, pf->pause_time);
1218         if (ret) {
1219                 hns3_err(hw, "Fail to set pause parameter. ret = %d", ret);
1220                 return ret;
1221         }
1222
1223         ret = hns3_mac_pause_setup_hw(hw);
1224         if (ret) {
1225                 hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret);
1226                 return ret;
1227         }
1228
1229         /* Only DCB-capable devices support qset back pressure and pfc cmd */
1230         if (!hns3_dev_dcb_supported(hw))
1231                 return 0;
1232
1233         ret = hns3_pfc_setup_hw(hw);
1234         if (ret) {
1235                 hns3_err(hw, "config pfc failed! ret = %d", ret);
1236                 return ret;
1237         }
1238
1239         return hns3_dcb_bp_setup(hw);
1240 }
1241
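/*
 * Build a per-TC bitmap in which TC i is set when at least one PFC-enabled
 * user priority (a bit in pfc_en) is mapped to it through prio_tc[].
 */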
1242 static uint8_t
1243 hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)
1244 {
1245         uint8_t pfc_map = 0;
1246         uint8_t *prio_tc;
1247         uint8_t i, j;
1248
1249         prio_tc = hw->dcb_info.prio_tc;
1250         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1251                 for (j = 0; j < HNS3_MAX_USER_PRIO; j++) {
1252                         if (prio_tc[j] == i && pfc_en & BIT(j)) {
1253                                 pfc_map |= BIT(i);
1254                                 break;
1255                         }
1256                 }
1257         }
1258
1259         return pfc_map;
1260 }
1261
1262 static void
1263 hns3_dcb_cfg_validate(struct hns3_adapter *hns, uint8_t *tc, bool *changed)
1264 {
1265         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1266         struct hns3_hw *hw = &hns->hw;
1267         uint8_t max_tc = 0;
1268         uint8_t pfc_en;
1269         int i;
1270
1271         dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1272         for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1273                 if (dcb_rx_conf->dcb_tc[i] != hw->dcb_info.prio_tc[i])
1274                         *changed = true;
1275
1276                 if (dcb_rx_conf->dcb_tc[i] > max_tc)
1277                         max_tc = dcb_rx_conf->dcb_tc[i];
1278         }
1279         *tc = max_tc + 1;
1280         if (*tc != hw->dcb_info.num_tc)
1281                 *changed = true;
1282
1283         /*
1284          * We ensure that dcb information can be reconfigured
1285          * after the hns3_priority_flow_ctrl_set function is called.
1286          */
1287         if (hw->current_mode != HNS3_FC_FULL)
1288                 *changed = true;
1289         pfc_en = RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1290         if (hw->dcb_info.pfc_en != pfc_en)
1291                 *changed = true;
1292 }
1293
1294 static void
1295 hns3_dcb_info_cfg(struct hns3_adapter *hns)
1296 {
1297         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1298         struct hns3_pf *pf = &hns->pf;
1299         struct hns3_hw *hw = &hns->hw;
1300         uint8_t tc_bw, bw_rest;
1301         uint8_t i, j;
1302
1303         dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1304         pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
1305         pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;
1306
1307         /* Config pg0 */
1308         memset(hw->dcb_info.pg_info, 0,
1309                sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
1310         hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT;
1311         hw->dcb_info.pg_info[0].pg_id = 0;
1312         hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;
1313         hw->dcb_info.pg_info[0].bw_limit = HNS3_ETHER_MAX_RATE;
1314         hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;
1315
1316         /* Each valid tc has the same bw by default */
1317         tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;
1318         for (i = 0; i < hw->dcb_info.num_tc; i++)
1319                 hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;
1320         /* To ensure the sum of tc_dwrr is equal to 100 */
1321         bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;
1322         for (j = 0; j < bw_rest; j++)
1323                 hw->dcb_info.pg_info[0].tc_dwrr[j]++;
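        /* e.g. num_tc = 3: tc_bw = 33, bw_rest = 1, so tc_dwrr = {34, 33, 33}. */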
1324         for (; i < dcb_rx_conf->nb_tcs; i++)
1325                 hw->dcb_info.pg_info[0].tc_dwrr[i] = 0;
1326
1327         /* All tcs map to pg0 */
1328         memset(hw->dcb_info.tc_info, 0,
1329                sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
1330         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1331                 hw->dcb_info.tc_info[i].tc_id = i;
1332                 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
1333                 hw->dcb_info.tc_info[i].pgid = 0;
1334                 hw->dcb_info.tc_info[i].bw_limit =
1335                                         hw->dcb_info.pg_info[0].bw_limit;
1336         }
1337
1338         for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
1339                 hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];
1340
1341         hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
1342                                          hw->data->nb_tx_queues);
1343 }
1344
1345 static int
1346 hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
1347 {
1348         struct hns3_pf *pf = &hns->pf;
1349         struct hns3_hw *hw = &hns->hw;
1350         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1351         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1352         uint8_t bit_map = 0;
1353         uint8_t i;
1354
1355         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1356             hw->dcb_info.num_pg != 1)
1357                 return -EINVAL;
1358
1359         if (nb_rx_q < num_tc) {
1360                 hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
1361                          nb_rx_q, num_tc);
1362                 return -EINVAL;
1363         }
1364
1365         if (nb_tx_q < num_tc) {
1366                 hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
1367                          nb_tx_q, num_tc);
1368                 return -EINVAL;
1369         }
1370
1371         /* Discontinuous tc is currently not supported */
1372         hw->dcb_info.num_tc = num_tc;
1373         for (i = 0; i < hw->dcb_info.num_tc; i++)
1374                 bit_map |= BIT(i);
1375
1376         if (!bit_map) {
1377                 bit_map = 1;
1378                 hw->dcb_info.num_tc = 1;
1379         }
1380         hw->hw_tc_map = bit_map;
1381         hns3_dcb_info_cfg(hns);
1382
1383         return 0;
1384 }
1385
1386 static int
1387 hns3_dcb_hw_configure(struct hns3_adapter *hns)
1388 {
1389         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1390         struct hns3_pf *pf = &hns->pf;
1391         struct hns3_hw *hw = &hns->hw;
1392         enum hns3_fc_status fc_status = hw->current_fc_status;
1393         enum hns3_fc_mode current_mode = hw->current_mode;
1394         uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1395         int ret, status;
1396
1397         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1398             pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
1399                 return -ENOTSUP;
1400
1401         ret = hns3_dcb_schd_setup_hw(hw);
1402         if (ret) {
1403                 hns3_err(hw, "dcb schedule configure failed! ret = %d", ret);
1404                 return ret;
1405         }
1406
1407         if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
1408                 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1409                 if (dcb_rx_conf->nb_tcs == 0)
1410                         hw->dcb_info.pfc_en = 1; /* tc0 only */
1411                 else
1412                         hw->dcb_info.pfc_en =
1413                         RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1414
1415                 hw->dcb_info.hw_pfc_map =
1416                                 hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1417
1418                 ret = hns3_buffer_alloc(hw);
1419                 if (ret)
1420                         return ret;
1421
1422                 hw->current_fc_status = HNS3_FC_STATUS_PFC;
1423                 hw->current_mode = HNS3_FC_FULL;
1424                 ret = hns3_dcb_pause_setup_hw(hw);
1425                 if (ret) {
1426                         hns3_err(hw, "setup pfc failed! ret = %d", ret);
1427                         goto pfc_setup_fail;
1428                 }
1429         } else {
1430                 /*
1431                  * Although dcb_capability_en lacks the ETH_DCB_PFC_SUPPORT
1432                  * flag, DCB information such as the number of tcs is still
1433                  * configured. Therefore, the packet buffer allocation needs
1434                  * to be refreshed.
1435                  */
1436                 ret = hns3_buffer_alloc(hw);
1437                 if (ret)
1438                         return ret;
1439         }
1440
1441         return 0;
1442
1443 pfc_setup_fail:
1444         hw->current_mode = current_mode;
1445         hw->current_fc_status = fc_status;
1446         hw->dcb_info.hw_pfc_map = hw_pfc_map;
1447         status = hns3_buffer_alloc(hw);
1448         if (status)
1449                 hns3_err(hw, "recover packet buffer fail! status = %d", status);
1450
1451         return ret;
1452 }
1453
1454 /*
1455  * hns3_dcb_configure - setup dcb related config
1456  * @hns: pointer to hns3 adapter
1457  * Returns 0 on success, negative value on failure.
1458  */
1459 int
1460 hns3_dcb_configure(struct hns3_adapter *hns)
1461 {
1462         struct hns3_hw *hw = &hns->hw;
1463         bool map_changed = false;
1464         uint8_t num_tc = 0;
1465         int ret;
1466
1467         hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
1468         if (map_changed || rte_atomic16_read(&hw->reset.resetting)) {
1469                 ret = hns3_dcb_info_update(hns, num_tc);
1470                 if (ret) {
1471                         hns3_err(hw, "dcb info update failed: %d", ret);
1472                         return ret;
1473                 }
1474
1475                 ret = hns3_dcb_hw_configure(hns);
1476                 if (ret) {
1477                         hns3_err(hw, "dcb hw configure failed: %d", ret);
1478                         return ret;
1479                 }
1480         }
1481
1482         return 0;
1483 }
1484
1485 int
1486 hns3_dcb_init_hw(struct hns3_hw *hw)
1487 {
1488         int ret;
1489
1490         ret = hns3_dcb_schd_setup_hw(hw);
1491         if (ret) {
1492                 hns3_err(hw, "dcb schedule setup failed: %d", ret);
1493                 return ret;
1494         }
1495
1496         ret = hns3_dcb_pause_setup_hw(hw);
1497         if (ret)
1498                 hns3_err(hw, "PAUSE setup failed: %d", ret);
1499
1500         return ret;
1501 }
1502
1503 int
1504 hns3_dcb_init(struct hns3_hw *hw)
1505 {
1506         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1507         struct hns3_pf *pf = &hns->pf;
1508         int ret;
1509
1510         PMD_INIT_FUNC_TRACE();
1511
1512         /*
1513          * Based on the 'adapter_state' identifier, the following branch is only
1514          * executed to initialize the default dcb configuration during driver
1515          * initialization. Because the driver saves dcb-related information
1516          * before a reset is triggered, the reinit dev stage of the reset
1517          * process must not enter this branch, otherwise that information
1518          * would be changed.
1519          */
1520         if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
1521                 hw->requested_mode = HNS3_FC_NONE;
1522                 hw->current_mode = hw->requested_mode;
1523                 pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
1524                 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1525
1526                 ret = hns3_dcb_info_init(hw);
1527                 if (ret) {
1528                         hns3_err(hw, "dcb info init failed: %d", ret);
1529                         return ret;
1530                 }
1531                 hns3_dcb_update_tc_queue_mapping(hw, hw->tqps_num,
1532                                                  hw->tqps_num);
1533         }
1534
1535         /*
1536          * The DCB hardware is configured by the following function during both
1537          * driver initialization and the reset process. However, once driver
1538          * initialization has finished and a reset occurs, the driver restores
1539          * the dcb hardware configuration directly from the dcb-related
1540          * information maintained in software.
1541          */
1542         ret = hns3_dcb_init_hw(hw);
1543         if (ret) {
1544                 hns3_err(hw, "dcb init hardware failed: %d", ret);
1545                 return ret;
1546         }
1547
1548         return 0;
1549 }
1550
1551 static int
1552 hns3_update_queue_map_configure(struct hns3_adapter *hns)
1553 {
1554         struct hns3_hw *hw = &hns->hw;
1555         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1556         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1557         int ret;
1558
1559         hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
1560         ret = hns3_q_to_qs_map(hw);
1561         if (ret)
1562                 hns3_err(hw, "failed to map nq to qs! ret = %d", ret);
1563
1564         return ret;
1565 }
1566
1567 int
1568 hns3_dcb_cfg_update(struct hns3_adapter *hns)
1569 {
1570         struct hns3_hw *hw = &hns->hw;
1571         enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
1572         int ret;
1573
1574         if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
1575                 ret = hns3_dcb_configure(hns);
1576                 if (ret)
1577                         hns3_err(hw, "Failed to config dcb: %d", ret);
1578         } else {
1579                 /*
1580                  * Update the queue mapping without PFC configuration,
1581                  * because the queues were reconfigured by the user.
1582                  */
1583                 ret = hns3_update_queue_map_configure(hns);
1584                 if (ret)
1585                         hns3_err(hw,
1586                                  "Failed to update queue mapping configure: %d",
1587                                  ret);
1588         }
1589
1590         return ret;
1591 }
1592
1593 /*
1594  * hns3_dcb_pfc_enable - Enable priority flow control
1595  * @dev: pointer to ethernet device
1596  *
1597  * Configures the pfc settings for one priority.
1598  */
1599 int
1600 hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
1601 {
1602         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1603         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1604         enum hns3_fc_status fc_status = hw->current_fc_status;
1605         enum hns3_fc_mode current_mode = hw->current_mode;
1606         uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1607         uint8_t pfc_en = hw->dcb_info.pfc_en;
1608         uint8_t priority = pfc_conf->priority;
1609         uint16_t pause_time = pf->pause_time;
1610         int ret, status;
1611
1612         pf->pause_time = pfc_conf->fc.pause_time;
1613         hw->current_mode = hw->requested_mode;
1614         hw->current_fc_status = HNS3_FC_STATUS_PFC;
1615         hw->dcb_info.pfc_en |= BIT(priority);
1616         hw->dcb_info.hw_pfc_map =
1617                         hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1618         ret = hns3_buffer_alloc(hw);
1619         if (ret)
1620                 goto pfc_setup_fail;
1621
1622         /*
1623          * The flow control mode of all UPs will be changed based on
1624          * the current_mode coming from the user.
1625          */
1626         ret = hns3_dcb_pause_setup_hw(hw);
1627         if (ret) {
1628                 hns3_err(hw, "enable pfc failed! ret = %d", ret);
1629                 goto pfc_setup_fail;
1630         }
1631
1632         return 0;
1633
1634 pfc_setup_fail:
1635         hw->current_mode = current_mode;
1636         hw->current_fc_status = fc_status;
1637         pf->pause_time = pause_time;
1638         hw->dcb_info.pfc_en = pfc_en;
1639         hw->dcb_info.hw_pfc_map = hw_pfc_map;
1640         status = hns3_buffer_alloc(hw);
1641         if (status)
1642                 hns3_err(hw, "recover packet buffer fail: %d", status);
1643
1644         return ret;
1645 }
1646
1647 /*
1648  * hns3_fc_enable - Enable MAC pause
1649  * @dev: pointer to ethernet device
1650  *
1651  * Configures the MAC pause settings.
1652  */
1653 int
1654 hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1655 {
1656         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1657         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1658         enum hns3_fc_status fc_status = hw->current_fc_status;
1659         enum hns3_fc_mode current_mode = hw->current_mode;
1660         uint16_t pause_time = pf->pause_time;
1661         int ret;
1662
1663         pf->pause_time = fc_conf->pause_time;
1664         hw->current_mode = hw->requested_mode;
1665
1666         /*
1667          * In fact, current_fc_status is HNS3_FC_STATUS_NONE when the flow
1668          * control mode is configured to HNS3_FC_NONE.
1669          */
1670         if (hw->current_mode == HNS3_FC_NONE)
1671                 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1672         else
1673                 hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;
1674
1675         ret = hns3_dcb_pause_setup_hw(hw);
1676         if (ret) {
1677                 hns3_err(hw, "enable MAC Pause failed! ret = %d", ret);
1678                 goto setup_fc_fail;
1679         }
1680
1681         return 0;
1682
1683 setup_fc_fail:
1684         hw->current_mode = current_mode;
1685         hw->current_fc_status = fc_status;
1686         pf->pause_time = pause_time;
1687
1688         return ret;
1689 }