net/hns3: maximize queue number
[dpdk.git] / drivers / net / hns3 / hns3_dcb.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4
5 #include <errno.h>
6 #include <inttypes.h>
7 #include <stdbool.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include <rte_io.h>
11 #include <rte_common.h>
12 #include <rte_ethdev.h>
13
14 #include "hns3_logs.h"
15 #include "hns3_regs.h"
16 #include "hns3_ethdev.h"
17 #include "hns3_dcb.h"
18
19 #define HNS3_SHAPER_BS_U_DEF    5
20 #define HNS3_SHAPER_BS_S_DEF    20
21 #define BW_MAX_PERCENT          100
22
/*
 * hns3_shaper_para_calc: calculate the IR parameter for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @shaper_para: shaper parameter of IR shaper
 *
 * the formula:
 *
 *              IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *              Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
static int
hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
		      struct hns3_shaper_parameter *shaper_para)
{
#define SHAPER_DEFAULT_IR_B     126
#define DIVISOR_CLK             (1000 * 8)
#define DIVISOR_IR_B_126        (126 * DIVISOR_CLK)

	/* Hardware tick per shaper level; indexed by HNS3_SHAPER_LVL_*. */
	const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
		6 * 256,    /* Priority level */
		6 * 32,     /* Priority group level */
		6 * 8,      /* Port level */
		6 * 256     /* Qset level */
	};
	uint8_t ir_u_calc = 0;
	uint8_t ir_s_calc = 0;
	uint32_t denominator;
	uint32_t ir_calc;
	uint32_t tick;

	/* Calc tick */
	if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
		hns3_err(hw,
			 "shaper_level(%d) is greater than HNS3_SHAPER_LVL_CNT(%d)",
			 shaper_level, HNS3_SHAPER_LVL_CNT);
		return -EINVAL;
	}

	if (ir > HNS3_ETHER_MAX_RATE) {
		hns3_err(hw, "rate(%d) exceeds the rate driver supported "
			 "HNS3_ETHER_MAX_RATE(%d)", ir, HNS3_ETHER_MAX_RATE);
		return -EINVAL;
	}

	tick = tick_array[shaper_level];

	/*
	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 * (rounded up so ir_calc never underestimates the achievable rate)
	 */
	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		/* Default parameters reproduce the requested rate exactly. */
		shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		do {
			ir_s_calc++;
			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
		} while (ir_calc > ir);

		if (ir_calc == ir)
			shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
		else
			/* Round ir_b to the nearest value for the chosen ir_s. */
			shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/*
		 * Increasing the numerator to select ir_u value. ir_u_calc will
		 * get maximum value when ir_calc is minimum and ir is maximum.
		 * ir_calc gets minimum value when tick is the maximum value.
		 * At the same time, value of ir_u_calc can only be increased up
		 * to eight after the while loop if the value of ir is equal
		 * to HNS3_ETHER_MAX_RATE.
		 */
		uint32_t numerator;
		do {
			ir_u_calc++;
			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		} while (ir_calc < ir);

		if (ir_calc == ir) {
			shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
		} else {
			/* Overshot: step back one ir_u and recompute ir_b. */
			--ir_u_calc;

			/*
			 * The maximum value of ir_u_calc in this branch is
			 * seven in all cases. Thus, value of denominator can
			 * not be zero here.
			 */
			denominator = DIVISOR_CLK * (1 << ir_u_calc);
			shaper_para->ir_b =
				(ir * tick + (denominator >> 1)) / denominator;
		}
	}

	shaper_para->ir_u = ir_u_calc;
	shaper_para->ir_s = ir_s_calc;

	return 0;
}
133
134 static int
135 hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)
136 {
137 #define HNS3_HALF_BYTE_BIT_OFFSET 4
138         uint8_t tc = hw->dcb_info.prio_tc[pri_id];
139
140         if (tc >= hw->dcb_info.num_tc)
141                 return -EINVAL;
142
143         /*
144          * The register for priority has four bytes, the first bytes includes
145          *  priority0 and priority1, the higher 4bit stands for priority1
146          *  while the lower 4bit stands for priority0, as below:
147          * first byte:  | pri_1 | pri_0 |
148          * second byte: | pri_3 | pri_2 |
149          * third byte:  | pri_5 | pri_4 |
150          * fourth byte: | pri_7 | pri_6 |
151          */
152         pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);
153
154         return 0;
155 }
156
157 static int
158 hns3_up_to_tc_map(struct hns3_hw *hw)
159 {
160         struct hns3_cmd_desc desc;
161         uint8_t *pri = (uint8_t *)desc.data;
162         uint8_t pri_id;
163         int ret;
164
165         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);
166
167         for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {
168                 ret = hns3_fill_pri_array(hw, pri, pri_id);
169                 if (ret)
170                         return ret;
171         }
172
173         return hns3_cmd_send(hw, &desc, 1);
174 }
175
176 static int
177 hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)
178 {
179         struct hns3_pg_to_pri_link_cmd *map;
180         struct hns3_cmd_desc desc;
181
182         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);
183
184         map = (struct hns3_pg_to_pri_link_cmd *)desc.data;
185
186         map->pg_id = pg_id;
187         map->pri_bit_map = pri_bit_map;
188
189         return hns3_cmd_send(hw, &desc, 1);
190 }
191
192 static int
193 hns3_pg_to_pri_map(struct hns3_hw *hw)
194 {
195         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
196         struct hns3_pf *pf = &hns->pf;
197         struct hns3_pg_info *pg_info;
198         int ret, i;
199
200         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
201                 return -EINVAL;
202
203         for (i = 0; i < hw->dcb_info.num_pg; i++) {
204                 /* Cfg pg to priority mapping */
205                 pg_info = &hw->dcb_info.pg_info[i];
206                 ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);
207                 if (ret)
208                         return ret;
209         }
210
211         return 0;
212 }
213
214 static int
215 hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)
216 {
217         struct hns3_qs_to_pri_link_cmd *map;
218         struct hns3_cmd_desc desc;
219
220         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);
221
222         map = (struct hns3_qs_to_pri_link_cmd *)desc.data;
223
224         map->qs_id = rte_cpu_to_le_16(qs_id);
225         map->priority = pri;
226         map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;
227
228         return hns3_cmd_send(hw, &desc, 1);
229 }
230
231 static int
232 hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)
233 {
234         struct hns3_qs_weight_cmd *weight;
235         struct hns3_cmd_desc desc;
236
237         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);
238
239         weight = (struct hns3_qs_weight_cmd *)desc.data;
240
241         weight->qs_id = rte_cpu_to_le_16(qs_id);
242         weight->dwrr = dwrr;
243
244         return hns3_cmd_send(hw, &desc, 1);
245 }
246
247 static int
248 hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
249 {
250 #define DEFAULT_TC_WEIGHT       1
251 #define DEFAULT_TC_OFFSET       14
252         struct hns3_ets_tc_weight_cmd *ets_weight;
253         struct hns3_cmd_desc desc;
254         uint8_t i;
255
256         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);
257         ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;
258
259         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
260                 struct hns3_pg_info *pg_info;
261
262                 ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
263
264                 if (!(hw->hw_tc_map & BIT(i)))
265                         continue;
266
267                 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
268                 ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
269         }
270
271         ets_weight->weight_offset = DEFAULT_TC_OFFSET;
272
273         return hns3_cmd_send(hw, &desc, 1);
274 }
275
276 static int
277 hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)
278 {
279         struct hns3_priority_weight_cmd *weight;
280         struct hns3_cmd_desc desc;
281
282         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);
283
284         weight = (struct hns3_priority_weight_cmd *)desc.data;
285
286         weight->pri_id = pri_id;
287         weight->dwrr = dwrr;
288
289         return hns3_cmd_send(hw, &desc, 1);
290 }
291
292 static int
293 hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)
294 {
295         struct hns3_pg_weight_cmd *weight;
296         struct hns3_cmd_desc desc;
297
298         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);
299
300         weight = (struct hns3_pg_weight_cmd *)desc.data;
301
302         weight->pg_id = pg_id;
303         weight->dwrr = dwrr;
304
305         return hns3_cmd_send(hw, &desc, 1);
306 }
307 static int
308 hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)
309 {
310         struct hns3_cmd_desc desc;
311
312         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);
313
314         if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)
315                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
316         else
317                 desc.data[1] = 0;
318
319         desc.data[0] = rte_cpu_to_le_32(pg_id);
320
321         return hns3_cmd_send(hw, &desc, 1);
322 }
323
324 static uint32_t
325 hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
326                            uint8_t bs_b, uint8_t bs_s)
327 {
328         uint32_t shapping_para = 0;
329
330         hns3_dcb_set_field(shapping_para, IR_B, ir_b);
331         hns3_dcb_set_field(shapping_para, IR_U, ir_u);
332         hns3_dcb_set_field(shapping_para, IR_S, ir_s);
333         hns3_dcb_set_field(shapping_para, BS_B, bs_b);
334         hns3_dcb_set_field(shapping_para, BS_S, bs_s);
335
336         return shapping_para;
337 }
338
339 static int
340 hns3_dcb_port_shaper_cfg(struct hns3_hw *hw)
341 {
342         struct hns3_port_shapping_cmd *shap_cfg_cmd;
343         struct hns3_shaper_parameter shaper_parameter;
344         uint32_t shapping_para;
345         uint32_t ir_u, ir_b, ir_s;
346         struct hns3_cmd_desc desc;
347         int ret;
348
349         ret = hns3_shaper_para_calc(hw, hw->mac.link_speed,
350                                     HNS3_SHAPER_LVL_PORT, &shaper_parameter);
351         if (ret) {
352                 hns3_err(hw, "calculate shaper parameter failed: %d", ret);
353                 return ret;
354         }
355
356         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);
357         shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;
358
359         ir_b = shaper_parameter.ir_b;
360         ir_u = shaper_parameter.ir_u;
361         ir_s = shaper_parameter.ir_s;
362         shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
363                                                    HNS3_SHAPER_BS_U_DEF,
364                                                    HNS3_SHAPER_BS_S_DEF);
365
366         shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);
367
368         return hns3_cmd_send(hw, &desc, 1);
369 }
370
371 static int
372 hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
373                          uint8_t pg_id, uint32_t shapping_para)
374 {
375         struct hns3_pg_shapping_cmd *shap_cfg_cmd;
376         enum hns3_opcode_type opcode;
377         struct hns3_cmd_desc desc;
378
379         opcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :
380                  HNS3_OPC_TM_PG_C_SHAPPING;
381         hns3_cmd_setup_basic_desc(&desc, opcode, false);
382
383         shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;
384
385         shap_cfg_cmd->pg_id = pg_id;
386
387         shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);
388
389         return hns3_cmd_send(hw, &desc, 1);
390 }
391
/*
 * Configure both shaper buckets of every priority group.
 * The CIR bucket is programmed with zero IR parameters (only the bucket
 * size defaults), while the PIR bucket carries the rate calculated from
 * the PG's bw_limit. Only valid in TC-based scheduling mode.
 * Returns 0 on success, negative errno otherwise.
 */
static int
hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_shaper_parameter shaper_parameter;
	struct hns3_pf *pf = &hns->pf;
	uint32_t ir_u, ir_b, ir_s;
	uint32_t shaper_para;
	uint8_t i;
	int ret;

	/* Cfg pg schd */
	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* Pg to pri */
	for (i = 0; i < hw->dcb_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hns3_shaper_para_calc(hw,
					    hw->dcb_info.pg_info[i].bw_limit,
					    HNS3_SHAPER_LVL_PG,
					    &shaper_parameter);
		if (ret) {
			hns3_err(hw, "calculate shaper parameter failed: %d",
				 ret);
			return ret;
		}

		/* CIR bucket: IR fields zeroed, only bucket-size defaults. */
		shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
							 HNS3_SHAPER_BS_U_DEF,
							 HNS3_SHAPER_BS_S_DEF);

		ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
					       shaper_para);
		if (ret) {
			hns3_err(hw,
				 "config PG CIR shaper parameter failed: %d",
				 ret);
			return ret;
		}

		/* PIR bucket: carries the calculated rate parameters. */
		ir_b = shaper_parameter.ir_b;
		ir_u = shaper_parameter.ir_u;
		ir_s = shaper_parameter.ir_s;
		shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
							 HNS3_SHAPER_BS_U_DEF,
							 HNS3_SHAPER_BS_S_DEF);

		ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
					       shaper_para);
		if (ret) {
			hns3_err(hw,
				 "config PG PIR shaper parameter failed: %d",
				 ret);
			return ret;
		}
	}

	return 0;
}
452
453 static int
454 hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)
455 {
456         struct hns3_cmd_desc desc;
457
458         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);
459
460         if (mode == HNS3_SCH_MODE_DWRR)
461                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
462         else
463                 desc.data[1] = 0;
464
465         desc.data[0] = rte_cpu_to_le_32(qs_id);
466
467         return hns3_cmd_send(hw, &desc, 1);
468 }
469
470 static int
471 hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)
472 {
473         struct hns3_cmd_desc desc;
474
475         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);
476
477         if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)
478                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
479         else
480                 desc.data[1] = 0;
481
482         desc.data[0] = rte_cpu_to_le_32(pri_id);
483
484         return hns3_cmd_send(hw, &desc, 1);
485 }
486
487 static int
488 hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
489                           uint8_t pri_id, uint32_t shapping_para)
490 {
491         struct hns3_pri_shapping_cmd *shap_cfg_cmd;
492         enum hns3_opcode_type opcode;
493         struct hns3_cmd_desc desc;
494
495         opcode = bucket ? HNS3_OPC_TM_PRI_P_SHAPPING :
496                  HNS3_OPC_TM_PRI_C_SHAPPING;
497
498         hns3_cmd_setup_basic_desc(&desc, opcode, false);
499
500         shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;
501
502         shap_cfg_cmd->pri_id = pri_id;
503
504         shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);
505
506         return hns3_cmd_send(hw, &desc, 1);
507 }
508
/*
 * Configure both shaper buckets of every TC's priority.
 * Mirrors hns3_dcb_pg_shaper_cfg: the CIR bucket gets zero IR parameters,
 * the PIR bucket gets the rate calculated from the TC's bw_limit.
 * Returns 0 on success, negative errno otherwise.
 */
static int
hns3_dcb_pri_tc_base_shaper_cfg(struct hns3_hw *hw)
{
	struct hns3_shaper_parameter shaper_parameter;
	uint32_t ir_u, ir_b, ir_s;
	uint32_t shaper_para;
	int ret, i;

	for (i = 0; i < hw->dcb_info.num_tc; i++) {
		ret = hns3_shaper_para_calc(hw,
					    hw->dcb_info.tc_info[i].bw_limit,
					    HNS3_SHAPER_LVL_PRI,
					    &shaper_parameter);
		if (ret) {
			hns3_err(hw, "calculate shaper parameter failed: %d",
				 ret);
			return ret;
		}

		/* CIR bucket: IR fields zeroed, only bucket-size defaults. */
		shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
							 HNS3_SHAPER_BS_U_DEF,
							 HNS3_SHAPER_BS_S_DEF);

		ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
						shaper_para);
		if (ret) {
			hns3_err(hw,
				 "config priority CIR shaper parameter failed: %d",
				 ret);
			return ret;
		}

		/* PIR bucket: carries the calculated rate parameters. */
		ir_b = shaper_parameter.ir_b;
		ir_u = shaper_parameter.ir_u;
		ir_s = shaper_parameter.ir_s;
		shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
							 HNS3_SHAPER_BS_U_DEF,
							 HNS3_SHAPER_BS_S_DEF);

		ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
						shaper_para);
		if (ret) {
			hns3_err(hw,
				 "config priority PIR shaper parameter failed: %d",
				 ret);
			return ret;
		}
	}

	return 0;
}
560
561
562 static int
563 hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
564 {
565         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
566         struct hns3_pf *pf = &hns->pf;
567         int ret;
568
569         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
570                 return -EINVAL;
571
572         ret = hns3_dcb_pri_tc_base_shaper_cfg(hw);
573         if (ret)
574                 hns3_err(hw, "config port shaper failed: %d", ret);
575
576         return ret;
577 }
578
/*
 * Split nb_rx_q Rx queues evenly across the enabled TCs and record the
 * resulting RSS size. nb_rx_q must be an integral multiple of hw->num_tc
 * and the per-TC share must not exceed the hardware RSS limit.
 * Returns 0 on success, -EINVAL on an invalid queue count.
 */
static int
hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
{
	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
	uint16_t rx_qnum_per_tc;
	uint16_t used_rx_queues;
	int i;

	rx_qnum_per_tc = nb_rx_q / hw->num_tc;
	if (rx_qnum_per_tc > hw->rss_size_max) {
		hns3_err(hw, "rx queue number of per tc (%u) is greater than "
			 "value (%u) hardware supported.",
			 rx_qnum_per_tc, hw->rss_size_max);
		return -EINVAL;
	}

	used_rx_queues = hw->num_tc * rx_qnum_per_tc;
	if (used_rx_queues != nb_rx_q) {
		hns3_err(hw, "rx queue number (%u) configured must be an "
			 "integral multiple of valid tc number (%u).",
			 nb_rx_q, hw->num_tc);
		return -EINVAL;
	}
	hw->alloc_rss_size = rx_qnum_per_tc;
	hw->used_rx_queues = used_rx_queues;

	/*
	 * When rss size is changed, we need to update rss redirection table
	 * maintained by driver. Besides, during the entire reset process, we
	 * need to ensure that the rss table information are not overwritten
	 * and configured directly to the hardware in the RESET_STAGE_RESTORE
	 * stage of the reset process.
	 */
	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
		/* Spread the indirection table round-robin over the new size. */
		for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
			rss_cfg->rss_indirection_tbl[i] =
							i % hw->alloc_rss_size;
	}

	return 0;
}
620
621 static int
622 hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_tx_q)
623 {
624         struct hns3_tc_queue_info *tc_queue;
625         uint16_t used_tx_queues;
626         uint16_t tx_qnum_per_tc;
627         uint8_t i;
628
629         tx_qnum_per_tc = nb_tx_q / hw->num_tc;
630         used_tx_queues = hw->num_tc * tx_qnum_per_tc;
631         if (used_tx_queues != nb_tx_q) {
632                 hns3_err(hw, "tx queue number (%u) configured must be an "
633                          "integral multiple of valid tc number (%u).",
634                          nb_tx_q, hw->num_tc);
635                 return -EINVAL;
636         }
637
638         hw->used_tx_queues = used_tx_queues;
639         hw->tx_qnum_per_tc = tx_qnum_per_tc;
640         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
641                 tc_queue = &hw->tc_queue[i];
642                 if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
643                         tc_queue->enable = true;
644                         tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
645                         tc_queue->tqp_count = hw->tx_qnum_per_tc;
646                         tc_queue->tc = i;
647                 } else {
648                         /* Set to default queue if TC is disable */
649                         tc_queue->enable = false;
650                         tc_queue->tqp_offset = 0;
651                         tc_queue->tqp_count = 0;
652                         tc_queue->tc = 0;
653                 }
654         }
655
656         return 0;
657 }
658
/* Map Rx then Tx queues onto the enabled TCs. */
int
hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q)
{
	int ret;

	ret = hns3_set_rss_size(hw, nb_rx_q);
	if (ret == 0)
		ret = hns3_tc_queue_mapping_cfg(hw, nb_tx_q);

	return ret;
}
670
671 static int
672 hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
673                                  uint16_t nb_tx_q)
674 {
675         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
676         struct hns3_pf *pf = &hns->pf;
677         int ret;
678
679         hw->num_tc = hw->dcb_info.num_tc;
680         ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
681         if (ret)
682                 return ret;
683
684         if (!hns->is_vf)
685                 memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
686
687         return 0;
688 }
689
/*
 * Initialize the driver's DCB bookkeeping to its default state:
 * a single active PG (PG0) owning all enabled TCs with full bandwidth,
 * every user priority mapped to TC0, and every TC in DWRR mode with the
 * PG0 bandwidth limit. Multiple PGs are only allowed in TC-based
 * scheduling mode. Returns 0 on success, -EINVAL otherwise.
 */
int
hns3_dcb_info_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int i, k;

	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
	    hw->dcb_info.num_pg != 1)
		return -EINVAL;

	/* Initializing PG information */
	memset(hw->dcb_info.pg_info, 0,
	       sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
	for (i = 0; i < hw->dcb_info.num_pg; i++) {
		/* PG0 gets all the bandwidth; other PGs get none by default. */
		hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;
		hw->dcb_info.pg_info[i].pg_id = i;
		hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;
		hw->dcb_info.pg_info[i].bw_limit = HNS3_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		/* Only PG0 owns TCs; each enabled TC gets the full weight. */
		hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;
		for (k = 0; k < hw->dcb_info.num_tc; k++)
			hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;
	}

	/* All UPs mapping to TC0 */
	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
		hw->dcb_info.prio_tc[i] = 0;

	/* Initializing tc information */
	memset(hw->dcb_info.tc_info, 0,
	       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
	for (i = 0; i < hw->dcb_info.num_tc; i++) {
		hw->dcb_info.tc_info[i].tc_id = i;
		hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
		hw->dcb_info.tc_info[i].pgid = 0;
		hw->dcb_info.tc_info[i].bw_limit =
			hw->dcb_info.pg_info[0].bw_limit;
	}

	return 0;
}
735
736 static int
737 hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)
738 {
739         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
740         struct hns3_pf *pf = &hns->pf;
741         int ret, i;
742
743         /* Only being config on TC-Based scheduler mode */
744         if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)
745                 return -EINVAL;
746
747         for (i = 0; i < hw->dcb_info.num_pg; i++) {
748                 ret = hns3_dcb_pg_schd_mode_cfg(hw, i);
749                 if (ret)
750                         return ret;
751         }
752
753         return 0;
754 }
755
756 static int
757 hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)
758 {
759         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
760         struct hns3_pf *pf = &hns->pf;
761         uint8_t i;
762         int ret;
763
764         if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {
765                 for (i = 0; i < hw->dcb_info.num_tc; i++) {
766                         ret = hns3_dcb_pri_schd_mode_cfg(hw, i);
767                         if (ret)
768                                 return ret;
769
770                         ret = hns3_dcb_qs_schd_mode_cfg(hw, i,
771                                                         HNS3_SCH_MODE_DWRR);
772                         if (ret)
773                                 return ret;
774                 }
775         }
776
777         return 0;
778 }
779
/* Configure scheduler modes top-down: PG level first, then pri/qset. */
static int
hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_lvl2_schd_mode_cfg(hw);
	if (ret != 0) {
		hns3_err(hw, "config lvl2_schd_mode failed: %d", ret);
	} else {
		ret = hns3_dcb_lvl34_schd_mode_cfg(hw);
		if (ret != 0)
			hns3_err(hw, "config lvl34_schd_mode failed: %d", ret);
	}

	return ret;
}
797
798 static int
799 hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)
800 {
801         struct hns3_pg_info *pg_info;
802         uint8_t dwrr;
803         int ret, i;
804
805         for (i = 0; i < hw->dcb_info.num_tc; i++) {
806                 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
807                 dwrr = pg_info->tc_dwrr[i];
808
809                 ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);
810                 if (ret) {
811                         hns3_err(hw,
812                                "fail to send priority weight cmd: %d, ret = %d",
813                                i, ret);
814                         return ret;
815                 }
816
817                 ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);
818                 if (ret) {
819                         hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d",
820                                  i, ret);
821                         return ret;
822                 }
823         }
824
825         return 0;
826 }
827
828 static int
829 hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
830 {
831         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
832         struct hns3_pf *pf = &hns->pf;
833         uint32_t version;
834         int ret;
835
836         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
837                 return -EINVAL;
838
839         ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);
840         if (ret)
841                 return ret;
842
843         if (!hns3_dev_dcb_supported(hw))
844                 return 0;
845
846         ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
847         if (ret == -EOPNOTSUPP) {
848                 version = hw->fw_version;
849                 hns3_warn(hw,
850                           "fw %lu.%lu.%lu.%lu doesn't support ets tc weight cmd",
851                           hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
852                                          HNS3_FW_VERSION_BYTE3_S),
853                           hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
854                                          HNS3_FW_VERSION_BYTE2_S),
855                           hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
856                                          HNS3_FW_VERSION_BYTE1_S),
857                           hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
858                                          HNS3_FW_VERSION_BYTE0_S));
859                 ret = 0;
860         }
861
862         return ret;
863 }
864
865 static int
866 hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)
867 {
868         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
869         struct hns3_pf *pf = &hns->pf;
870         int ret, i;
871
872         /* Cfg pg schd */
873         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
874                 return -EINVAL;
875
876         /* Cfg pg to prio */
877         for (i = 0; i < hw->dcb_info.num_pg; i++) {
878                 /* Cfg dwrr */
879                 ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);
880                 if (ret)
881                         return ret;
882         }
883
884         return 0;
885 }
886
/* Configure DWRR weights for both the PG level and the priority level. */
static int
hns3_dcb_dwrr_cfg(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_pg_dwrr_cfg(hw);
	if (ret != 0) {
		hns3_err(hw, "config pg_dwrr failed: %d", ret);
	} else {
		ret = hns3_dcb_pri_dwrr_cfg(hw);
		if (ret != 0)
			hns3_err(hw, "config pri_dwrr failed: %d", ret);
	}

	return ret;
}
904
/* Configure shapers from the outside in: port, then PG, then priority. */
static int
hns3_dcb_shaper_cfg(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_port_shaper_cfg(hw);
	if (ret != 0) {
		hns3_err(hw, "config port shaper failed: %d", ret);
		return ret;
	}

	ret = hns3_dcb_pg_shaper_cfg(hw);
	if (ret != 0) {
		hns3_err(hw, "config pg shaper failed: %d", ret);
		return ret;
	}

	return hns3_dcb_pri_shaper_cfg(hw);
}
924
/*
 * Link one Tx queue (nq) to one queue set (qs) in hardware.
 *
 * @q_id:  Tx queue id to link.
 * @qs_id: queue set id the queue is attached to.
 *
 * Returns 0 on success, negative errno from the firmware command otherwise.
 */
static int
hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)
{
	struct hns3_nq_to_qs_link_cmd *map;
	struct hns3_cmd_desc desc;
	uint16_t tmp_qs_id = 0;
	uint16_t qs_id_l;
	uint16_t qs_id_h;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hns3_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = rte_cpu_to_le_16(q_id);

	/*
	 * Network engine with revision_id 0x21 uses 0~9 bit of qs_id to
	 * configure qset_id. So we need to convert qs_id to the follow
	 * format to support qset_id > 1024.
	 * qs_id: | 15 | 14 ~ 10 |  9 ~ 0   |
	 *            /         / \         \
	 *           /         /   \         \
	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
	 *          | qs_id_h | vld | qs_id_l |
	 */
	/* Split qs_id into low (bits 0-9) and high (bits 10-14) parts. */
	qs_id_l = hns3_get_field(qs_id, HNS3_DCB_QS_ID_L_MSK,
				 HNS3_DCB_QS_ID_L_S);
	qs_id_h = hns3_get_field(qs_id, HNS3_DCB_QS_ID_H_MSK,
				 HNS3_DCB_QS_ID_H_S);
	/* Repack: low bits stay in place, high bits move above the vld bit. */
	hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_L_MSK, HNS3_DCB_QS_ID_L_S,
		       qs_id_l);
	hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_H_EXT_MSK,
		       HNS3_DCB_QS_ID_H_EXT_S, qs_id_h);
	/* Set the link-valid bit so hardware honours the mapping. */
	map->qset_id = rte_cpu_to_le_16(tmp_qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);

	return hns3_cmd_send(hw, &desc, 1);
}
962
963 static int
964 hns3_q_to_qs_map(struct hns3_hw *hw)
965 {
966         struct hns3_tc_queue_info *tc_queue;
967         uint16_t q_id;
968         uint32_t i, j;
969         int ret;
970
971         for (i = 0; i < hw->num_tc; i++) {
972                 tc_queue = &hw->tc_queue[i];
973                 for (j = 0; j < tc_queue->tqp_count; j++) {
974                         q_id = tc_queue->tqp_offset + j;
975                         ret = hns3_q_to_qs_map_cfg(hw, q_id, i);
976                         if (ret)
977                                 return ret;
978                 }
979         }
980
981         return 0;
982 }
983
984 static int
985 hns3_pri_q_qs_cfg(struct hns3_hw *hw)
986 {
987         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
988         struct hns3_pf *pf = &hns->pf;
989         uint32_t i;
990         int ret;
991
992         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
993                 return -EINVAL;
994
995         /* Cfg qs -> pri mapping */
996         for (i = 0; i < hw->num_tc; i++) {
997                 ret = hns3_qs_to_pri_map_cfg(hw, i, i);
998                 if (ret) {
999                         hns3_err(hw, "qs_to_pri mapping fail: %d", ret);
1000                         return ret;
1001                 }
1002         }
1003
1004         /* Cfg q -> qs mapping */
1005         ret = hns3_q_to_qs_map(hw);
1006         if (ret)
1007                 hns3_err(hw, "nq_to_qs mapping fail: %d", ret);
1008
1009         return ret;
1010 }
1011
/* Program the full DCB mapping chain: UP->TC, PG->priority, pri/qs/queue. */
static int
hns3_dcb_map_cfg(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_up_to_tc_map(hw);
	if (ret != 0) {
		hns3_err(hw, "up_to_tc mapping fail: %d", ret);
		return ret;
	}

	ret = hns3_pg_to_pri_map(hw);
	if (ret != 0) {
		hns3_err(hw, "pri_to_pg mapping fail: %d", ret);
		return ret;
	}

	return hns3_pri_q_qs_cfg(hw);
}
1031
/*
 * Set up the complete scheduler: mapping, shapers, DWRR weights and,
 * last, the scheduling mode of every level.
 */
static int
hns3_dcb_schd_setup_hw(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_map_cfg(hw);
	if (ret == 0)
		ret = hns3_dcb_shaper_cfg(hw);
	if (ret == 0)
		ret = hns3_dcb_dwrr_cfg(hw);
	if (ret == 0)
		ret = hns3_dcb_schd_mode_cfg(hw);

	return ret;
}
1055
1056 static int
1057 hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,
1058                      uint8_t pause_trans_gap, uint16_t pause_trans_time)
1059 {
1060         struct hns3_cfg_pause_param_cmd *pause_param;
1061         struct hns3_cmd_desc desc;
1062
1063         pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1064
1065         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);
1066
1067         memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);
1068         memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);
1069         pause_param->pause_trans_gap = pause_trans_gap;
1070         pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);
1071
1072         return hns3_cmd_send(hw, &desc, 1);
1073 }
1074
1075 int
1076 hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)
1077 {
1078         struct hns3_cfg_pause_param_cmd *pause_param;
1079         struct hns3_cmd_desc desc;
1080         uint16_t trans_time;
1081         uint8_t trans_gap;
1082         int ret;
1083
1084         pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1085
1086         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);
1087
1088         ret = hns3_cmd_send(hw, &desc, 1);
1089         if (ret)
1090                 return ret;
1091
1092         trans_gap = pause_param->pause_trans_gap;
1093         trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);
1094
1095         return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);
1096 }
1097
1098 static int
1099 hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
1100 {
1101 #define PAUSE_TIME_DIV_BY       2
1102 #define PAUSE_TIME_MIN_VALUE    0x4
1103
1104         struct hns3_mac *mac = &hw->mac;
1105         uint8_t pause_trans_gap;
1106
1107         /*
1108          * Pause transmit gap must be less than "pause_time / 2", otherwise
1109          * the behavior of MAC is undefined.
1110          */
1111         if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1112                 pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
1113         else if (pause_time >= PAUSE_TIME_MIN_VALUE &&
1114                  pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1115                 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1116         else {
1117                 hns3_warn(hw, "pause_time(%d) is adjusted to 4", pause_time);
1118                 pause_time = PAUSE_TIME_MIN_VALUE;
1119                 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1120         }
1121
1122         return hns3_pause_param_cfg(hw, mac->mac_addr,
1123                                     pause_trans_gap, pause_time);
1124 }
1125
1126 static int
1127 hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)
1128 {
1129         struct hns3_cmd_desc desc;
1130
1131         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);
1132
1133         desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1134                 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1135
1136         return hns3_cmd_send(hw, &desc, 1);
1137 }
1138
1139 static int
1140 hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)
1141 {
1142         struct hns3_cmd_desc desc;
1143         struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;
1144
1145         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);
1146
1147         pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1148                                         (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1149
1150         pfc->pri_en_bitmap = pfc_bitmap;
1151
1152         return hns3_cmd_send(hw, &desc, 1);
1153 }
1154
1155 static int
1156 hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)
1157 {
1158         struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
1159         struct hns3_cmd_desc desc;
1160
1161         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);
1162
1163         bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;
1164
1165         bp_to_qs_map_cmd->tc_id = tc;
1166         bp_to_qs_map_cmd->qs_group_id = grp_id;
1167         bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);
1168
1169         return hns3_cmd_send(hw, &desc, 1);
1170 }
1171
1172 static void
1173 hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
1174 {
1175         switch (hw->current_mode) {
1176         case HNS3_FC_NONE:
1177                 *tx_en = false;
1178                 *rx_en = false;
1179                 break;
1180         case HNS3_FC_RX_PAUSE:
1181                 *tx_en = false;
1182                 *rx_en = true;
1183                 break;
1184         case HNS3_FC_TX_PAUSE:
1185                 *tx_en = true;
1186                 *rx_en = false;
1187                 break;
1188         case HNS3_FC_FULL:
1189                 *tx_en = true;
1190                 *rx_en = true;
1191                 break;
1192         default:
1193                 *tx_en = false;
1194                 *rx_en = false;
1195                 break;
1196         }
1197 }
1198
1199 static int
1200 hns3_mac_pause_setup_hw(struct hns3_hw *hw)
1201 {
1202         bool tx_en, rx_en;
1203
1204         if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
1205                 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1206         else {
1207                 tx_en = false;
1208                 rx_en = false;
1209         }
1210
1211         return hns3_mac_pause_en_cfg(hw, tx_en, rx_en);
1212 }
1213
1214 static int
1215 hns3_pfc_setup_hw(struct hns3_hw *hw)
1216 {
1217         bool tx_en, rx_en;
1218
1219         if (hw->current_fc_status == HNS3_FC_STATUS_PFC)
1220                 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1221         else {
1222                 tx_en = false;
1223                 rx_en = false;
1224         }
1225
1226         return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);
1227 }
1228
1229 /*
1230  * Each Tc has a 1024 queue sets to backpress, it divides to
1231  * 32 group, each group contains 32 queue sets, which can be
1232  * represented by uint32_t bitmap.
1233  */
1234 static int
1235 hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)
1236 {
1237         uint32_t qs_bitmap;
1238         int ret;
1239         int i;
1240
1241         for (i = 0; i < HNS3_BP_GRP_NUM; i++) {
1242                 uint8_t grp, sub_grp;
1243                 qs_bitmap = 0;
1244
1245                 grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
1246                 sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,
1247                                          HNS3_BP_SUB_GRP_ID_S);
1248                 if (i == grp)
1249                         qs_bitmap |= (1 << sub_grp);
1250
1251                 ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);
1252                 if (ret)
1253                         return ret;
1254         }
1255
1256         return 0;
1257 }
1258
1259 static int
1260 hns3_dcb_bp_setup(struct hns3_hw *hw)
1261 {
1262         int ret, i;
1263
1264         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1265                 ret = hns3_bp_setup_hw(hw, i);
1266                 if (ret)
1267                         return ret;
1268         }
1269
1270         return 0;
1271 }
1272
/*
 * Program every pause-related setting: pause parameters (transmit
 * gap/time), MAC-level pause enables and, on DCB-capable devices,
 * PFC enables plus the qset back-pressure mapping.
 * Returns 0 on success, a negative errno on the first failure.
 */
static int
hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	ret = hns3_pause_param_setup_hw(hw, pf->pause_time);
	if (ret) {
		hns3_err(hw, "Fail to set pause parameter. ret = %d", ret);
		return ret;
	}

	ret = hns3_mac_pause_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret);
		return ret;
	}

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hns3_dev_dcb_supported(hw))
		return 0;

	ret = hns3_pfc_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "config pfc failed! ret = %d", ret);
		return ret;
	}

	return hns3_dcb_bp_setup(hw);
}
1304
1305 static uint8_t
1306 hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)
1307 {
1308         uint8_t pfc_map = 0;
1309         uint8_t *prio_tc;
1310         uint8_t i, j;
1311
1312         prio_tc = hw->dcb_info.prio_tc;
1313         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1314                 for (j = 0; j < HNS3_MAX_USER_PRIO; j++) {
1315                         if (prio_tc[j] == i && pfc_en & BIT(j)) {
1316                                 pfc_map |= BIT(i);
1317                                 break;
1318                         }
1319                 }
1320         }
1321
1322         return pfc_map;
1323 }
1324
/*
 * Compare the user-requested DCB configuration against the currently
 * applied one.
 *
 * @tc:      out, number of TCs implied by the requested prio->tc map
 *           (highest referenced TC index + 1).
 * @changed: out, set to true when any of prio->tc map, TC count, flow
 *           control mode or PFC enable mask differs from the current
 *           state; left untouched otherwise.
 */
static void
hns3_dcb_cfg_validate(struct hns3_adapter *hns, uint8_t *tc, bool *changed)
{
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct hns3_hw *hw = &hns->hw;
	uint8_t max_tc = 0;
	uint8_t pfc_en;
	int i;

	dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
	for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
		if (dcb_rx_conf->dcb_tc[i] != hw->dcb_info.prio_tc[i])
			*changed = true;

		if (dcb_rx_conf->dcb_tc[i] > max_tc)
			max_tc = dcb_rx_conf->dcb_tc[i];
	}
	*tc = max_tc + 1;
	if (*tc != hw->dcb_info.num_tc)
		*changed = true;

	/*
	 * We ensure that dcb information can be reconfigured
	 * after the hns3_priority_flow_ctrl_set function called.
	 */
	if (hw->current_mode != HNS3_FC_FULL)
		*changed = true;
	/* PFC is requested on the lowest nb_tcs user priorities. */
	pfc_en = RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
	if (hw->dcb_info.pfc_en != pfc_en)
		*changed = true;
}
1356
/*
 * Rebuild the software DCB state from the requested rx DCB config:
 * all TCs are placed in PG0, bandwidth is shared equally between the
 * valid TCs, and the prio->tc map is copied from the user config.
 * Finishes by refreshing the TC <-> queue mapping.
 * Returns 0 on success, negative errno otherwise.
 */
static int
hns3_dcb_info_cfg(struct hns3_adapter *hns)
{
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint8_t tc_bw, bw_rest;
	uint8_t i, j;
	int ret;

	dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
	pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
	pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;

	/* Config pg0 */
	memset(hw->dcb_info.pg_info, 0,
	       sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
	hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT;
	hw->dcb_info.pg_info[0].pg_id = 0;
	hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;
	hw->dcb_info.pg_info[0].bw_limit = HNS3_ETHER_MAX_RATE;
	hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;

	/* Each tc has same bw for valid tc by default */
	tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;
	for (i = 0; i < hw->dcb_info.num_tc; i++)
		hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;
	/* To ensure the sum of tc_dwrr is equal to 100 */
	bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;
	for (j = 0; j < bw_rest; j++)
		hw->dcb_info.pg_info[0].tc_dwrr[j]++;
	/* Remaining requested-but-invalid TCs get zero weight. */
	for (; i < dcb_rx_conf->nb_tcs; i++)
		hw->dcb_info.pg_info[0].tc_dwrr[i] = 0;

	/* All tcs map to pg0 */
	memset(hw->dcb_info.tc_info, 0,
	       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
	for (i = 0; i < hw->dcb_info.num_tc; i++) {
		hw->dcb_info.tc_info[i].tc_id = i;
		hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
		hw->dcb_info.tc_info[i].pgid = 0;
		hw->dcb_info.tc_info[i].bw_limit =
					hw->dcb_info.pg_info[0].bw_limit;
	}

	/* Copy the user-requested user priority -> TC map. */
	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
		hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];

	ret = hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
					       hw->data->nb_tx_queues);
	if (ret)
		hns3_err(hw, "update tc queue mapping failed, ret = %d.", ret);

	return ret;
}
1412
/*
 * Validate the requested TC count against the scheduling mode and the
 * configured queue counts, then update num_tc / hw_tc_map and refresh
 * the derived DCB state via hns3_dcb_info_cfg().
 * Returns 0 on success, -EINVAL on invalid configuration.
 */
static int
hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
{
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint16_t nb_rx_q = hw->data->nb_rx_queues;
	uint16_t nb_tx_q = hw->data->nb_tx_queues;
	uint8_t bit_map = 0;
	uint8_t i;

	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
	    hw->dcb_info.num_pg != 1)
		return -EINVAL;

	/* Each TC needs at least one Rx queue and one Tx queue. */
	if (nb_rx_q < num_tc) {
		hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
			 nb_rx_q, num_tc);
		return -EINVAL;
	}

	if (nb_tx_q < num_tc) {
		hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
			 nb_tx_q, num_tc);
		return -EINVAL;
	}

	/* Currently not support uncontinuous tc */
	hw->dcb_info.num_tc = num_tc;
	for (i = 0; i < hw->dcb_info.num_tc; i++)
		bit_map |= BIT(i);

	/* num_tc == 0 degenerates to a single TC0. */
	if (!bit_map) {
		bit_map = 1;
		hw->dcb_info.num_tc = 1;
	}
	hw->hw_tc_map = bit_map;

	return hns3_dcb_info_cfg(hns);
}
1452
/*
 * Apply the software DCB state to hardware: scheduler first, then — if
 * PFC is requested via dcb_capability_en — packet buffer allocation and
 * pause/PFC setup. On PFC setup failure the previous FC mode/status and
 * PFC map are restored and the packet buffer is re-allocated to match.
 * Returns 0 on success, negative errno otherwise.
 */
static int
hns3_dcb_hw_configure(struct hns3_adapter *hns)
{
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	/* Snapshot current state for rollback on PFC setup failure. */
	enum hns3_fc_status fc_status = hw->current_fc_status;
	enum hns3_fc_mode current_mode = hw->current_mode;
	uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
	int ret, status;

	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
	    pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
		return -ENOTSUP;

	ret = hns3_dcb_schd_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "dcb schdule configure failed! ret = %d", ret);
		return ret;
	}

	if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
		dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
		if (dcb_rx_conf->nb_tcs == 0)
			hw->dcb_info.pfc_en = 1; /* tc0 only */
		else
			hw->dcb_info.pfc_en =
			RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);

		hw->dcb_info.hw_pfc_map =
				hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);

		ret = hns3_buffer_alloc(hw);
		if (ret)
			return ret;

		hw->current_fc_status = HNS3_FC_STATUS_PFC;
		hw->current_mode = HNS3_FC_FULL;
		ret = hns3_dcb_pause_setup_hw(hw);
		if (ret) {
			hns3_err(hw, "setup pfc failed! ret = %d", ret);
			goto pfc_setup_fail;
		}
	} else {
		/*
		 * Although dcb_capability_en is lack of ETH_DCB_PFC_SUPPORT
		 * flag, the DCB information is configured, such as tc numbers.
		 * Therefore, refreshing the allocation of packet buffer is
		 * necessary.
		 */
		ret = hns3_buffer_alloc(hw);
		if (ret)
			return ret;
	}

	return 0;

pfc_setup_fail:
	/* Restore the snapshotted state and re-allocate buffers to match. */
	hw->current_mode = current_mode;
	hw->current_fc_status = fc_status;
	hw->dcb_info.hw_pfc_map = hw_pfc_map;
	status = hns3_buffer_alloc(hw);
	if (status)
		hns3_err(hw, "recover packet buffer fail! status = %d", status);

	return ret;
}
1520
1521 /*
1522  * hns3_dcb_configure - setup dcb related config
1523  * @hns: pointer to hns3 adapter
1524  * Returns 0 on success, negative value on failure.
1525  */
1526 int
1527 hns3_dcb_configure(struct hns3_adapter *hns)
1528 {
1529         struct hns3_hw *hw = &hns->hw;
1530         bool map_changed = false;
1531         uint8_t num_tc = 0;
1532         int ret;
1533
1534         hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
1535         if (map_changed || rte_atomic16_read(&hw->reset.resetting)) {
1536                 ret = hns3_dcb_info_update(hns, num_tc);
1537                 if (ret) {
1538                         hns3_err(hw, "dcb info update failed: %d", ret);
1539                         return ret;
1540                 }
1541
1542                 ret = hns3_dcb_hw_configure(hns);
1543                 if (ret) {
1544                         hns3_err(hw, "dcb sw configure failed: %d", ret);
1545                         return ret;
1546                 }
1547         }
1548
1549         return 0;
1550 }
1551
/* Program the scheduler and all pause settings into hardware. */
int
hns3_dcb_init_hw(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_schd_setup_hw(hw);
	if (ret != 0) {
		hns3_err(hw, "dcb schedule setup failed: %d", ret);
	} else {
		ret = hns3_dcb_pause_setup_hw(hw);
		if (ret != 0)
			hns3_err(hw, "PAUSE setup failed: %d", ret);
	}

	return ret;
}
1569
/*
 * Initialize DCB: on first-time driver init, set default flow-control
 * state, DCB info and the TC <-> queue mapping; in all cases (init and
 * reset recovery) program the DCB hardware from the software state.
 * Returns 0 on success, negative errno otherwise.
 */
int
hns3_dcb_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t default_tqp_num;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/*
	 * According to the 'adapter_state' identifier, the following branch
	 * is only executed to initialize default configurations of dcb during
	 * the initializing driver process. Due to driver saving dcb-related
	 * information before reset triggered, the reinit dev stage of the
	 * reset process can not access to the branch, or those information
	 * will be changed.
	 */
	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
		hw->requested_mode = HNS3_FC_NONE;
		hw->current_mode = hw->requested_mode;
		pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
		hw->current_fc_status = HNS3_FC_STATUS_NONE;

		ret = hns3_dcb_info_init(hw);
		if (ret) {
			hns3_err(hw, "dcb info init failed, ret = %d.", ret);
			return ret;
		}

		/*
		 * The number of queues configured by default cannot exceed
		 * the maximum number of queues for a single TC.
		 */
		default_tqp_num = RTE_MIN(hw->rss_size_max,
					  hw->tqps_num / hw->dcb_info.num_tc);
		ret = hns3_dcb_update_tc_queue_mapping(hw, default_tqp_num,
						       default_tqp_num);
		if (ret) {
			hns3_err(hw,
				 "update tc queue mapping failed, ret = %d.",
				 ret);
			return ret;
		}
	}

	/*
	 * DCB hardware will be configured by following the function during
	 * the initializing driver process and the reset process. However,
	 * driver will restore directly configurations of dcb hardware based
	 * on dcb-related information soft maintained when driver
	 * initialization has finished and reset is coming.
	 */
	ret = hns3_dcb_init_hw(hw);
	if (ret) {
		hns3_err(hw, "dcb init hardware failed, ret = %d.", ret);
		return ret;
	}

	return 0;
}
1631
1632 static int
1633 hns3_update_queue_map_configure(struct hns3_adapter *hns)
1634 {
1635         struct hns3_hw *hw = &hns->hw;
1636         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1637         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1638         int ret;
1639
1640         ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
1641         if (ret) {
1642                 hns3_err(hw, "failed to update tc queue mapping, ret = %d.",
1643                          ret);
1644                 return ret;
1645         }
1646         ret = hns3_q_to_qs_map(hw);
1647         if (ret)
1648                 hns3_err(hw, "failed to map nq to qs, ret = %d.", ret);
1649
1650         return ret;
1651 }
1652
1653 int
1654 hns3_dcb_cfg_update(struct hns3_adapter *hns)
1655 {
1656         struct hns3_hw *hw = &hns->hw;
1657         enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
1658         int ret;
1659
1660         if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
1661                 ret = hns3_dcb_configure(hns);
1662                 if (ret)
1663                         hns3_err(hw, "Failed to config dcb: %d", ret);
1664         } else {
1665                 /*
1666                  * Update queue map without PFC configuration,
1667                  * due to queues reconfigured by user.
1668                  */
1669                 ret = hns3_update_queue_map_configure(hns);
1670                 if (ret)
1671                         hns3_err(hw,
1672                                  "Failed to update queue mapping configure: %d",
1673                                  ret);
1674         }
1675
1676         return ret;
1677 }
1678
1679 /*
1680  * hns3_dcb_pfc_enable - Enable priority flow control
1681  * @dev: pointer to ethernet device
1682  *
1683  * Configures the pfc settings for one porority.
1684  */
1685 int
1686 hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
1687 {
1688         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1689         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1690         enum hns3_fc_status fc_status = hw->current_fc_status;
1691         enum hns3_fc_mode current_mode = hw->current_mode;
1692         uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1693         uint8_t pfc_en = hw->dcb_info.pfc_en;
1694         uint8_t priority = pfc_conf->priority;
1695         uint16_t pause_time = pf->pause_time;
1696         int ret, status;
1697
1698         pf->pause_time = pfc_conf->fc.pause_time;
1699         hw->current_mode = hw->requested_mode;
1700         hw->current_fc_status = HNS3_FC_STATUS_PFC;
1701         hw->dcb_info.pfc_en |= BIT(priority);
1702         hw->dcb_info.hw_pfc_map =
1703                         hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1704         ret = hns3_buffer_alloc(hw);
1705         if (ret)
1706                 goto pfc_setup_fail;
1707
1708         /*
1709          * The flow control mode of all UPs will be changed based on
1710          * current_mode coming from user.
1711          */
1712         ret = hns3_dcb_pause_setup_hw(hw);
1713         if (ret) {
1714                 hns3_err(hw, "enable pfc failed! ret = %d", ret);
1715                 goto pfc_setup_fail;
1716         }
1717
1718         return 0;
1719
1720 pfc_setup_fail:
1721         hw->current_mode = current_mode;
1722         hw->current_fc_status = fc_status;
1723         pf->pause_time = pause_time;
1724         hw->dcb_info.pfc_en = pfc_en;
1725         hw->dcb_info.hw_pfc_map = hw_pfc_map;
1726         status = hns3_buffer_alloc(hw);
1727         if (status)
1728                 hns3_err(hw, "recover packet buffer fail: %d", status);
1729
1730         return ret;
1731 }
1732
1733 /*
1734  * hns3_fc_enable - Enable MAC pause
1735  * @dev: pointer to ethernet device
1736  *
1737  * Configures the MAC pause settings.
1738  */
1739 int
1740 hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1741 {
1742         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1743         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1744         enum hns3_fc_status fc_status = hw->current_fc_status;
1745         enum hns3_fc_mode current_mode = hw->current_mode;
1746         uint16_t pause_time = pf->pause_time;
1747         int ret;
1748
1749         pf->pause_time = fc_conf->pause_time;
1750         hw->current_mode = hw->requested_mode;
1751
1752         /*
1753          * In fact, current_fc_status is HNS3_FC_STATUS_NONE when mode
1754          * of flow control is configured to be HNS3_FC_NONE.
1755          */
1756         if (hw->current_mode == HNS3_FC_NONE)
1757                 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1758         else
1759                 hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;
1760
1761         ret = hns3_dcb_pause_setup_hw(hw);
1762         if (ret) {
1763                 hns3_err(hw, "enable MAC Pause failed! ret = %d", ret);
1764                 goto setup_fc_fail;
1765         }
1766
1767         return 0;
1768
1769 setup_fc_fail:
1770         hw->current_mode = current_mode;
1771         hw->current_fc_status = fc_status;
1772         pf->pause_time = pause_time;
1773
1774         return ret;
1775 }